diff --git a/CHANGES.txt b/CHANGES.txt index 52d21202fe9c..31a6b5b8cf01 100755 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -7,7 +7,7 @@ Release 0.92.1 - Unreleased BUG FIXES HBASE-5176 AssignmentManager#getRegion: logging nit adds a redundant '+' (Karthik K) HBASE-5237 Addendum for HBASE-5160 and HBASE-4397 (Ram) - HBASE-5235 HLogSplitter writer thread's streams not getting closed when any + HBASE-5235 HLogSplitter writer thread's streams not getting closed when any of the writer threads has exceptions. (Ram) HBASE-5243 LogSyncerThread not getting shutdown waiting for the interrupted flag (Ram) HBASE-5255 Use singletons for OperationStatus to save memory (Benoit) @@ -144,7 +144,7 @@ Release 0.92.0 - 01/23/2012 HBASE-3897 Docs (notsoquick guide) suggest invalid XML (Philip Zeyliger) HBASE-3898 TestSplitTransactionOnCluster broke in TRUNK HBASE-3826 Minor compaction needs to check if still over - compactionThreshold after compacting (Nicolas Spiegelberg) + compactionThreshold after compacting (Nicolas Spiegelberg) HBASE-3912 [Stargate] Columns not handle by Scan HBASE-3903 A successful write to client write-buffer may be lost or not visible (Doug Meil) @@ -198,7 +198,7 @@ Release 0.92.0 - 01/23/2012 HBASE-4112 Creating table may throw NullPointerException (Jinchao via Ted Yu) HBASE-4093 When verifyAndAssignRoot throws exception, the deadServers state cannot be changed (fulin wang via Ted Yu) - HBASE-4118 method regionserver.MemStore#updateColumnValue: the check for + HBASE-4118 method regionserver.MemStore#updateColumnValue: the check for qualifier and family is missing (N Keywal via Ted Yu) HBASE-4127 Don't modify table's name away in HBaseAdmin HBASE-4105 Stargate does not support Content-Type: application/json and @@ -300,7 +300,7 @@ Release 0.92.0 - 01/23/2012 HBASE-4395 EnableTableHandler races with itself HBASE-4414 Region splits by size not being triggered HBASE-4322 HBASE-4322 [hbck] Update checkIntegrity/checkRegionChain - to present more accurate region split problem + to present more accurate region split problem (Jon Hseih) HBASE-4417 HBaseAdmin.checkHBaseAvailable() doesn't close ZooKeeper connections (Stefan Seelmann) @@ -483,7 +483,7 @@ Release 0.92.0 - 01/23/2012 HBASE-5100 Rollback of split could cause closed region to be opened again (Chunhui) HBASE-4397 -ROOT-, .META. tables stay offline for too long in recovery phase after all RSs are shutdown at the same time (Ming Ma) - HBASE-5094 The META can hold an entry for a region with a different server name from the one + HBASE-5094 The META can hold an entry for a region with a different server name from the one actually in the AssignmentManager thus making the region inaccessible. 
(Ram) HBASE-5081 Distributed log splitting deleteNode races against splitLog retry (Prakash) HBASE-4357 Region stayed in transition - in closing state (Ming Ma) @@ -517,7 +517,7 @@ Release 0.92.0 - 01/23/2012 HBASE-5105 TestImportTsv failed with hadoop 0.22 (Ming Ma) IMPROVEMENTS - HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack) + HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack) HBASE-3292 Expose block cache hit/miss/evict counts into region server metrics HBASE-2936 Differentiate between daemon & restart sleep periods @@ -538,7 +538,7 @@ Release 0.92.0 - 01/23/2012 (rpc version 43) HBASE-3563 [site] Add one-page-only version of hbase doc HBASE-3564 DemoClient.pl - a demo client in Perl - HBASE-3560 the hbase-default entry of "hbase.defaults.for.version" + HBASE-3560 the hbase-default entry of "hbase.defaults.for.version" causes tests not to run via not-maven HBASE-3513 upgrade thrift to 0.5.0 and use mvn version HBASE-3533 Allow HBASE_LIBRARY_PATH env var to specify extra locations @@ -601,7 +601,7 @@ Release 0.92.0 - 01/23/2012 HBASE-3765 metrics.xml - small format change and adding nav to hbase book metrics section (Doug Meil) HBASE-3759 Eliminate use of ThreadLocals for CoprocessorEnvironment - bypass() and complete() + bypass() and complete() HBASE-3701 revisit ArrayList creation (Ted Yu via Stack) HBASE-3753 Book.xml - architecture, adding more Store info (Doug Meil) HBASE-3784 book.xml - adding small subsection in architecture/client on @@ -738,7 +738,7 @@ Release 0.92.0 - 01/23/2012 HBASE-4425 Provide access to RpcServer instance from RegionServerServices HBASE-4411 When copying tables/CFs, allow CF names to be changed (David Revell) - HBASE-4424 Provide coprocessors access to createTable() via + HBASE-4424 Provide coprocessors access to createTable() via MasterServices HBASE-4432 Enable/Disable off heap cache with config (Li Pi) HBASE-4434 seek optimization: don't do eager HFile Scanner @@ -1098,7 +1098,7 @@ Release 0.90.3 - May 19th, 2011 HBASE-3846 Set RIT timeout higher Release 0.90.2 - 20110408 - + BUG FIXES HBASE-3545 Possible liveness issue with MasterServerAddress in HRegionServer getMaster (Greg Bowyer via Stack) @@ -1151,7 +1151,7 @@ Release 0.90.2 - 20110408 HBASE-3654 Weird blocking between getOnlineRegion and createRegionLoad (Subbu M Iyer via Stack) HBASE-3666 TestScannerTimeout fails occasionally - HBASE-3497 TableMapReduceUtil.initTableReducerJob broken due to setConf + HBASE-3497 TableMapReduceUtil.initTableReducerJob broken due to setConf method in TableOutputFormat HBASE-3686 ClientScanner skips too many rows on recovery if using scanner caching (Sean Sechrist via Stack) @@ -1159,7 +1159,7 @@ Release 0.90.2 - 20110408 IMPROVEMENTS HBASE-3542 MultiGet methods in Thrift HBASE-3586 Improve the selection of regions to balance (Ted Yu via Andrew - Purtell) + Purtell) HBASE-3603 Remove -XX:+HeapDumpOnOutOfMemoryError autodump of heap option on OOME HBASE-3285 Hlog recovery takes too much time @@ -1186,19 +1186,19 @@ Release 0.90.1 - February 9th, 2011 HBASE-3455 Add memstore-local allocation buffers to combat heap fragmentation in the region server. 
Experimental / disabled by default in 0.90.1 - + BUG FIXES HBASE-3445 Master crashes on data that was moved from different host HBASE-3449 Server shutdown handlers deadlocked waiting for META HBASE-3456 Fix hardcoding of 20 second socket timeout down in HBaseClient HBASE-3476 HFile -m option need not scan key values (Prakash Khemani via Lars George) - HBASE-3481 max seq id in flushed file can be larger than its correct value + HBASE-3481 max seq id in flushed file can be larger than its correct value causing data loss during recovery HBASE-3493 HMaster sometimes hangs during initialization due to missing notify call (Bruno Dumon via Stack) HBASE-3483 Memstore lower limit should trigger asynchronous flushes - HBASE-3494 checkAndPut implementation doesnt verify row param and writable + HBASE-3494 checkAndPut implementation doesnt verify row param and writable row are the same HBASE-3416 For intra-row scanning, the update readers notification resets the query matcher and can lead to incorrect behavior @@ -1288,7 +1288,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-1830 HbaseObjectWritable methods should allow null HBCs for when Writable is not Configurable (Stack via jgray) HBASE-1847 Delete latest of a null qualifier when non-null qualifiers - exist throws a RuntimeException + exist throws a RuntimeException HBASE-1850 src/examples/mapred do not compile after HBASE-1822 HBASE-1853 Each time around the regionserver core loop, we clear the messages to pass master, even if we failed to deliver them @@ -1343,9 +1343,9 @@ Release 0.90.0 - January 19th, 2011 HBASE-1954 Transactional scans do not see newest put (Clint Morgan via Stack) HBASE-1919 code: HRS.delete seems to ignore exceptions it shouldnt - HBASE-1951 Stack overflow when calling HTable.checkAndPut() + HBASE-1951 Stack overflow when calling HTable.checkAndPut() when deleting a lot of values - HBASE-1781 Weird behavior of WildcardColumnTracker.checkColumn(), + HBASE-1781 Weird behavior of WildcardColumnTracker.checkColumn(), looks like recursive loop HBASE-1949 KeyValue expiration by Time-to-Live during major compaction is broken (Gary Helmling via Stack) @@ -1377,7 +1377,7 @@ Release 0.90.0 - January 19th, 2011 'descendingIterator' (Ching-Shen Chen via Stack) HBASE-2033 Shell scan 'limit' is off by one HBASE-2040 Fixes to group commit - HBASE-2047 Example command in the "Getting Started" + HBASE-2047 Example command in the "Getting Started" documentation doesn't work (Benoit Sigoure via JD) HBASE-2048 Small inconsistency in the "Example API Usage" (Benoit Sigoure via JD) @@ -1385,14 +1385,14 @@ Release 0.90.0 - January 19th, 2011 HBASE-1960 Master should wait for DFS to come up when creating hbase.version HBASE-2054 memstore size 0 is >= than blocking -2.0g size - HBASE-2064 Cannot disable a table if at the same the Master is moving + HBASE-2064 Cannot disable a table if at the same the Master is moving its regions around - HBASE-2065 Cannot disable a table if any of its region is opening + HBASE-2065 Cannot disable a table if any of its region is opening at the same time HBASE-2026 NPE in StoreScanner on compaction HBASE-2072 fs.automatic.close isn't passed to FileSystem HBASE-2075 Master requires HDFS superuser privileges due to waitOnSafeMode - HBASE-2077 NullPointerException with an open scanner that expired causing + HBASE-2077 NullPointerException with an open scanner that expired causing an immediate region server shutdown (Sam Pullara via JD) HBASE-2078 Add JMX settings as commented out lines to hbase-env.sh (Lars George via JD) 
@@ -1459,11 +1459,11 @@ Release 0.90.0 - January 19th, 2011 HBASE-2258 The WhileMatchFilter doesn't delegate the call to filterRow() HBASE-2259 StackOverflow in ExplicitColumnTracker when row has many columns HBASE-2268 [stargate] Failed tests and DEBUG output is dumped to console - since move to Mavenized build - HBASE-2276 Hbase Shell hcd() method is broken by the replication scope + since move to Mavenized build + HBASE-2276 Hbase Shell hcd() method is broken by the replication scope parameter (Alexey Kovyrin via Lars George) HBASE-2244 META gets inconsistent in a number of crash scenarios - HBASE-2284 fsWriteLatency metric may be incorrectly reported + HBASE-2284 fsWriteLatency metric may be incorrectly reported (Kannan Muthukkaruppan via Stack) HBASE-2063 For hfileoutputformat, on timeout/failure/kill clean up half-written hfile (Ruslan Salyakhov via Stack) @@ -1478,7 +1478,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-2308 Fix the bin/rename_table.rb script, make it work again HBASE-2307 hbase-2295 changed hregion size, testheapsize broke... fix it HBASE-2269 PerformanceEvaluation "--nomapred" may assign duplicate random - seed over multiple testing threads (Tatsuya Kawano via Stack) + seed over multiple testing threads (Tatsuya Kawano via Stack) HBASE-2287 TypeError in shell (Alexey Kovyrin via Stack) HBASE-2023 Client sync block can cause 1 thread of a multi-threaded client to block all others (Karthik Ranganathan via Stack) @@ -1548,10 +1548,10 @@ Release 0.90.0 - January 19th, 2011 HBASE-2544 Forward port branch 0.20 WAL to TRUNK HBASE-2546 Specify default filesystem in both the new and old way (needed if we are to run on 0.20 and 0.21 hadoop) - HBASE-1895 HConstants.MAX_ROW_LENGTH is incorrectly 64k, should be 32k - HBASE-1968 Give clients access to the write buffer + HBASE-1895 HConstants.MAX_ROW_LENGTH is incorrectly 64k, should be 32k + HBASE-1968 Give clients access to the write buffer HBASE-2028 Add HTable.incrementColumnValue support to shell - (Lars George via Andrew Purtell) + (Lars George via Andrew Purtell) HBASE-2138 unknown metrics type HBASE-2551 Forward port fixes that are in branch but not in trunk (part of the merge of old 0.20 into TRUNK task) -- part 1. 
@@ -1560,7 +1560,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-2344 InfoServer and hence HBase Master doesn't fully start if you have HADOOP-6151 patch (Kannan Muthukkaruppan via Stack) HBASE-2382 Don't rely on fs.getDefaultReplication() to roll HLogs - (Nicolas Spiegelberg via Stack) + (Nicolas Spiegelberg via Stack) HBASE-2415 Disable META splitting in 0.20 (Todd Lipcon via Stack) HBASE-2421 Put hangs for 10 retries on failed region servers HBASE-2442 Log lease recovery catches IOException too widely @@ -1617,7 +1617,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-2703 ui not working in distributed context HBASE-2710 Shell should use default terminal width when autodetection fails (Kannan Muthukkaruppan via Todd Lipcon) - HBASE-2712 Cached region location that went stale won't recover if + HBASE-2712 Cached region location that went stale won't recover if asking for first row HBASE-2732 TestZooKeeper was broken, HBASE-2691 showed it HBASE-2670 Provide atomicity for readers even when new insert has @@ -1653,7 +1653,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-2772 Scan doesn't recover from region server failure HBASE-2775 Update of hadoop jar in HBASE-2771 broke TestMultiClusters HBASE-2774 Spin in ReadWriteConsistencyControl eating CPU (load > 40) and - no progress running YCSB on clean cluster startup + no progress running YCSB on clean cluster startup HBASE-2785 TestScannerTimeout.test2772 is flaky HBASE-2787 PE is confused about flushCommits HBASE-2707 Can't recover from a dead ROOT server if any exceptions happens @@ -1665,18 +1665,18 @@ Release 0.90.0 - January 19th, 2011 HBASE-2797 Another NPE in ReadWriteConsistencyControl HBASE-2831 Fix '$bin' path duplication in setup scripts (Nicolas Spiegelberg via Stack) - HBASE-2781 ZKW.createUnassignedRegion doesn't make sure existing znode is + HBASE-2781 ZKW.createUnassignedRegion doesn't make sure existing znode is in the right state (Karthik Ranganathan via JD) HBASE-2727 Splits writing one file only is untenable; need dir of recovered edits ordered by sequenceid - HBASE-2843 Readd bloomfilter test over zealously removed by HBASE-2625 + HBASE-2843 Readd bloomfilter test over zealously removed by HBASE-2625 HBASE-2846 Make rest server be same as thrift and avro servers HBASE-1511 Pseudo distributed mode in LocalHBaseCluster (Nicolas Spiegelberg via Stack) HBASE-2851 Remove testDynamicBloom() unit test (Nicolas Spiegelberg via Stack) HBASE-2853 TestLoadIncrementalHFiles fails on TRUNK - HBASE-2854 broken tests on trunk + HBASE-2854 broken tests on trunk HBASE-2859 Cleanup deprecated stuff in TestHLog (Alex Newman via Stack) HBASE-2858 TestReplication.queueFailover fails half the time HBASE-2863 HBASE-2553 removed an important edge case @@ -1789,7 +1789,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-3064 Long sleeping in HConnectionManager after thread is interrupted (Bruno Dumon via Stack) HBASE-2753 Remove sorted() methods from Result now that Gets are Scans - HBASE-3059 TestReadWriteConsistencyControl occasionally hangs (Hairong + HBASE-3059 TestReadWriteConsistencyControl occasionally hangs (Hairong via Ryan) HBASE-2906 [rest/stargate] URI decoding in RowResource HBASE-3008 Memstore.updateColumnValue passes wrong flag to heapSizeChange @@ -1820,7 +1820,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-3121 [rest] Do not perform cache control when returning results HBASE-2669 HCM.shutdownHook causes data loss with hbase.client.write.buffer != 0 - HBASE-2985 HRegionServer.multi() no longer calls HRegion.put(List) when + HBASE-2985 
HRegionServer.multi() no longer calls HRegion.put(List) when possible HBASE-3031 CopyTable MR job named "Copy Table" in Driver HBASE-2658 REST (stargate) TableRegionModel Regions need to be updated to @@ -1891,7 +1891,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-3199 large response handling: some fixups and cleanups HBASE-3212 More testing of enable/disable uncovered base condition not in place; i.e. that only one enable/disable runs at a time - HBASE-2898 MultiPut makes proper error handling impossible and leads to + HBASE-2898 MultiPut makes proper error handling impossible and leads to corrupted data HBASE-3213 If do abort of backup master will get NPE instead of graceful abort @@ -1904,7 +1904,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-3224 NPE in KeyValue$KVComparator.compare when compacting HBASE-3233 Fix Long Running Stats HBASE-3232 Fix KeyOnlyFilter + Add Value Length (Nicolas via Ryan) - HBASE-3235 Intermittent incrementColumnValue failure in TestHRegion + HBASE-3235 Intermittent incrementColumnValue failure in TestHRegion (Gary via Ryan) HBASE-3241 check to see if we exceeded hbase.regionserver.maxlogs limit is incorrect (Kannan Muthukkaruppan via JD) @@ -1955,7 +1955,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-3352 enabling a non-existent table from shell prints no error HBASE-3353 table.jsp doesn't handle entries in META without server info HBASE-3351 ReplicationZookeeper goes to ZK every time a znode is modified - HBASE-3326 Replication state's znode should be created else it + HBASE-3326 Replication state's znode should be created else it defaults to false HBASE-3355 Stopping a stopped cluster leaks an HMaster HBASE-3356 Add more checks in replication if RS is stopped @@ -2060,8 +2060,8 @@ Release 0.90.0 - January 19th, 2011 HBASE-1942 Update hadoop jars in trunk; update to r831142 HBASE-1943 Remove AgileJSON; unused HBASE-1944 Add a "deferred log flush" attribute to HTD - HBASE-1945 Remove META and ROOT memcache size bandaid - HBASE-1947 If HBase starts/stops often in less than 24 hours, + HBASE-1945 Remove META and ROOT memcache size bandaid + HBASE-1947 If HBase starts/stops often in less than 24 hours, you end up with lots of store files HBASE-1829 Make use of start/stop row in TableInputFormat (Lars George via Stack) @@ -2109,7 +2109,7 @@ Release 0.90.0 - January 19th, 2011 Stack) HBASE-2076 Many javadoc warnings HBASE-2068 MetricsRate is missing "registry" parameter (Lars George via JD) - HBASE-2025 0.20.2 accessed from older client throws + HBASE-2025 0.20.2 accessed from older client throws UndeclaredThrowableException; frustrates rolling upgrade HBASE-2081 Set the retries higher in shell since client pause is lower HBASE-1956 Export HDFS read and write latency as a metric @@ -2131,7 +2131,7 @@ Release 0.90.0 - January 19th, 2011 ./bin/start-hbase.sh in a checkout HBASE-2136 Forward-port the old mapred package HBASE-2133 Increase default number of client handlers - HBASE-2109 status 'simple' should show total requests per second, also + HBASE-2109 status 'simple' should show total requests per second, also the requests/sec is wrong as is HBASE-2151 Remove onelab and include generated thrift classes in javadoc (Lars Francke via Stack) @@ -2170,9 +2170,9 @@ Release 0.90.0 - January 19th, 2011 HBASE-2250 typo in the maven pom HBASE-2254 Improvements to the Maven POMs (Lars Francke via Stack) HBASE-2262 ZKW.ensureExists should check for existence - HBASE-2264 Adjust the contrib apps to the Maven project layout + HBASE-2264 Adjust the contrib apps to the Maven 
project layout (Lars Francke via Lars George) - HBASE-2245 Unnecessary call to syncWal(region); in HRegionServer + HBASE-2245 Unnecessary call to syncWal(region); in HRegionServer (Benoit Sigoure via JD) HBASE-2246 Add a getConfiguration method to HTableInterface (Benoit Sigoure via JD) @@ -2180,10 +2180,10 @@ Release 0.90.0 - January 19th, 2011 development (Alexey Kovyrin via Stack) HBASE-2267 More improvements to the Maven build (Lars Francke via Stack) HBASE-2174 Stop from resolving HRegionServer addresses to names using DNS - on every heartbeat (Karthik Ranganathan via Stack) + on every heartbeat (Karthik Ranganathan via Stack) HBASE-2302 Optimize M-R by bulk excluding regions - less InputSplit-s to avoid traffic on region servers when performing M-R on a subset - of the table (Kay Kay via Stack) + of the table (Kay Kay via Stack) HBASE-2309 Add apache releases to pom (list of ) repositories (Kay Kay via Stack) HBASE-2279 Hbase Shell does not have any tests (Alexey Kovyrin via Stack) @@ -2209,15 +2209,15 @@ Release 0.90.0 - January 19th, 2011 HBASE-2374 TableInputFormat - Configurable parameter to add column families (Kay Kay via Stack) HBASE-2388 Give a very explicit message when we figure a big GC pause - HBASE-2270 Improve how we handle recursive calls in ExplicitColumnTracker + HBASE-2270 Improve how we handle recursive calls in ExplicitColumnTracker and WildcardColumnTracker HBASE-2402 [stargate] set maxVersions on gets - HBASE-2087 The wait on compaction because "Too many store files" + HBASE-2087 The wait on compaction because "Too many store files" holds up all flushing HBASE-2252 Mapping a very big table kills region servers HBASE-2412 [stargate] PerformanceEvaluation HBASE-2419 Remove from RS logs the fat NotServingRegionException stack - HBASE-2286 [Transactional Contrib] Correctly handle or avoid cases where + HBASE-2286 [Transactional Contrib] Correctly handle or avoid cases where writes occur in same millisecond (Clint Morgan via J-D) HBASE-2360 Make sure we have all the hadoop fixes in our our copy of its rpc (Todd Lipcon via Stack) @@ -2251,7 +2251,7 @@ Release 0.90.0 - January 19th, 2011 (Todd Lipcon via Stack) HBASE-2547 [mvn] assembly:assembly does not include hbase-X.X.X-test.jar (Paul Smith via Stack) - HBASE-2037 The core elements of HBASE-2037: refactoring flushing, and adding + HBASE-2037 The core elements of HBASE-2037: refactoring flushing, and adding configurability in which HRegion subclass is instantiated HBASE-2248 Provide new non-copy mechanism to assure atomic reads in get and scan HBASE-2523 Add check for licenses before rolling an RC, add to @@ -2264,7 +2264,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-2520 Cleanup arrays vs Lists of scanners (Todd Lipcon via Stack) HBASE-2551 Forward port fixes that are in branch but not in trunk (part of the merge of old 0.20 into TRUNK task) - HBASE-2466 Improving filter API to allow for modification of keyvalue list + HBASE-2466 Improving filter API to allow for modification of keyvalue list by filter (Juhani Connolly via Ryan) HBASE-2566 Remove 'lib' dir; it only has libthrift and that is being pulled from http://people.apache.org/~rawson/repo/.... 
@@ -2289,13 +2289,13 @@ Release 0.90.0 - January 19th, 2011 failing hudson on occasion) HBASE-2651 Allow alternate column separators to be specified for ImportTsv HBASE-2661 Add test case for row atomicity guarantee - HBASE-2578 Add ability for tests to override server-side timestamp + HBASE-2578 Add ability for tests to override server-side timestamp setting (currentTimeMillis) (Daniel Ploeg via Ryan Rawson) HBASE-2558 Our javadoc overview -- "Getting Started", requirements, etc. -- is not carried across by mvn javadoc:javadoc target HBASE-2618 Don't inherit from HConstants (Benoit Sigoure via Stack) HBASE-2208 TableServers # processBatchOfRows - converts from List to [ ] - - Expensive copy + - Expensive copy HBASE-2694 Move RS to Master region open/close messaging into ZooKeeper HBASE-2716 Make HBase's maven artifacts configurable with -D (Alex Newman via Stack) @@ -2308,7 +2308,7 @@ Release 0.90.0 - January 19th, 2011 message HBASE-2724 Update to new release of Guava library HBASE-2735 Make HBASE-2694 replication-friendly - HBASE-2683 Make it obvious in the documentation that ZooKeeper needs + HBASE-2683 Make it obvious in the documentation that ZooKeeper needs permanent storage HBASE-2764 Force all Chore tasks to have a thread name HBASE-2762 Add warning to master if running without append enabled @@ -2319,7 +2319,7 @@ Release 0.90.0 - January 19th, 2011 (Nicolas Spiegelberg via JD) HBASE-2786 TestHLog.testSplit hangs (Nicolas Spiegelberg via JD) HBASE-2790 Purge apache-forrest from TRUNK - HBASE-2793 Add ability to extract a specified list of versions of a column + HBASE-2793 Add ability to extract a specified list of versions of a column in a single roundtrip (Kannan via Ryan) HBASE-2828 HTable unnecessarily coupled with HMaster (Nicolas Spiegelberg via Stack) @@ -2331,7 +2331,7 @@ Release 0.90.0 - January 19th, 2011 next column (Pranav via jgray) HBASE-2835 Update hadoop jar to head of branch-0.20-append to catch three added patches - HBASE-2840 Remove the final remnants of the old Get code - the query matchers + HBASE-2840 Remove the final remnants of the old Get code - the query matchers and other helper classes HBASE-2845 Small edit of shell main help page cutting down some on white space and text @@ -2360,9 +2360,9 @@ Release 0.90.0 - January 19th, 2011 HBASE-1517 Implement inexpensive seek operations in HFile (Pranav via Ryan) HBASE-2903 ColumnPrefix filtering (Pranav via Ryan) HBASE-2904 Smart seeking using filters (Pranav via Ryan) - HBASE-2922 HLog preparation and cleanup are done under the updateLock, + HBASE-2922 HLog preparation and cleanup are done under the updateLock, major slowdown - HBASE-1845 MultiGet, MultiDelete, and MultiPut - batched to the + HBASE-1845 MultiGet, MultiDelete, and MultiPut - batched to the appropriate region servers (Marc Limotte via Ryan) HBASE-2867 Have master show its address using hostname rather than IP HBASE-2696 ZooKeeper cleanup and refactor @@ -2375,7 +2375,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-2857 HBaseAdmin.tableExists() should not require a full meta scan HBASE-2962 Add missing methods to HTableInterface (and HTable) (Lars Francke via Stack) - HBASE-2942 Custom filters should not require registration in + HBASE-2942 Custom filters should not require registration in HBaseObjectWritable (Gary Helmling via Andrew Purtell) HBASE-2976 Running HFile tool passing fully-qualified filename I get 'IllegalArgumentException: Wrong FS' @@ -2417,7 +2417,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-3133 Only log compaction requests when 
a request is actually added to the queue HBASE-3132 Print TimestampRange and BloomFilters in HFile pretty print - HBASE-2514 RegionServer should refuse to be assigned a region that use + HBASE-2514 RegionServer should refuse to be assigned a region that use LZO when LZO isn't available HBASE-3082 For ICV gets, first look in MemStore before reading StoreFiles (prakash via jgray) @@ -2548,7 +2548,7 @@ Release 0.90.0 - January 19th, 2011 HBASE-410 [testing] Speed up the test suite HBASE-2041 Change WAL default configuration values HBASE-2997 Performance fixes - profiler driven - HBASE-2450 For single row reads of specific columns, seek to the + HBASE-2450 For single row reads of specific columns, seek to the first column in HFiles rather than start of row (Pranav via Ryan, some Ryan) @@ -2615,8 +2615,8 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 HBASE-1243 oldlogfile.dat is screwed, so is it's region HBASE-1169 When a shutdown is requested, stop scanning META regions immediately - HBASE-1251 HConnectionManager.getConnection(HBaseConfiguration) returns - same HConnection for different HBaseConfigurations + HBASE-1251 HConnectionManager.getConnection(HBaseConfiguration) returns + same HConnection for different HBaseConfigurations HBASE-1157, HBASE-1156 If we do not take start code as a part of region server recovery, we could inadvertantly try to reassign regions assigned to a restarted server with a different start code; @@ -2675,7 +2675,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 (Thomas Schneider via Andrew Purtell) HBASE-1374 NPE out of ZooKeeperWrapper.loadZooKeeperConfig HBASE-1336 Splitting up the compare of family+column into 2 different - compare + compare HBASE-1377 RS address is null in master web UI HBASE-1344 WARN IllegalStateException: Cannot set a region as open if it has not been pending @@ -2737,7 +2737,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 binary comparator (Jon Gray via Stack) HBASE-1500 KeyValue$KeyComparator array overrun HBASE-1513 Compactions too slow - HBASE-1516 Investigate if StoreScanner will not return the next row if + HBASE-1516 Investigate if StoreScanner will not return the next row if earlied-out of previous row (Jon Gray) HBASE-1520 StoreFileScanner catches and ignore IOExceptions from HFile HBASE-1522 We delete splits before their time occasionally @@ -2848,7 +2848,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 when trying to read HBASE-1705 Thrift server: deletes in mutateRow/s don't delete (Tim Sell and Ryan Rawson via Stack) - HBASE-1703 ICVs across /during a flush can cause multiple keys with the + HBASE-1703 ICVs across /during a flush can cause multiple keys with the same TS (bad) HBASE-1671 HBASE-1609 broke scanners riding across splits HBASE-1717 Put on client-side uses passed-in byte[]s rather than always @@ -2921,9 +2921,9 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 (Toby White via Andrew Purtell) HBASE-1180 Add missing import statements to SampleUploader and remove unnecessary @Overrides (Ryan Smith via Andrew Purtell) - HBASE-1191 ZooKeeper ensureParentExists calls fail + HBASE-1191 ZooKeeper ensureParentExists calls fail on absolute path (Nitay Joffe via Jean-Daniel Cryans) - HBASE-1187 After disabling/enabling a table, the regions seems to + HBASE-1187 After disabling/enabling a table, the regions seems to be assigned to only 1-2 region servers HBASE-1210 Allow truncation of output for scan and get commands in shell (Lars George via Stack) @@ -2955,7 +2955,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 (Nitay Joffe 
via Stack) HBASE-1285 Forcing compactions should be available via thrift (Tim Sell via Stack) - HBASE-1186 Memory-aware Maps with LRU eviction for cell cache + HBASE-1186 Memory-aware Maps with LRU eviction for cell cache (Jonathan Gray via Andrew Purtell) HBASE-1205 RegionServers should find new master when a new master comes up (Nitay Joffe via Andrew Purtell) @@ -3033,7 +3033,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 HBASE-1466 Binary keys are not first class citizens (Ryan Rawson via Stack) HBASE-1445 Add the ability to start a master from any machine - HBASE-1474 Add zk attributes to list of attributes + HBASE-1474 Add zk attributes to list of attributes in master and regionserver UIs HBASE-1448 Add a node in ZK to tell all masters to shutdown HBASE-1478 Remove hbase master options from shell (Nitay Joffe via Stack) @@ -3042,7 +3042,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 HBASE-1490 Update ZooKeeper library HBASE-1489 Basic git ignores for people who use git and eclipse HBASE-1453 Add HADOOP-4681 to our bundled hadoop, add to 'gettting started' - recommendation that hbase users backport + recommendation that hbase users backport HBASE-1507 iCMS as default JVM HBASE-1509 Add explanation to shell "help" command on how to use binarykeys (Lars George via Stack) @@ -3054,7 +3054,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009 on hbase-user traffic HBASE-1539 prevent aborts due to missing zoo.cfg HBASE-1488 Fix TestThriftServer and re-enable it - HBASE-1541 Scanning multiple column families in the presence of deleted + HBASE-1541 Scanning multiple column families in the presence of deleted families results in bad scans HBASE-1540 Client delete unit test, define behavior (Jonathan Gray via Stack) @@ -3161,13 +3161,13 @@ Release 0.19.0 - 01/21/2009 HBASE-906 [shell] Truncates output HBASE-912 PE is broken when other tables exist HBASE-853 [shell] Cannot describe meta tables (Izaak Rubin via Stack) - HBASE-844 Can't pass script to hbase shell + HBASE-844 Can't pass script to hbase shell HBASE-837 Add unit tests for ThriftServer.HBaseHandler (Izaak Rubin via Stack) HBASE-913 Classes using log4j directly HBASE-914 MSG_REPORT_CLOSE has a byte array for a message HBASE-918 Region balancing during startup makes cluster unstable - HBASE-921 region close and open processed out of order; makes for + HBASE-921 region close and open processed out of order; makes for disagreement between master and regionserver on region state HBASE-925 HRS NPE on way out if no master to connect to HBASE-928 NPE throwing RetriesExhaustedException @@ -3277,7 +3277,7 @@ Release 0.19.0 - 01/21/2009 crashed server; regionserver tries to execute incomplete log HBASE-1104, HBASE-1098, HBASE-1096: Doubly-assigned regions redux, IllegalStateException: Cannot set a region to be closed it it was - not already marked as closing, Does not recover if HRS carrying + not already marked as closing, Does not recover if HRS carrying -ROOT- goes down HBASE-1114 Weird NPEs compacting HBASE-1116 generated web.xml and svn don't play nice together @@ -3320,7 +3320,7 @@ Release 0.19.0 - 01/21/2009 HBASE-949 Add an HBase Manual HBASE-839 Update hadoop libs in hbase; move hbase TRUNK on to an hadoop 0.19.0 RC - HBASE-785 Remove InfoServer, use HADOOP-3824 StatusHttpServer + HBASE-785 Remove InfoServer, use HADOOP-3824 StatusHttpServer instead (requires hadoop 0.19) HBASE-81 When a scanner lease times out, throw a more "user friendly" exception HBASE-978 Remove BloomFilterDescriptor. It is no longer used. 
@@ -3396,7 +3396,7 @@ Release 0.18.0 - September 21st, 2008 BUG FIXES HBASE-881 Fixed bug when Master tries to reassign split or offline regions from a dead server - HBASE-860 Fixed Bug in IndexTableReduce where it concerns writing lucene + HBASE-860 Fixed Bug in IndexTableReduce where it concerns writing lucene index fields. HBASE-805 Remove unnecessary getRow overloads in HRS (Jonathan Gray via Jim Kellerman) (Fix whitespace diffs in HRegionServer) @@ -3504,8 +3504,8 @@ Release 0.2.0 - August 8, 2008. HBASE-487 Replace hql w/ a hbase-friendly jirb or jython shell Part 1: purge of hql and added raw jirb in its place. HBASE-521 Improve client scanner interface - HBASE-288 Add in-memory caching of data. Required update of hadoop to - 0.17.0-dev.2008-02-07_12-01-58. (Tom White via Stack) + HBASE-288 Add in-memory caching of data. Required update of hadoop to + 0.17.0-dev.2008-02-07_12-01-58. (Tom White via Stack) HBASE-696 Make bloomfilter true/false and self-sizing HBASE-720 clean up inconsistencies around deletes (Izaak Rubin via Stack) HBASE-796 Deprecates Text methods from HTable @@ -3577,7 +3577,7 @@ Release 0.2.0 - August 8, 2008. HBASE-715 Base HBase 0.2 on Hadoop 0.17.1 HBASE-718 hbase shell help info HBASE-717 alter table broke with new shell returns InvalidColumnNameException - HBASE-573 HBase does not read hadoop-*.xml for dfs configuration after + HBASE-573 HBase does not read hadoop-*.xml for dfs configuration after moving out hadoop/contrib HBASE-11 Unexpected exits corrupt DFS HBASE-12 When hbase regionserver restarts, it says "impossible state for @@ -3632,7 +3632,7 @@ Release 0.2.0 - August 8, 2008. HBASE-8 Delete table does not remove the table directory in the FS HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown that reach the client even after retries - HBASE-460 TestMigrate broken when HBase moved to subproject + HBASE-460 TestMigrate broken when HBase moved to subproject HBASE-462 Update migration tool HBASE-473 When a table is deleted, master sends multiple close messages to the region server @@ -3656,7 +3656,7 @@ Release 0.2.0 - August 8, 2008. HBASE-537 Wait for hdfs to exit safe mode HBASE-476 RegexpRowFilter behaves incorectly when there are multiple store files (Clint Morgan via Jim Kellerman) - HBASE-527 RegexpRowFilter does not work when there are columns from + HBASE-527 RegexpRowFilter does not work when there are columns from multiple families (Clint Morgan via Jim Kellerman) HBASE-534 Double-assignment at SPLIT-time HBASE-712 midKey found compacting is the first, not necessarily the optimal @@ -3721,13 +3721,13 @@ Release 0.2.0 - August 8, 2008. HBASE-790 During import, single region blocks requests for >10 minutes, thread dumps, throws out pending requests, and continues (Jonathan Gray via Stack) - + IMPROVEMENTS HBASE-559 MR example job to count table rows HBASE-596 DemoClient.py (Ivan Begtin via Stack) HBASE-581 Allow adding filters to TableInputFormat (At same time, ensure TIF is subclassable) (David Alves via Stack) - HBASE-603 When an exception bubbles out of getRegionServerWithRetries, wrap + HBASE-603 When an exception bubbles out of getRegionServerWithRetries, wrap the exception with a RetriesExhaustedException HBASE-600 Filters have excessive DEBUG logging HBASE-611 regionserver should do basic health check before reporting @@ -3789,7 +3789,7 @@ Release 0.2.0 - August 8, 2008. 
HMaster (Bryan Duxbury via Stack) HBASE-440 Add optional log roll interval so that log files are garbage collected - HBASE-407 Keep HRegionLocation information in LRU structure + HBASE-407 Keep HRegionLocation information in LRU structure HBASE-444 hbase is very slow at determining table is not present HBASE-438 XMLOutputter state should be initialized. HBASE-414 Move client classes into client package @@ -3801,7 +3801,7 @@ Release 0.2.0 - August 8, 2008. HBASE-464 HBASE-419 introduced javadoc errors HBASE-468 Move HStoreKey back to o.a.h.h HBASE-442 Move internal classes out of HRegionServer - HBASE-466 Move HMasterInterface, HRegionInterface, and + HBASE-466 Move HMasterInterface, HRegionInterface, and HMasterRegionInterface into o.a.h.h.ipc HBASE-479 Speed up TestLogRolling HBASE-480 Tool to manually merge two regions @@ -3851,7 +3851,7 @@ Release 0.2.0 - August 8, 2008. timestamps HBASE-511 Do exponential backoff in clients on NSRE, WRE, ISE, etc. (Andrew Purtell via Jim Kellerman) - + OPTIMIZATIONS HBASE-430 Performance: Scanners and getRow return maps with duplicate data @@ -3867,7 +3867,7 @@ Release 0.1.3 - 07/25/2008 HBASE-648 If mapfile index is empty, run repair HBASE-659 HLog#cacheFlushLock not cleared; hangs a region HBASE-663 Incorrect sequence number for cache flush - HBASE-652 Dropping table fails silently if table isn't disabled + HBASE-652 Dropping table fails silently if table isn't disabled HBASE-674 Memcache size unreliable HBASE-665 server side scanner doesn't honor stop row HBASE-681 NPE in Memcache (Clint Morgan via Jim Kellerman) @@ -3918,7 +3918,7 @@ Release 0.1.2 - 05/13/2008 HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting HBASE-619 Fix 'logs' link in UI HBASE-620 testmergetool failing in branch and trunk since hbase-618 went in - + IMPROVEMENTS HBASE-559 MR example job to count table rows HBASE-578 Upgrade branch to 0.16.3 hadoop. 
@@ -3952,7 +3952,7 @@ Release 0.1.1 - 04/11/2008 Release 0.1.0 INCOMPATIBLE CHANGES - HADOOP-2750 Deprecated methods startBatchUpdate, commitBatch, abortBatch, + HADOOP-2750 Deprecated methods startBatchUpdate, commitBatch, abortBatch, and renewLease have been removed from HTable (Bryan Duxbury via Jim Kellerman) HADOOP-2786 Move hbase out of hadoop core @@ -3961,7 +3961,7 @@ Release 0.1.0 with a hbase from 0.16.0 NEW FEATURES - HBASE-506 When an exception has to escape ServerCallable due to exhausted retries, + HBASE-506 When an exception has to escape ServerCallable due to exhausted retries, show all the exceptions that lead to this situation OPTIMIZATIONS @@ -3997,7 +3997,7 @@ Release 0.1.0 HBASE-514 table 'does not exist' when it does HBASE-537 Wait for hdfs to exit safe mode HBASE-534 Double-assignment at SPLIT-time - + IMPROVEMENTS HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid repetition of retry-on-failure logic (thanks to Peter Dolan and @@ -4006,22 +4006,22 @@ Release 0.1.0 HBASE-480 Tool to manually merge two regions HBASE-477 Add support for an HBASE_CLASSPATH HBASE-515 At least double default timeouts between regionserver and master - HBASE-482 package-level javadoc should have example client or at least + HBASE-482 package-level javadoc should have example client or at least point at the FAQ HBASE-497 RegionServer needs to recover if datanode goes down HBASE-456 Clearly state which ports need to be opened in order to run HBase HBASE-483 Merge tool won't merge two overlapping regions HBASE-476 RegexpRowFilter behaves incorectly when there are multiple store files (Clint Morgan via Jim Kellerman) - HBASE-527 RegexpRowFilter does not work when there are columns from + HBASE-527 RegexpRowFilter does not work when there are columns from multiple families (Clint Morgan via Jim Kellerman) - + Release 0.16.0 2008/02/04 HBase is now a subproject of Hadoop. The first HBase release as a subproject will be release 0.1.0 which will be equivalent to the version of HBase included in Hadoop 0.16.0. In order to - accomplish this, the HBase portion of HBASE-288 (formerly + accomplish this, the HBase portion of HBASE-288 (formerly HADOOP-1398) has been backed out. Once 0.1.0 is frozen (depending mostly on changes to infrastructure due to becoming a sub project instead of a contrib project), this patch will re-appear on HBase @@ -4030,7 +4030,7 @@ Release 0.16.0 INCOMPATIBLE CHANGES HADOOP-2056 A table with row keys containing colon fails to split regions HADOOP-2079 Fix generated HLog, HRegion names - HADOOP-2495 Minor performance improvements: Slim-down BatchOperation, etc. + HADOOP-2495 Minor performance improvements: Slim-down BatchOperation, etc. HADOOP-2506 Remove the algebra package HADOOP-2519 Performance improvements: Customized RPC serialization HADOOP-2478 Restructure how HBase lays out files in the file system (phase 1) @@ -4155,7 +4155,7 @@ Release 0.16.0 TableNotFoundException when a different table has been created previously (Bryan Duxbury via Stack) HADOOP-2587 Splits blocked by compactions cause region to be offline for - duration of compaction. + duration of compaction. 
HADOOP-2592 Scanning, a region can let out a row that its not supposed to have HADOOP-2493 hbase will split on row when the start and end row is the @@ -4188,7 +4188,7 @@ Release 0.16.0 table or table you are enumerating isn't the first table Delete empty file: src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/ TableOutputCollector.java per Nigel Daley - + IMPROVEMENTS HADOOP-2401 Add convenience put method that takes writable (Johan Oskarsson via Stack) @@ -4230,7 +4230,7 @@ Release 0.16.0 HADOOP-2351 If select command returns no result, it doesn't need to show the header information (Edward Yoon via Stack) HADOOP-2285 Add being able to shutdown regionservers (Dennis Kubes via Stack) - HADOOP-2458 HStoreFile.writeSplitInfo should just call + HADOOP-2458 HStoreFile.writeSplitInfo should just call HStoreFile.Reference.write HADOOP-2471 Add reading/writing MapFile to PerformanceEvaluation suite HADOOP-2522 Separate MapFile benchmark from PerformanceEvaluation @@ -4250,7 +4250,7 @@ Release 0.16.0 HADOOP-2616 hbase not spliting when the total size of region reaches max region size * 1.5 HADOOP-2643 Make migration tool smarter. - + Release 0.15.1 Branch 0.15 @@ -4318,9 +4318,9 @@ Branch 0.15 HADOOP-1975 HBase tests failing with java.lang.NumberFormatException HADOOP-1990 Regression test instability affects nightly and patch builds HADOOP-1996 TestHStoreFile fails on windows if run multiple times - HADOOP-1937 When the master times out a region server's lease, it is too + HADOOP-1937 When the master times out a region server's lease, it is too aggressive in reclaiming the server's log. - HADOOP-2004 webapp hql formatting bugs + HADOOP-2004 webapp hql formatting bugs HADOOP_2011 Make hbase daemon scripts take args in same order as hadoop daemon scripts HADOOP-2017 TestRegionServerAbort failure in patch build #903 and @@ -4339,7 +4339,7 @@ Branch 0.15 HADOOP-1794 Remove deprecated APIs HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode' HADOOP-1833 bin/stop_hbase.sh returns before it completes - (Izaak Rubin via Stack) + (Izaak Rubin via Stack) HADOOP-1835 Updated Documentation for HBase setup/installation (Izaak Rubin via Stack) HADOOP-1868 Make default configuration more responsive @@ -4358,13 +4358,13 @@ Below are the list of changes before 2007-08-18 1. HADOOP-1384. HBase omnibus patch. (jimk, Vuk Ercegovac, and Michael Stack) 2. HADOOP-1402. Fix javadoc warnings in hbase contrib. (Michael Stack) 3. HADOOP-1404. HBase command-line shutdown failing (Michael Stack) - 4. HADOOP-1397. Replace custom hbase locking with + 4. HADOOP-1397. Replace custom hbase locking with java.util.concurrent.locks.ReentrantLock (Michael Stack) 5. HADOOP-1403. HBase reliability - make master and region server more fault tolerant. 6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do 'Performance Evaluation', etc. - 7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed + 7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed class HLocking. 8. HADOOP-1424. TestHBaseCluster fails with IllegalMonitorStateException. Fix regression introduced by HADOOP-1397. @@ -4378,7 +4378,7 @@ Below are the list of changes before 2007-08-18 14. HADOOP-1460 On shutdown IOException with complaint 'Cannot cancel lease that is not held' 15. HADOOP-1421 Failover detection, split log files. 
- For the files modified, also clean up javadoc, class, field and method + For the files modified, also clean up javadoc, class, field and method visibility (HADOOP-1466) 16. HADOOP-1479 Fix NPE in HStore#get if store file only has keys < passed key. 17. HADOOP-1476 Distributed version of 'Performance Evaluation' script @@ -4397,13 +4397,13 @@ Below are the list of changes before 2007-08-18 26. HADOOP-1543 [hbase] Add HClient.tableExists 27. HADOOP-1519 [hbase] map/reduce interface for HBase. (Vuk Ercegovac and Jim Kellerman) - 28. HADOOP-1523 Hung region server waiting on write locks + 28. HADOOP-1523 Hung region server waiting on write locks 29. HADOOP-1560 NPE in MiniHBaseCluster on Windows 30. HADOOP-1531 Add RowFilter to HRegion.HScanner Adds a row filtering interface and two implemenentations: A page scanner, and a regex row/column-data matcher. (James Kennedy via Stack) 31. HADOOP-1566 Key-making utility - 32. HADOOP-1415 Provide configurable per-column bloom filters. + 32. HADOOP-1415 Provide configurable per-column bloom filters. HADOOP-1466 Clean up visibility and javadoc issues in HBase. 33. HADOOP-1538 Provide capability for client specified time stamps in HBase HADOOP-1466 Clean up visibility and javadoc issues in HBase. @@ -4417,7 +4417,7 @@ Below are the list of changes before 2007-08-18 41. HADOOP-1614 [hbase] HClient does not protect itself from simultaneous updates 42. HADOOP-1468 Add HBase batch update to reduce RPC overhead 43. HADOOP-1616 Sporadic TestTable failures - 44. HADOOP-1615 Replacing thread notification-based queue with + 44. HADOOP-1615 Replacing thread notification-based queue with java.util.concurrent.BlockingQueue in HMaster, HRegionServer 45. HADOOP-1606 Updated implementation of RowFilterSet, RowFilterInterface (Izaak Rubin via Stack) @@ -4438,10 +4438,10 @@ Below are the list of changes before 2007-08-18 53. HADOOP-1528 HClient for multiple tables - expose close table function 54. HADOOP-1466 Clean up warnings, visibility and javadoc issues in HBase. 55. HADOOP-1662 Make region splits faster - 56. HADOOP-1678 On region split, master should designate which host should + 56. HADOOP-1678 On region split, master should designate which host should serve daughter splits. Phase 1: Master balances load for new regions and when a region server fails. - 57. HADOOP-1678 On region split, master should designate which host should + 57. HADOOP-1678 On region split, master should designate which host should serve daughter splits. Phase 2: Master assigns children of split region instead of HRegionServer serving both children. 58. HADOOP-1710 All updates should be batch updates diff --git a/bin/considerAsDead.sh b/bin/considerAsDead.sh index ae1b8d885bf3..848e276cd004 100755 --- a/bin/considerAsDead.sh +++ b/bin/considerAsDead.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. 
# */ -# +# usage="Usage: considerAsDead.sh --hostname serverName" @@ -50,12 +50,12 @@ do rs_parts=(${rs//,/ }) hostname=${rs_parts[0]} echo $deadhost - echo $hostname + echo $hostname if [ "$deadhost" == "$hostname" ]; then znode="$zkrs/$rs" echo "ZNode Deleting:" $znode $bin/hbase zkcli delete $znode > /dev/null 2>&1 sleep 1 - ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /" - fi + ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /" + fi done diff --git a/bin/hbase-cleanup.sh b/bin/hbase-cleanup.sh index 92b40cca6ae0..69c1f72b6074 100755 --- a/bin/hbase-cleanup.sh +++ b/bin/hbase-cleanup.sh @@ -74,7 +74,7 @@ check_for_znodes() { znodes=`"$bin"/hbase zkcli ls $zparent/$zchild 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"` if [ "$znodes" != "" ]; then echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without cleaning hbase data." - echo #force a newline + echo #force a newline exit 1; else echo -n "All ZNode(s) of $command are expired." @@ -99,7 +99,7 @@ execute_clean_acls() { clean_up() { case $1 in - --cleanZk) + --cleanZk) execute_zk_command "deleteall ${zparent}"; ;; --cleanHdfs) @@ -120,7 +120,7 @@ clean_up() { ;; *) ;; - esac + esac } check_znode_exists() { diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 23385d6d6a40..104e9a0b67c3 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -103,7 +103,7 @@ do break fi done - + # Allow alternate hbase conf dir location. HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}" # List of hbase regions servers. diff --git a/bin/master-backup.sh b/bin/master-backup.sh index feca4ab86572..5d3f7cb75615 100755 --- a/bin/master-backup.sh +++ b/bin/master-backup.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. # */ -# +# # Run a shell command on all backup master hosts. # # Environment Variables @@ -45,7 +45,7 @@ bin=`cd "$bin">/dev/null; pwd` . "$bin"/hbase-config.sh # If the master backup file is specified in the command line, -# then it takes precedence over the definition in +# then it takes precedence over the definition in # hbase-env.sh. Save it here. HOSTLIST=$HBASE_BACKUP_MASTERS @@ -69,6 +69,6 @@ if [ -f $HOSTLIST ]; then sleep $HBASE_SLAVE_SLEEP fi done -fi +fi wait diff --git a/bin/regionservers.sh b/bin/regionservers.sh index b83c1f3c79eb..b10e5a3ec9f4 100755 --- a/bin/regionservers.sh +++ b/bin/regionservers.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. # */ -# +# # Run a shell command on all regionserver hosts. # # Environment Variables @@ -45,7 +45,7 @@ bin=`cd "$bin">/dev/null; pwd` . "$bin"/hbase-config.sh # If the regionservers file is specified in the command line, -# then it takes precedence over the definition in +# then it takes precedence over the definition in # hbase-env.sh. Save it here. 
HOSTLIST=$HBASE_REGIONSERVERS diff --git a/bin/stop-hbase.sh b/bin/stop-hbase.sh index b47ae1f7743b..d10e618f2d21 100755 --- a/bin/stop-hbase.sh +++ b/bin/stop-hbase.sh @@ -52,7 +52,7 @@ fi export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-master-$HOSTNAME export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log -logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out +logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out loglog="${HBASE_LOG_DIR}/${HBASE_LOGFILE}" pid=${HBASE_PID_DIR:-/tmp}/hbase-$HBASE_IDENT_STRING-master.pid @@ -74,7 +74,7 @@ fi # distributed == false means that the HMaster will kill ZK when it exits # HBASE-6504 - only take the first line of the output in case verbose gc is on distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1` -if [ "$distMode" == 'true' ] +if [ "$distMode" == 'true' ] then "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" stop zookeeper fi diff --git a/bin/test/process_based_cluster.sh b/bin/test/process_based_cluster.sh index eb8633f502cb..1c4c72532134 100755 --- a/bin/test/process_based_cluster.sh +++ b/bin/test/process_based_cluster.sh @@ -68,7 +68,7 @@ while [ $# -ne 0 ]; do -h|--help) print_usage ;; --kill) - IS_KILL=1 + IS_KILL=1 cmd_specified ;; --show) IS_SHOW=1 @@ -106,5 +106,3 @@ else echo "No command specified" >&2 exit 1 fi - - diff --git a/bin/zookeepers.sh b/bin/zookeepers.sh index 97bf41b60528..5d22d82a559a 100755 --- a/bin/zookeepers.sh +++ b/bin/zookeepers.sh @@ -17,7 +17,7 @@ # * See the License for the specific language governing permissions and # * limitations under the License. # */ -# +# # Run a shell command on all zookeeper hosts. # # Environment Variables diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh index 3889973febaa..bebd53c64091 100644 --- a/conf/hbase-env.sh +++ b/conf/hbase-env.sh @@ -33,7 +33,7 @@ # The maximum amount of heap to use. Default is left to JVM default. # export HBASE_HEAPSIZE=1G -# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of +# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of # offheap, set the value to "8G". See http://hbase.apache.org/book.html#direct.memory # in the refguide for guidance setting this config. # export HBASE_OFFHEAPSIZE=1G @@ -71,7 +71,7 @@ # export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc: -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M" # See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations -# needed setting up off-heap block caching. +# needed setting up off-heap block caching. # Uncomment and adjust to enable JMX exporting # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access. @@ -102,7 +102,7 @@ # Where log files are stored. $HBASE_HOME/logs by default. # export HBASE_LOG_DIR=${HBASE_HOME}/logs -# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers +# Enable remote JDWP debugging of major HBase processes. 
Meant for Core Developers # export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070" # export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071" # export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072" @@ -126,13 +126,13 @@ # Tell HBase whether it should manage it's own instance of ZooKeeper or not. # export HBASE_MANAGES_ZK=true -# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the +# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the # RFA appender. Please refer to the log4j2.properties file to see more details on this appender. # In case one needs to do log rolling on a date change, one should set the environment property # HBASE_ROOT_LOGGER to ",DRFA". # For example: # export HBASE_ROOT_LOGGER=INFO,DRFA -# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as +# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as # DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context. # Tell HBase whether it should include Hadoop's lib when start up, diff --git a/conf/hbase-policy.xml b/conf/hbase-policy.xml index bf472407d173..5a0256d5164a 100644 --- a/conf/hbase-policy.xml +++ b/conf/hbase-policy.xml @@ -24,20 +24,20 @@ security.client.protocol.acl * - ACL for ClientProtocol and AdminProtocol implementations (ie. + ACL for ClientProtocol and AdminProtocol implementations (ie. clients talking to HRegionServers) - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. security.admin.protocol.acl * - ACL for HMasterInterface protocol implementation (ie. + ACL for HMasterInterface protocol implementation (ie. clients talking to HMaster for admin operations). - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. @@ -46,8 +46,8 @@ * ACL for HMasterRegionInterface protocol implementations (for HRegionServers communicating with HMaster) - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. 
diff --git a/dev-support/HBase Code Template.xml b/dev-support/HBase Code Template.xml index 3b666c97a8a6..9c69a5a40b34 100644 --- a/dev-support/HBase Code Template.xml +++ b/dev-support/HBase Code Template.xml @@ -38,4 +38,4 @@ ${type_declaration} \ No newline at end of file +// ${todo} Implement constructor diff --git a/dev-support/HOW_TO_YETUS_LOCAL.md b/dev-support/HOW_TO_YETUS_LOCAL.md index 8d22978d422c..2ac4ecd09dc1 100644 --- a/dev-support/HOW_TO_YETUS_LOCAL.md +++ b/dev-support/HOW_TO_YETUS_LOCAL.md @@ -87,7 +87,7 @@ these personalities; a pre-packaged personality can be selected via the `--project` parameter. There is a provided HBase personality in Yetus, however the HBase project maintains its own within the HBase source repository. Specify the path to the personality file using `--personality`. The HBase repository -places this file under `dev-support/hbase-personality.sh`. +places this file under `dev-support/hbase-personality.sh`. ## Docker mode diff --git a/dev-support/git-jira-release-audit/README.md b/dev-support/git-jira-release-audit/README.md index 6ea575e16fd3..a60695ae580a 100644 --- a/dev-support/git-jira-release-audit/README.md +++ b/dev-support/git-jira-release-audit/README.md @@ -141,7 +141,7 @@ Interactions with Jira: This invocation will build a "simple" database, correlating commits to branches. It omits gathering the detailed release tag data, so it runs pretty -quickly. +quickly. Example Run: diff --git a/dev-support/hbase_eclipse_formatter.xml b/dev-support/hbase_eclipse_formatter.xml index 6dec653ad620..99000a62e214 100644 --- a/dev-support/hbase_eclipse_formatter.xml +++ b/dev-support/hbase_eclipse_formatter.xml @@ -18,297 +18,401 @@ * limitations under the License. */ --> [formatter <setting> entries omitted] diff --git a/dev-support/hbase_nightly_pseudo-distributed-test.sh b/dev-support/hbase_nightly_pseudo-distributed-test.sh index 1267a85a71ad..ffe630865925 100755 --- a/dev-support/hbase_nightly_pseudo-distributed-test.sh +++ b/dev-support/hbase_nightly_pseudo-distributed-test.sh @@ -344,53 +344,53 @@ EOF echo "writing out example TSV to example.tsv" cat >"${working_dir}/example.tsv" < the test didn't finish 
notFinishedCounter=$(($notFinishedCounter + 1)) notFinishedList="$notFinishedList,$testClass" - fi + fi done #list of all tests that failed @@ -411,7 +411,7 @@ echo echo "Tests in error are: $errorPresList" echo "Tests that didn't finish are: $notFinishedPresList" echo -echo "Execution time in minutes: $exeTime" +echo "Execution time in minutes: $exeTime" echo "##########################" diff --git a/dev-support/jenkinsEnv.sh b/dev-support/jenkinsEnv.sh index d7fe87339e2a..969ece4dc4c6 100755 --- a/dev-support/jenkinsEnv.sh +++ b/dev-support/jenkinsEnv.sh @@ -33,4 +33,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin: export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}" ulimit -n - diff --git a/dev-support/rebase_all_git_branches.sh b/dev-support/rebase_all_git_branches.sh index ef213c8fb3db..5c63e4054691 100755 --- a/dev-support/rebase_all_git_branches.sh +++ b/dev-support/rebase_all_git_branches.sh @@ -17,11 +17,11 @@ # specific language governing permissions and limitations # under the License. -# This script assumes that your remote is called "origin" +# This script assumes that your remote is called "origin" # and that your local master branch is called "master". # I am sure it could be made more abstract but these are the defaults. -# Edit this line to point to your default directory, +# Edit this line to point to your default directory, # or always pass a directory to the script. DEFAULT_DIR="EDIT_ME" @@ -69,13 +69,13 @@ function check_git_branch_status { } function get_jira_status { - # This function expects as an argument the JIRA ID, + # This function expects as an argument the JIRA ID, # and returns 99 if resolved and 1 if it couldn't # get the status. - # The JIRA status looks like this in the HTML: + # The JIRA status looks like this in the HTML: # span id="resolution-val" class="value resolved" > - # The following is a bit brittle, but filters for lines with + # The following is a bit brittle, but filters for lines with # resolution-val returns 99 if it's resolved jira_url='https://issues.apache.org/jira/rest/api/2/issue' jira_id="$1" @@ -106,7 +106,7 @@ while getopts ":hd:" opt; do print_usage exit 0 ;; - *) + *) echo "Invalid argument: $OPTARG" >&2 print_usage >&2 exit 1 @@ -135,7 +135,7 @@ get_tracking_branches for i in "${tracking_branches[@]}"; do git checkout -q "$i" # Exit if git status is dirty - check_git_branch_status + check_git_branch_status git pull -q --rebase status=$? if [ "$status" -ne 0 ]; then @@ -169,7 +169,7 @@ for i in "${all_branches[@]}"; do git checkout -q "$i" # Exit if git status is dirty - check_git_branch_status + check_git_branch_status # If this branch has a remote, don't rebase it # If it has a remote, it has a log with at least one entry @@ -184,7 +184,7 @@ for i in "${all_branches[@]}"; do echo "Failed. Rolling back. Rebase $i manually." git rebase --abort fi - elif [ $status -ne 0 ]; then + elif [ $status -ne 0 ]; then # If status is 0 it means there is a remote branch, we already took care of it echo "Unknown error: $?" >&2 exit 1 @@ -195,10 +195,10 @@ done for i in "${deleted_branches[@]}"; do read -p "$i's JIRA is resolved. Delete? 
" yn case $yn in - [Yy]) + [Yy]) git branch -D $i ;; - *) + *) echo "To delete it manually, run git branch -D $deleted_branches" ;; esac diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index 9200e3ba921c..a8a22b06ef16 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -52,7 +52,7 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then # correct place to put those files. # NOTE 2014/07/17: -# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash +# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash # causing below checks to fail. Once it is fixed, we can revert the commit and enable this again. # TMP2=/tmp/tmp.paths.2.$$ diff --git a/dev-support/test-util.sh b/dev-support/test-util.sh index 9219bb96606c..b97e2de383fc 100755 --- a/dev-support/test-util.sh +++ b/dev-support/test-util.sh @@ -32,7 +32,7 @@ options: -h Show this message -c Run 'mvn clean' before running the tests -f FILE Run the additional tests listed in the FILE - -u Only run unit tests. Default is to run + -u Only run unit tests. Default is to run unit and integration tests -n N Run each test N times. Default = 1. -s N Print N slowest tests @@ -92,7 +92,7 @@ do r) server=1 ;; - ?) + ?) usage exit 1 esac @@ -175,7 +175,7 @@ done # Print a report of the slowest running tests if [ ! -z $showSlowest ]; then - + testNameIdx=0 for (( i = 0; i < ${#test[@]}; i++ )) do diff --git a/dev-support/zombie-detector.sh b/dev-support/zombie-detector.sh index df4c197ce4df..3a2708a14adf 100755 --- a/dev-support/zombie-detector.sh +++ b/dev-support/zombie-detector.sh @@ -29,7 +29,7 @@ #set -x # printenv -### Setup some variables. +### Setup some variables. bindir=$(dirname $0) # This key is set by our surefire configuration up in the main pom.xml diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 1d14aa6cc793..a833e637d038 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT .. diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java index c2510efb026a..d9bae8490637 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the client. This tests the hbase-client package and all of the client * tests in hbase-server. 
- * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java index 4341becbd68a..a168adec08af 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to coprocessors. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java index a91033fa2d38..84f346baaea2 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java index 22fbc1b724ff..c23bfa298b36 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as failing commonly on public build infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java index c2375ca4e5cb..8eee0e6ae4b9 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and * the like. 
- * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java index 6bc712e270cf..4e555b73fedb 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as 'integration/system' test, meaning that the test class has the following * characteristics: *
- * <li>Possibly takes hours to complete</li>
- * <li>Can be run on a mini cluster or an actual cluster</li>
- * <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
- * <li>Should not be run in parallel of other integration tests</li>
+ * <li>Possibly takes hours to complete</li>
+ * <li>Can be run on a mini cluster or an actual cluster</li>
+ * <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
+ * <li>Should not be run in parallel of other integration tests</li>
- * - * Integration / System tests should have a class name starting with "IntegrationTest", and - * should be annotated with @Category(IntegrationTests.class). Integration tests can be run - * using the IntegrationTestsDriver class or from mvn verify. - * + * Integration / System tests should have a class name starting with "IntegrationTest", and should + * be annotated with @Category(IntegrationTests.class). Integration tests can be run using the + * IntegrationTestsDriver class or from mvn verify. * @see SmallTests * @see MediumTests * @see LargeTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java index aa183d5607d7..b47e5bab9a46 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'large', means that the test class has the following characteristics: *
- * <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
- * same machine simultaneously so be careful two concurrent tests end up fighting over ports
- * or other singular resources).</li>
- * <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it
- * has, will run in last less than three minutes</li>
- * <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
- * if you need to run tests longer than this.</li>
+ * <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
+ * same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+ * other singular resources).</li>
+ * <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it has,
+ * will run in last less than three minutes</li>
+ * <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
+ * if you need to run tests longer than this.</li>
- * * @see SmallTests * @see MediumTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java index 4b49da4e4dc0..0e68ab3c0340 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to mapred or mapreduce. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java index e837f49a268a..5dcf51b27e59 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the master. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java index 0f8055b5bab0..d1f836ec0049 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'medium' means that the test class has the following characteristics: *
- * <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on
- * the same machine simultaneously so be careful two concurrent tests end up fighting over ports
- * or other singular resources).</li>
- * <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
- * has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
+ * <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on the
+ * same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+ * other singular resources).</li>
+ * <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
+ * has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
- * - * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. - * + * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. * @see SmallTests * @see LargeTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java index 59962a74c280..27beaacf963e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java index 2759bfc96df7..695042e801bf 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as not easily falling into any of the below categories. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java index 4edb9bf031d2..929bd6487edf 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to RPC. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java index 80b04eb7e598..050a70762928 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java index 0f03b761fcb1..3439afa76eba 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the regionserver. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java index 8b8be4de8125..df606c960c25 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to replication. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java index e7d1d1d4c88c..a648b4c39e03 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the REST capability of HBase. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java index 5263d467cbee..a4e55ad3aba0 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to security. 
- * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java index 80e6c9d24209..64d2bce381b6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,14 @@ /** * Tagging a test as 'small' means that the test class has the following characteristics: *
- * <li>it can be run simultaneously with other small tests all in the same JVM</li>
- * <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test
- * methods it has, should take less than 15 seconds to complete</li>
- * <li>it does not use a cluster</li>
+ * <li>it can be run simultaneously with other small tests all in the same JVM</li>
+ * <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test methods
+ * it has, should take less than 15 seconds to complete</li>
+ * <li>it does not use a cluster</li>
- * * @see MediumTests * @see LargeTests * @see IntegrationTests */ -public interface SmallTests {} +public interface SmallTests { +} diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java index efc8d5ddc84c..d1f433b9719d 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** - * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build + * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java index 85507de5ad4d..f556979e5b6a 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as region tests which takes longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java index 86aa6bdc85e6..9fa0579ed47e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml b/hbase-archetypes/hbase-archetype-builder/pom.xml index 851a3a7ed459..29dfb0692ada 100644 --- a/hbase-archetypes/hbase-archetype-builder/pom.xml +++ b/hbase-archetypes/hbase-archetype-builder/pom.xml @@ -1,6 +1,5 @@ - - + + hbase-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir} @@ -76,29 +75,30 @@ hbase-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-client.dir} - true + true + pom.xml - + hbase-shaded-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir} @@ -113,20 +113,21 @@ hbase-shaded-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-shaded-client.dir} - true + true + pom.xml - + @@ -137,10 +138,10 @@ using xml-maven-plugin for xslt transformation, below. --> hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir} @@ -149,16 +150,16 @@ pom.xml - + hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir} @@ -167,7 +168,7 @@ pom.xml - + @@ -182,10 +183,10 @@ modify-exemplar-pom-files-via-xslt - process-resources transform + process-resources @@ -212,10 +213,10 @@ prevent warnings when project is generated from archetype. --> modify-archetype-pom-files-via-xslt - package transform + package @@ -242,32 +243,32 @@ - maven-antrun-plugin + maven-antrun-plugin make-scripts-executable - process-resources run + process-resources - - + + run-createArchetypes-script - compile run + compile - - - + + + run-installArchetypes-script - install run + install - - - + + + diff --git a/hbase-archetypes/hbase-client-project/pom.xml b/hbase-archetypes/hbase-client-project/pom.xml index c6d0aa7c97e4..2b1afc0e7eba 100644 --- a/hbase-archetypes/hbase-client-project/pom.xml +++ b/hbase-archetypes/hbase-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 3.0.0-alpha-3-SNAPSHOT .. diff --git a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java index a9e522fe16d4..a9795d6ba918 100644 --- a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -61,20 +58,20 @@ public static void main(final String[] args) throws IOException { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. */ try (Connection connection = ConnectionFactory.createConnection(); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.getClusterMetrics(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -92,9 +89,8 @@ public static void main(final String[] args) throws IOException { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. 
* @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -103,48 +99,38 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build(); admin.createTable(desc); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). * @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); - - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); + + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. 
- * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -152,38 +138,32 @@ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * - * @param admin Standard Admin object + * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -194,28 +174,24 @@ static boolean namespaceExists(final Admin admin, final String namespaceName) /** * Invokes Table#delete to delete test data (i.e. the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. 
admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java index a7c7a5e5ad2a..98086b6260ec 100644 --- a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,10 +44,9 @@ public class TestHelloHBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHelloHBase.class); + HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtil TEST_UTIL - = new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public void testNamespaceExists() throws Exception { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public void testCreateNamespaceAndTable() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public void testCreateNamespaceAndTable() throws Exception { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public void testPutRowToTable() throws IOException { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); 
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). - addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml b/hbase-archetypes/hbase-shaded-client-project/pom.xml index 0ede67b739c9..6e7a2a143d2d 100644 --- a/hbase-archetypes/hbase-shaded-client-project/pom.xml +++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 3.0.0-alpha-3-SNAPSHOT .. @@ -44,16 +41,16 @@ org.apache.hbase hbase-testing-util test - - - javax.xml.bind - jaxb-api - - - javax.ws.rs - jsr311-api - - + + + javax.xml.bind + jaxb-api + + + javax.ws.rs + jsr311-api + + org.apache.hbase diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java index 053275a3ad33..66581d5acc68 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. 
For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -60,20 +57,20 @@ public static void main(final String[] args) throws IOException { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. */ try (Connection connection = ConnectionFactory.createConnection(); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.getClusterMetrics(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -91,9 +88,8 @@ public static void main(final String[] args) throws IOException { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. * @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -102,13 +98,11 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build()); @@ -116,33 +110,26 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. 
- * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). * @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); - - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); + + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. - * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -150,38 +137,32 @@ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. 
- * - * @param admin Standard Admin object + * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -192,28 +173,24 @@ static boolean namespaceExists(final Admin admin, final String namespaceName) /** * Invokes Table#delete to delete test data (i.e. the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java index 0282ff68a336..45fa1357c243 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,10 +44,9 @@ public class TestHelloHBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHelloHBase.class); + HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtil TEST_UTIL - = new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public void testNamespaceExists() throws Exception { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public void testCreateNamespaceAndTable() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public void testCreateNamespaceAndTable() throws Exception { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public void testPutRowToTable() throws IOException { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). 
- addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index 1a05b9617d4f..f6bb3a4e9986 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -68,10 +67,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 2cb2f85c8364..86bd6dea1469 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-assembly - Apache HBase - Assembly - - Module that does project assembly and that is all that it does. - pom + Apache HBase - Assembly + Module that does project assembly and that is all that it does. true - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - aggregate-licenses - - process - - - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - maven-assembly-plugin - - - hbase-${project.version} - false - true - posix - - ${assembly.file} - src/main/assembly/client.xml - - - - - maven-dependency-plugin - - - - create-hbase-generated-classpath - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath.txt - jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce - - - - - - create-hbase-generated-classpath-jline - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jline.txt - jline - - - - - - create-hbase-generated-classpath-jruby - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jruby.txt - jruby-complete - - - - - - - unpack-dependency-notices - prepare-package - - unpack-dependencies - - - pom - true - **\/NOTICE,**\/NOTICE.txt - - - - - - org.codehaus.mojo - exec-maven-plugin - ${exec.maven.version} - - - concat-NOTICE-files - package - - exec - - - env - - bash - -c - cat maven-shared-archive-resources/META-INF/NOTICE \ - `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` - - - ${project.build.directory}/NOTICE.aggregate - ${project.build.directory} - - - - - - - @@ -189,7 +47,7 @@ org.apache.hbase hbase-shaded-mapreduce - + org.apache.hbase hbase-it @@ -254,25 +112,25 @@ hbase-external-blockcache - org.apache.hbase - hbase-testing-util + org.apache.hbase + hbase-testing-util - org.apache.hbase - hbase-metrics-api + org.apache.hbase + hbase-metrics-api - org.apache.hbase - hbase-metrics + org.apache.hbase + hbase-metrics org.apache.hbase hbase-protocol-shaded - org.apache.hbase - hbase-resource-bundle - true + org.apache.hbase + 
hbase-resource-bundle + true org.apache.httpcomponents @@ -390,4 +248,143 @@ compile + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + aggregate-licenses + + process + + + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + maven-assembly-plugin + + + hbase-${project.version} + false + true + posix + + ${assembly.file} + src/main/assembly/client.xml + + + + + maven-dependency-plugin + + + + create-hbase-generated-classpath + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath.txt + jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce + + + + + + create-hbase-generated-classpath-jline + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jline.txt + jline + + + + + + create-hbase-generated-classpath-jruby + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jruby.txt + jruby-complete + + + + + + + unpack-dependency-notices + + unpack-dependencies + + prepare-package + + pom + true + **\/NOTICE,**\/NOTICE.txt + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec.maven.version} + + + concat-NOTICE-files + + exec + + package + + env + + bash + -c + cat maven-shared-archive-resources/META-INF/NOTICE \ + `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` + + ${project.build.directory}/NOTICE.aggregate + ${project.build.directory} + + + + + + + diff --git a/hbase-asyncfs/pom.xml b/hbase-asyncfs/pom.xml index 073eec750d33..0544cf9d6b84 100644 --- a/hbase-asyncfs/pom.xml +++ b/hbase-asyncfs/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,33 +30,6 @@ hbase-asyncfs Apache HBase - Asynchronous FileSystem HBase Asynchronous FileSystem Implementation for WAL - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -169,13 +141,42 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -224,8 +225,7 @@ lifecycle-mapping - - + diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java index 059ca00b02cc..b88b32bdb814 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface for asynchronous filesystem output stream. diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java index 5b713196d0b0..a530ca4a2a0d 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,9 +47,9 @@ private AsyncFSOutputHelper() { * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}. */ public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, - boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) - throws IOException, CommonFSUtils.StreamLacksCapabilityException { + boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, + Class channelClass, StreamSlowMonitor monitor) + throws IOException, CommonFSUtils.StreamLacksCapabilityException { if (fs instanceof DistributedFileSystem) { return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java index 5885ea685b32..8906f003bc88 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -180,7 +180,10 @@ public Callback(CompletableFuture future, long ackedLength, // State for connections to DN private enum State { - STREAMING, CLOSING, BROKEN, CLOSED + STREAMING, + CLOSING, + BROKEN, + CLOSED } private volatile State state; @@ -196,7 +199,7 @@ private void completed(Channel channel) { if (c.unfinishedReplicas.remove(channel.id())) { long current = EnvironmentEdgeManager.currentTime(); streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen, - current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); + current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); c.lastAckTimestamp = current; if (c.unfinishedReplicas.isEmpty()) { // we need to remove first before complete the future. 
It is possible that after we @@ -284,13 +287,13 @@ public AckHandler(int timeoutMs) { protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception { Status reply = getStatus(ack); if (reply != Status.SUCCESS) { - failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block + + " from datanode " + ctx.channel().remoteAddress())); return; } if (PipelineAck.isRestartOOBStatus(reply)) { - failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + + block + " from datanode " + ctx.channel().remoteAddress())); return; } if (ack.getSeqno() == HEART_BEAT_SEQNO) { @@ -345,10 +348,10 @@ private void setupReceiver(int timeoutMs) { } } - FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs, - DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId, - LocatedBlock locatedBlock, Encryptor encryptor, Map datanodeInfoMap, - DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { + FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client, + ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock, + Encryptor encryptor, Map datanodeInfoMap, DataChecksum summer, + ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { this.conf = conf; this.dfs = dfs; this.client = client; @@ -403,7 +406,7 @@ public DatanodeInfo[] getPipeline() { } private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, - long nextPacketOffsetInBlock, boolean syncBlock) { + long nextPacketOffsetInBlock, boolean syncBlock) { int dataLen = dataBuf.readableBytes(); int chunkLen = summer.getBytesPerChecksum(); int trailingPartialChunkLen = dataLen % chunkLen; @@ -413,13 +416,13 @@ private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen)); checksumBuf.writerIndex(checksumLen); PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, - nextPacketSeqno, false, dataLen, syncBlock); + nextPacketSeqno, false, dataLen, syncBlock); int headerLen = header.getSerializedSize(); ByteBuf headerBuf = alloc.buffer(headerLen); header.putInBuffer(headerBuf.nioBuffer(0, headerLen)); headerBuf.writerIndex(headerLen); - Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, - datanodeInfoMap.keySet(), dataLen); + Callback c = + new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen); waitingAckQueue.addLast(c); // recheck again after we pushed the callback to queue if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) { @@ -429,7 +432,7 @@ private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, return; } // TODO: we should perhaps measure time taken per DN here; - // we could collect statistics per DN, and/or exclude bad nodes in createOutput. + // we could collect statistics per DN, and/or exclude bad nodes in createOutput. 
datanodeInfoMap.keySet().forEach(ch -> { ch.write(headerBuf.retainedDuplicate()); ch.write(checksumBuf.retainedDuplicate()); @@ -514,7 +517,7 @@ private void flush0(CompletableFuture future, boolean syncBlock) { } trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum(); ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen)) - .ensureWritable(trailingPartialChunkLength); + .ensureWritable(trailingPartialChunkLength); if (trailingPartialChunkLength != 0) { buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf, trailingPartialChunkLength); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 7c62d67c6cee..2517f2d2c01a 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -116,7 +116,7 @@ @InterfaceAudience.Private public final class FanOutOneBlockAsyncDFSOutputHelper { private static final Logger LOG = - LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class); + LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class); private FanOutOneBlockAsyncDFSOutputHelper() { } @@ -145,9 +145,8 @@ private interface LeaseManager { // helper class for creating files. private interface FileCreator { default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked, - String clientName, EnumSetWritable flag, boolean createParent, - short replication, long blockSize, CryptoProtocolVersion[] supportedVersions) - throws Exception { + String clientName, EnumSetWritable flag, boolean createParent, short replication, + long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception { try { return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions); @@ -161,15 +160,15 @@ default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission } Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName, - EnumSetWritable flag, boolean createParent, short replication, long blockSize, - CryptoProtocolVersion[] supportedVersions) throws Exception; + EnumSetWritable flag, boolean createParent, short replication, long blockSize, + CryptoProtocolVersion[] supportedVersions) throws Exception; } private static final FileCreator FILE_CREATOR; private static LeaseManager createLeaseManager() throws NoSuchMethodException { Method beginFileLeaseMethod = - DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class); + DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class); beginFileLeaseMethod.setAccessible(true); Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class); endFileLeaseMethod.setAccessible(true); @@ -197,13 +196,13 @@ public void end(DFSClient client, long inodeId) { private static FileCreator createFileCreator3_3() throws NoSuchMethodException { Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class, - String.class, 
EnumSetWritable.class, boolean.class, short.class, long.class, - CryptoProtocolVersion[].class, String.class, String.class); + String.class, EnumSetWritable.class, boolean.class, short.class, long.class, + CryptoProtocolVersion[].class, String.class, String.class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, - supportedVersions) -> { + supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, - createParent, replication, blockSize, supportedVersions, null, null); + createParent, replication, blockSize, supportedVersions, null, null); }; } @@ -213,7 +212,7 @@ private static FileCreator createFileCreator3() throws NoSuchMethodException { CryptoProtocolVersion[].class, String.class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, - supportedVersions) -> { + supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions, null); }; @@ -249,9 +248,9 @@ public boolean progress() { LEASE_MANAGER = createLeaseManager(); FILE_CREATOR = createFileCreator(); } catch (Exception e) { - String msg = "Couldn't properly initialize access to HDFS internals. Please " + - "update your WAL Provider to not make use of the 'asyncfs' provider. See " + - "HBASE-16110 for more information."; + String msg = "Couldn't properly initialize access to HDFS internals. Please " + + "update your WAL Provider to not make use of the 'asyncfs' provider. See " + + "HBASE-16110 for more information."; LOG.error(msg, e); throw new Error(msg, e); } @@ -282,7 +281,7 @@ static Status getStatus(PipelineAckProto ack) { } private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo, - Promise promise, int timeoutMs) { + Promise promise, int timeoutMs) { channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS), new ProtobufVarint32FrameDecoder(), new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()), @@ -290,7 +289,7 @@ private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnIn @Override protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp) - throws Exception { + throws Exception { Status pipelineStatus = resp.getStatus(); if (PipelineAck.isRestartOOBStatus(pipelineStatus)) { throw new IOException("datanode " + dnInfo + " is restarting"); @@ -298,11 +297,11 @@ protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink(); if (resp.getStatus() != Status.SUCCESS) { if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException("Got access token error" + ", status message " + - resp.getMessage() + ", " + logInfo); + throw new InvalidBlockTokenException("Got access token error" + ", status message " + + resp.getMessage() + ", " + logInfo); } else { - throw new IOException("Got error" + ", status=" + resp.getStatus().name() + - ", status message " + resp.getMessage() + ", " + logInfo); + throw new IOException("Got error" + ", status=" + resp.getStatus().name() + + ", status message " + resp.getMessage() + ", " + logInfo); } } // success @@ -329,7 +328,7 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() 
== READER_IDLE) { promise - .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response")); + .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response")); } else { super.userEventTriggered(ctx, evt); } @@ -343,7 +342,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } private static void requestWriteBlock(Channel channel, StorageType storageType, - OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { + OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { OpWriteBlockProto proto = writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build(); int protoLen = proto.getSerializedSize(); @@ -356,9 +355,9 @@ private static void requestWriteBlock(Channel channel, StorageType storageType, } private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo, - StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, - DFSClient client, Token accessToken, Promise promise) - throws IOException { + StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, + DFSClient client, Token accessToken, Promise promise) + throws IOException { Promise saslPromise = channel.eventLoop().newPromise(); trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise); saslPromise.addListener(new FutureListener() { @@ -377,13 +376,13 @@ public void operationComplete(Future future) throws Exception { } private static List> connectToDataNodes(Configuration conf, DFSClient client, - String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, - BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, - Class channelClass) { + String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, + BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, + Class channelClass) { StorageType[] storageTypes = locatedBlock.getStorageTypes(); DatanodeInfo[] datanodeInfos = locatedBlock.getLocations(); boolean connectToDnViaHostname = - conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); + conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT); ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); blockCopy.setNumBytes(locatedBlock.getBlockSize()); @@ -392,11 +391,11 @@ private static List> connectToDataNodes(Configuration conf, DFSC .setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) .setClientName(clientName).build(); ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer); - OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder() - .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())) - .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()) - .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS) - .setRequestedChecksum(checksumProto) + OpWriteBlockProto.Builder writeBlockProtoBuilder = + OpWriteBlockProto.newBuilder().setHeader(header) + .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1) + .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd) + .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto) 
.setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build()); List> futureList = new ArrayList<>(datanodeInfos.length); for (int i = 0; i < datanodeInfos.length; i++) { @@ -406,26 +405,26 @@ private static List> connectToDataNodes(Configuration conf, DFSC futureList.add(promise); String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname); new Bootstrap().group(eventLoopGroup).channel(channelClass) - .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer() { + .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer() { - @Override - protected void initChannel(Channel ch) throws Exception { - // we need to get the remote address of the channel so we can only move on after - // channel connected. Leave an empty implementation here because netty does not allow - // a null handler. - } - }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() { - - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (future.isSuccess()) { - initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, - timeoutMs, client, locatedBlock.getBlockToken(), promise); - } else { - promise.tryFailure(future.cause()); - } + @Override + protected void initChannel(Channel ch) throws Exception { + // we need to get the remote address of the channel so we can only move on after + // channel connected. Leave an empty implementation here because netty does not allow + // a null handler. + } + }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() { + + @Override + public void operationComplete(ChannelFuture future) throws Exception { + if (future.isSuccess()) { + initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, + timeoutMs, client, locatedBlock.getBlockToken(), promise); + } else { + promise.tryFailure(future.cause()); } - }); + } + }); } return futureList; } @@ -453,21 +452,21 @@ private static EnumSetWritable getCreateFlags(boolean overwrite) { } private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src, - boolean overwrite, boolean createParent, short replication, long blockSize, - EventLoopGroup eventLoopGroup, Class channelClass, - StreamSlowMonitor monitor) throws IOException { + boolean overwrite, boolean createParent, short replication, long blockSize, + EventLoopGroup eventLoopGroup, Class channelClass, StreamSlowMonitor monitor) + throws IOException { Configuration conf = dfs.getConf(); DFSClient client = dfs.getClient(); String clientName = client.getClientName(); ClientProtocol namenode = client.getNamenode(); - int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, - DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); + int createMaxRetries = + conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager(); Set toExcludeNodes = new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); for (int retry = 0;; retry++) { LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, - toExcludeNodes, retry); + toExcludeNodes, retry); HdfsFileStatus stat; try { stat = FILE_CREATOR.create(namenode, src, @@ -556,14 +555,14 @@ public void operationComplete(Future future) throws Exception { * inside an {@link EventLoop}. 
*/ public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f, - boolean overwrite, boolean createParent, short replication, long blockSize, - EventLoopGroup eventLoopGroup, Class channelClass, - final StreamSlowMonitor monitor) throws IOException { + boolean overwrite, boolean createParent, short replication, long blockSize, + EventLoopGroup eventLoopGroup, Class channelClass, + final StreamSlowMonitor monitor) throws IOException { return new FileSystemLinkResolver() { @Override public FanOutOneBlockAsyncDFSOutput doCall(Path p) - throws IOException, UnresolvedLinkException { + throws IOException, UnresolvedLinkException { return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); } @@ -583,7 +582,7 @@ public static boolean shouldRetryCreate(RemoteException e) { } static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName, - ExtendedBlock block, long fileId) { + ExtendedBlock block, long fileId) { for (int retry = 0;; retry++) { try { if (namenode.complete(src, clientName, block, fileId)) { diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java index 090b9b4a63f1..89f386c8d644 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,7 +104,7 @@ @InterfaceAudience.Private public final class FanOutOneBlockAsyncDFSOutputSaslHelper { private static final Logger LOG = - LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class); + LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class); private FanOutOneBlockAsyncDFSOutputSaslHelper() { } @@ -129,21 +129,21 @@ private interface SaslAdaptor { private interface TransparentCryptoHelper { Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) - throws IOException; + throws IOException; } private static final TransparentCryptoHelper TRANSPARENT_CRYPTO_HELPER; private static SaslAdaptor createSaslAdaptor() - throws NoSuchFieldException, NoSuchMethodException { + throws NoSuchFieldException, NoSuchMethodException { Field saslPropsResolverField = - SaslDataTransferClient.class.getDeclaredField("saslPropsResolver"); + SaslDataTransferClient.class.getDeclaredField("saslPropsResolver"); saslPropsResolverField.setAccessible(true); Field trustedChannelResolverField = - SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver"); + SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver"); trustedChannelResolverField.setAccessible(true); Field fallbackToSimpleAuthField = - SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth"); + SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth"); fallbackToSimpleAuthField.setAccessible(true); return new SaslAdaptor() { @@ -177,7 +177,7 @@ public AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient) } private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396() - 
throws NoSuchMethodException { + throws NoSuchMethodException { Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); decryptEncryptedDataEncryptionKeyMethod.setAccessible(true); @@ -185,7 +185,7 @@ private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS1 @Override public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, - DFSClient client) throws IOException { + DFSClient client) throws IOException { try { KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); @@ -206,7 +206,7 @@ public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, } private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS12396() - throws ClassNotFoundException, NoSuchMethodException { + throws ClassNotFoundException, NoSuchMethodException { Class hdfsKMSUtilCls = Class.forName("org.apache.hadoop.hdfs.HdfsKMSUtil"); Method decryptEncryptedDataEncryptionKeyMethod = hdfsKMSUtilCls.getDeclaredMethod( "decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class, KeyProvider.class); @@ -215,7 +215,7 @@ private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS1239 @Override public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, - DFSClient client) throws IOException { + DFSClient client) throws IOException { try { KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod .invoke(null, feInfo, client.getKeyProvider()); @@ -236,12 +236,12 @@ public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, } private static TransparentCryptoHelper createTransparentCryptoHelper() - throws NoSuchMethodException, ClassNotFoundException { + throws NoSuchMethodException, ClassNotFoundException { try { return createTransparentCryptoHelperWithoutHDFS12396(); } catch (NoSuchMethodException e) { - LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + - " should be hadoop version with HDFS-12396", e); + LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + + " should be hadoop version with HDFS-12396", e); } return createTransparentCryptoHelperWithHDFS12396(); } @@ -252,8 +252,8 @@ private static TransparentCryptoHelper createTransparentCryptoHelper() TRANSPARENT_CRYPTO_HELPER = createTransparentCryptoHelper(); } catch (Exception e) { String msg = "Couldn't properly initialize access to HDFS internals. Please " - + "update your WAL Provider to not make use of the 'asyncfs' provider. See " - + "HBASE-16110 for more information."; + + "update your WAL Provider to not make use of the 'asyncfs' provider. See " + + "HBASE-16110 for more information."; LOG.error(msg, e); throw new Error(msg, e); } @@ -324,8 +324,8 @@ private static final class SaslNegotiateHandler extends ChannelDuplexHandler { private int step = 0; public SaslNegotiateHandler(Configuration conf, String username, char[] password, - Map saslProps, int timeoutMs, Promise promise, - DFSClient dfsClient) throws SaslException { + Map saslProps, int timeoutMs, Promise promise, DFSClient dfsClient) + throws SaslException { this.conf = conf; this.saslProps = saslProps; this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL, @@ -355,8 +355,8 @@ private List getCipherOptions() throws IOException { } /** - * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. 
- * After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. + * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After + * Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. * Use Reflection to check which ones to use. */ private static class BuilderPayloadSetter { @@ -366,13 +366,11 @@ private static class BuilderPayloadSetter { /** * Create a ByteString from byte array without copying (wrap), and then set it as the payload * for the builder. - * * @param builder builder for HDFS DataTransferEncryptorMessage. - * @param payload byte array of payload. - * @throws IOException + * @param payload byte array of payload. n */ - static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload) - throws IOException { + static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, + byte[] payload) throws IOException { Object byteStringObject; try { // byteStringObject = new LiteralByteString(payload); @@ -396,18 +394,18 @@ static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, try { // See if it can load the relocated ByteString, which comes from hadoop-thirdparty. byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString"); - LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + - " Assuming this is Hadoop 3.3.0+."); + LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + + " Assuming this is Hadoop 3.3.0+."); } catch (ClassNotFoundException e) { - LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + - " Assuming this is below Hadoop 3.3.0", e); + LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + + " Assuming this is below Hadoop 3.3.0", e); } // LiteralByteString is a package private class in protobuf. Make it accessible. 
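A minimal, self-contained sketch of the "probe the relocated class first, then fall back" pattern that the BuilderPayloadSetter comments above describe. The two class names come from this patch; the wrapper class and its error handling are illustrative assumptions, not HBase code.

final class ByteStringProbe {
  // Hadoop 3.3.0+ ships protobuf relocated into hadoop-thirdparty; older Hadoop
  // still exposes the unshaded com.google.protobuf classes.
  static Class<?> resolveByteString() {
    try {
      return Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
    } catch (ClassNotFoundException e) {
      try {
        return Class.forName("com.google.protobuf.ByteString");
      } catch (ClassNotFoundException e2) {
        throw new IllegalStateException("No protobuf ByteString found on the classpath", e2);
      }
    }
  }
}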
Class literalByteStringClass; try { - literalByteStringClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); + literalByteStringClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found."); } catch (ClassNotFoundException e) { try { @@ -435,9 +433,9 @@ static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, } private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload, - List options) throws IOException { + List options) throws IOException { DataTransferEncryptorMessageProto.Builder builder = - DataTransferEncryptorMessageProto.newBuilder(); + DataTransferEncryptorMessageProto.newBuilder(); builder.setStatus(DataTransferEncryptorStatus.SUCCESS); if (payload != null) { BuilderPayloadSetter.wrapAndSetPayload(builder, payload); @@ -486,7 +484,7 @@ private boolean isNegotiatedQopPrivacy() { private boolean requestedQopContainsPrivacy() { Set requestedQop = - ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); + ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); return requestedQop.contains("auth-conf"); } @@ -495,15 +493,14 @@ private void checkSaslComplete() throws IOException { throw new IOException("Failed to complete SASL handshake"); } Set requestedQop = - ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); + ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); String negotiatedQop = getNegotiatedQop(); LOG.debug( "Verifying QOP, requested QOP = " + requestedQop + ", negotiated QOP = " + negotiatedQop); if (!requestedQop.contains(negotiatedQop)) { throw new IOException(String.format("SASL handshake completed, but " - + "channel does not have acceptable quality of protection, " - + "requested = %s, negotiated = %s", - requestedQop, negotiatedQop)); + + "channel does not have acceptable quality of protection, " + + "requested = %s, negotiated = %s", requestedQop, negotiatedQop)); } } @@ -522,13 +519,13 @@ private CipherOption unwrap(CipherOption option, SaslClient saslClient) throws I outKey = saslClient.unwrap(outKey, 0, outKey.length); } return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey, - option.getOutIv()); + option.getOutIv()); } private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto, - boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException { + boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException { List cipherOptions = - PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList()); + PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList()); if (cipherOptions == null || cipherOptions.isEmpty()) { return null; } @@ -558,7 +555,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception assert response == null; checkSaslComplete(); CipherOption cipherOption = - getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient); + getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient); ChannelPipeline p = ctx.pipeline(); while (p.first() != null) { p.removeFirst(); @@ -639,7 +636,7 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof ByteBuf) { ByteBuf buf = (ByteBuf) msg; cBuf.addComponent(buf); @@ -676,7 +673,7 @@ private 
static final class DecryptHandler extends SimpleChannelInboundHandler private final Encryptor encryptor; public EncryptHandler(CryptoCodec codec, byte[] key, byte[] iv) - throws GeneralSecurityException, IOException { + throws GeneralSecurityException, IOException { this.encryptor = codec.createEncryptor(); this.encryptor.init(key, Arrays.copyOf(iv, iv.length)); } @Override protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) - throws Exception { + throws Exception { if (preferDirect) { return ctx.alloc().directBuffer(msg.readableBytes()); } else { @@ -747,7 +744,7 @@ protected void encode(ChannelHandlerContext ctx, ByteBuf msg, ByteBuf out) throw private static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) { return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER - + Base64.getEncoder().encodeToString(encryptionKey.nonce); + + Base64.getEncoder().encodeToString(encryptionKey.nonce); } private static char[] encryptionKeyToPassword(byte[] encryptionKey) { @@ -771,26 +768,26 @@ private static Map createSaslPropertiesForEncryption(String encr } private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs, - String username, char[] password, Map saslProps, Promise saslPromise, - DFSClient dfsClient) { + String username, char[] password, Map saslProps, Promise saslPromise, + DFSClient dfsClient) { try { channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS), new ProtobufVarint32FrameDecoder(), new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()), new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise, - dfsClient)); + dfsClient)); } catch (SaslException e) { saslPromise.tryFailure(e); } } static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo, - int timeoutMs, DFSClient client, Token accessToken, - Promise saslPromise) throws IOException { + int timeoutMs, DFSClient client, Token accessToken, + Promise saslPromise) throws IOException { SaslDataTransferClient saslClient = client.getSaslDataTransferClient(); SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient); TrustedChannelResolver trustedChannelResolver = - SASL_ADAPTOR.getTrustedChannelResolver(saslClient); + SASL_ADAPTOR.getTrustedChannelResolver(saslClient); AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient); InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress(); if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) { @@ -805,24 +802,23 @@ static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo d } doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey), encryptionKeyToPassword(encryptionKey.encryptionKey), - createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, - client); + createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client); } else if (!UserGroupInformation.isSecurityEnabled()) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr - + ", datanodeId = " + dnInfo); + + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } else if (dnInfo.getXferPort() < 1024) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in secured configuration with " - + "privileged port for 
addr = " + addr + ", datanodeId = " + dnInfo); + + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in secured configuration with " - + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo); + + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } else if (saslPropsResolver != null) { @@ -832,21 +828,21 @@ static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo d } doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken), buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise, - client); + client); } else { // It's a secured cluster using non-privileged ports, but no SASL. The only way this can // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare // edge case. if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in secured configuration with no SASL " - + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo); + + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo); } saslPromise.trySuccess(null); } } static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client) - throws IOException { + throws IOException { FileEncryptionInfo feInfo = stat.getFileEncryptionInfo(); if (feInfo == null) { return null; diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java index 3be9a2e49c1b..a0b5cc00841b 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,29 @@ */ package org.apache.hadoop.hbase.io.asyncfs; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; /** - * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. - * The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf). - * - * Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and - * so we must use reflection to detect which one (relocated or not) to use. - * - * Do not use this to process HBase's shaded protobuf messages. This is meant to process the - * protobuf messages in HDFS for the asyncfs use case. 
- * */ + * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode + * supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates + * protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect + * which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages. + * This is meant to process the protobuf messages in HDFS for the asyncfs use case. + */ @InterfaceAudience.Private public class ProtobufDecoder extends MessageToMessageDecoder { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufDecoder.class); + private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class); private static Class protobufMessageLiteClass = null; private static Class protobufMessageLiteBuilderClass = null; @@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder { private Object parser; private Object builder; - public ProtobufDecoder(Object prototype) { try { - Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod( - "getDefaultInstanceForType"); - Object prototype1 = getDefaultInstanceForTypeMethod - .invoke(ObjectUtil.checkNotNull(prototype, "prototype")); + Method getDefaultInstanceForTypeMethod = + protobufMessageLiteClass.getMethod("getDefaultInstanceForType"); + Object prototype1 = + getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype")); // parser = prototype.getParserForType() parser = getParserForTypeMethod.invoke(prototype1); - parseFromMethod = parser.getClass().getMethod( - "parseFrom", byte[].class, int.class, int.class); + parseFromMethod = + parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class); // builder = prototype.newBuilderForType(); builder = newBuilderForTypeMethod.invoke(prototype1); - mergeFromMethod = builder.getClass().getMethod( - "mergeFrom", byte[].class, int.class, int.class); + mergeFromMethod = + builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class); // All protobuf message builders inherits from MessageLite.Builder buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build"); @@ -88,8 +83,7 @@ public ProtobufDecoder(Object prototype) { } } - protected void decode( - ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { int length = msg.readableBytes(); byte[] array; int offset; @@ -122,8 +116,8 @@ protected void decode( try { protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite"); - protobufMessageLiteBuilderClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); + protobufMessageLiteBuilderClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); LOG.debug("Hadoop 3.3 and above shades protobuf."); } catch (ClassNotFoundException e) { LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java index 2f652440e38e..d5dbfb02abc2 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java index c7cc1fcfcb4b..0297285b93ee 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -50,7 +49,7 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput { public WrapperAsyncFSOutput(Path file, FSDataOutputStream out) { this.out = out; this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build()); + .setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build()); } @Override @@ -95,8 +94,8 @@ private void flush0(CompletableFuture future, ByteArrayOutputStream buffer } long pos = out.getPos(); /** - * This flush0 method could only be called by single thread, so here we could - * safely overwrite without any synchronization. + * This flush0 method could only be called by single thread, so here we could safely overwrite + * without any synchronization. 
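The single-writer guarantee noted above comes from funnelling every flush through one executor thread. A minimal sketch of that setup follows; the thread-name format mirrors the one in this patch, while the shaded ThreadFactoryBuilder import path and the helper class are assumptions for illustration only.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

final class SingleFlusherSketch {
  // One daemon thread runs every flush, so per-flush state such as syncedLength
  // can be overwritten without additional synchronization.
  static ExecutorService newFlusher(String fileName) {
    return Executors.newSingleThreadExecutor(new ThreadFactoryBuilder()
      .setDaemon(true)
      .setNameFormat("AsyncFSOutputFlusher-" + fileName.replace("%", "%%"))
      .build());
  }
}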
*/ this.syncedLength = pos; future.complete(pos); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java index 80748cad609a..61f75582a1c9 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java @@ -56,24 +56,23 @@ public class ExcludeDatanodeManager implements ConfigurationObserver { private final int maxExcludeDNCount; private final Configuration conf; // This is a map of providerId->StreamSlowMonitor - private final Map streamSlowMonitors = - new ConcurrentHashMap<>(1); + private final Map streamSlowMonitors = new ConcurrentHashMap<>(1); public ExcludeDatanodeManager(Configuration conf) { this.conf = conf; this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT); this.excludeDNsCache = CacheBuilder.newBuilder() - .expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) - .maximumSize(this.maxExcludeDNCount) - .build(); + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.maxExcludeDNCount).build(); } /** * Try to add a datanode to the regionserver excluding cache * @param datanodeInfo the datanode to be added to the excluded cache - * @param cause the cause that the datanode is hope to be excluded + * @param cause the cause that the datanode is hope to be excluded * @return True if the datanode is added to the regionserver excluding cache, false otherwise */ public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) { @@ -85,15 +84,15 @@ public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) { datanodeInfo, cause, excludeDNsCache.size()); return true; } - LOG.debug("Try add datanode {} to exclude cache by [{}] failed, " - + "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet()); + LOG.debug( + "Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}", + datanodeInfo, cause, getExcludeDNs().keySet()); return false; } public StreamSlowMonitor getStreamSlowMonitor(String name) { String key = name == null || name.isEmpty() ? 
"defaultMonitorName" : name; - return streamSlowMonitors - .computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); + return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); } public Map getExcludeDNs() { @@ -105,10 +104,12 @@ public void onConfigurationChange(Configuration conf) { for (StreamSlowMonitor monitor : streamSlowMonitors.values()) { monitor.onConfigurationChange(conf); } - this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite( - this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), - TimeUnit.HOURS).maximumSize(this.conf - .getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) + this.excludeDNsCache = CacheBuilder.newBuilder() + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, + DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) .build(); } } diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java index 73cce1895742..c415706aa6af 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java @@ -38,18 +38,16 @@ import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; /** - * Class for monitor the wal file flush performance. - * Each active wal file has a StreamSlowMonitor. + * Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor. */ @InterfaceAudience.Private public class StreamSlowMonitor implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class); /** - * Configure for the min count for a datanode detected slow. - * If a datanode is detected slow times up to this count, then it will be added to the exclude - * datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} - * of this regionsever. + * Configure for the min count for a datanode detected slow. If a datanode is detected slow times + * up to this count, then it will be added to the exclude datanode cache by + * {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever. */ private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY = "hbase.regionserver.async.wal.min.slow.detect.count"; @@ -63,9 +61,9 @@ public class StreamSlowMonitor implements ConfigurationObserver { private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms /** - * Configure for the speed check of packet min length. - * For packets whose data length smaller than this value, check slow by processing time. - * While for packets whose data length larger than this value, check slow by flushing speed. + * Configure for the speed check of packet min length. For packets whose data length smaller than + * this value, check slow by processing time. While for packets whose data length larger than this + * value, check slow by flushing speed. 
*/ private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY = "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; @@ -73,8 +71,8 @@ public class StreamSlowMonitor implements ConfigurationObserver { private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024; /** - * Configure for the slow packet process time, a duration from send to ACK. - * The processing time check is for packets that data length smaller than + * Configure for the slow packet process time, a duration from send to ACK. The processing time + * check is for packets that data length smaller than * {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY} */ public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY = @@ -105,15 +103,16 @@ public class StreamSlowMonitor implements ConfigurationObserver { private long minLengthForSpeedCheck; public StreamSlowMonitor(Configuration conf, String name, - ExcludeDatanodeManager excludeDatanodeManager) { + ExcludeDatanodeManager excludeDatanodeManager) { setConf(conf); this.name = name; this.excludeDatanodeManager = excludeDatanodeManager; this.datanodeSlowDataQueue = CacheBuilder.newBuilder() .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) - .expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) + .expireAfterWrite( + conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) .build(new CacheLoader>() { @Override public Deque load(DatanodeInfo key) throws Exception { @@ -129,30 +128,33 @@ public static StreamSlowMonitor create(Configuration conf, String name) { /** * Check if the packet process time shows that the relevant datanode is a slow node. - * @param datanodeInfo the datanode that processed the packet - * @param packetDataLen the data length of the packet (in bytes) - * @param processTimeMs the process time (in ms) of the packet on the datanode, + * @param datanodeInfo the datanode that processed the packet + * @param packetDataLen the data length of the packet (in bytes) + * @param processTimeMs the process time (in ms) of the packet on the datanode, * @param lastAckTimestamp the last acked timestamp of the packet on another datanode - * @param unfinished if the packet is unfinished flushed to the datanode replicas + * @param unfinished if the packet is unfinished flushed to the datanode replicas */ public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen, - long processTimeMs, long lastAckTimestamp, int unfinished) { + long processTimeMs, long lastAckTimestamp, int unfinished) { long current = EnvironmentEdgeManager.currentTime(); // Here are two conditions used to determine whether a datanode is slow, // 1. For small packet, we just have a simple time limit, without considering // the size of the packet. // 2. For large packet, we will calculate the speed, and check if the speed is too slow. - boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || ( - packetDataLen > minLengthForSpeedCheck + boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) + || (packetDataLen > minLengthForSpeedCheck && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); if (slow) { // Check if large diff ack timestamp between replicas, // should try to avoid misjudgments that caused by GC STW. 
- if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || ( - lastAckTimestamp <= 0 && unfinished == 0)) { - LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " - + "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs, - unfinished, lastAckTimestamp, this.name); + if ( + (lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) + || (lastAckTimestamp <= 0 && unfinished == 0) + ) { + LOG.info( + "Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " + + "lastAckTimestamp={}, monitor name: {}", + datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name); if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) { excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack"); } @@ -168,8 +170,10 @@ public void onConfigurationChange(Configuration conf) { private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) { Deque slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo); long current = EnvironmentEdgeManager.currentTime(); - while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl - || slowDNQueue.size() >= minSlowDetectCount)) { + while ( + !slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl + || slowDNQueue.size() >= minSlowDetectCount) + ) { slowDNQueue.removeFirst(); } slowDNQueue.addLast(new PacketAckData(dataLength, processTime)); @@ -177,13 +181,13 @@ private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long } private void setConf(Configuration conf) { - this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, - DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); + this.minSlowDetectCount = + conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL); this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY, - DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); - this.minLengthForSpeedCheck = conf.getLong( - DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, + DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); + this.minLengthForSpeedCheck = + conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY, DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java index 91c003cb6dd0..0f80f874a319 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns - * a boolean to support canceling the operation. + * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support + * canceling the operation. *
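
Editorial sketch: the checkProcessTimeAndSpeed and addSlowAckData hunks above implement a two-part slowness test (a flat ack-latency bound for small packets, a minimum flush speed for large ones) plus a bounded per-datanode history of slow events, so a node is only reported for exclusion after repeated offences. A simplified version of that decision logic follows; the threshold values are illustrative, not the configuration defaults.

    import java.util.Deque;

    public class SlowCheckSketch {
      // Illustrative thresholds; the real values come from the configuration keys above.
      static final long MIN_LENGTH_FOR_SPEED_CHECK = 64 * 1024; // bytes
      static final long SLOW_PACKET_ACK_MS = 6000;              // ms
      static final double MIN_FLUSH_SPEED_KBS = 20.0;           // KB/s (bytes per ms numerically)
      static final int MIN_SLOW_DETECT_COUNT = 5;
      static final long SLOW_DATA_TTL_MS = 10 * 60 * 1000;      // 10 minutes

      /** Small packets are judged by ack latency alone; large packets by effective flush speed. */
      static boolean isSlow(long packetDataLen, long processTimeMs) {
        return (packetDataLen <= MIN_LENGTH_FOR_SPEED_CHECK && processTimeMs > SLOW_PACKET_ACK_MS)
            || (packetDataLen > MIN_LENGTH_FOR_SPEED_CHECK
                && (double) packetDataLen / processTimeMs < MIN_FLUSH_SPEED_KBS);
      }

      /**
       * Keep a sliding window of slow-event timestamps; only when the window fills up is the
       * datanode reported for exclusion, which filters out one-off hiccups such as GC pauses.
       */
      static boolean recordSlowEvent(Deque<Long> slowEventTimestamps, long now) {
        while (!slowEventTimestamps.isEmpty()
            && (now - slowEventTimestamps.getFirst() > SLOW_DATA_TTL_MS
                || slowEventTimestamps.size() >= MIN_SLOW_DETECT_COUNT)) {
          slowEventTimestamps.removeFirst();
        }
        slowEventTimestamps.addLast(now);
        return slowEventTimestamps.size() >= MIN_SLOW_DETECT_COUNT;
      }
    }
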
* <p>
* Used for doing updating of OPENING znode during log replay on region open. */ @@ -30,8 +29,8 @@ public interface CancelableProgressable { /** - * Report progress. Returns true if operations should continue, false if the - * operation should be canceled and rolled back. + * Report progress. Returns true if operations should continue, false if the operation should be + * canceled and rolled back. * @return whether to continue (true) or cancel (false) the operation */ boolean progress(); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java index 9c3da1658c70..e4a410aa9c34 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -120,8 +120,10 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though. long localStartWaiting = EnvironmentEdgeManager.currentTime(); - while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase * - nbAttempt) { + while ( + (EnvironmentEdgeManager.currentTime() - localStartWaiting) + < subsequentPauseBase * nbAttempt + ) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { try { @@ -152,10 +154,10 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt, final Path p, final long startWaiting) { if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { - LOG.warn("Cannot recoverLease after trying for " + - conf.getInt("hbase.lease.recovery.timeout", 900000) + - "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.warn("Cannot recoverLease after trying for " + + conf.getInt("hbase.lease.recovery.timeout", 900000) + + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + + getLogMessageDetail(nbAttempt, p, startWaiting)); return true; } return false; @@ -170,8 +172,8 @@ private static boolean recoverLease(final DistributedFileSystem dfs, final int n boolean recovered = false; try { recovered = dfs.recoverLease(p); - LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.info((recovered ? 
"Recovered lease, " : "Failed to recover lease, ") + + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it @@ -189,8 +191,8 @@ private static boolean recoverLease(final DistributedFileSystem dfs, final int n */ private static String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) { - return "attempt=" + nbAttempt + " on file=" + p + " after " + - (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; + return "attempt=" + nbAttempt + " on file=" + p + " after " + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; } /** diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java index 51a4aa0b89c0..d5c12069deb9 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java index a3da52ef335f..f7ca1639ec60 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -44,19 +45,15 @@ public void testExcludeSlowDNBySpeed() { StreamSlowMonitor streamSlowDNsMonitor = excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); assertEquals(1, 
excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } @@ -68,19 +65,15 @@ public void testExcludeSlowDNByProcessTime() { StreamSlowMonitor streamSlowDNsMonitor = excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java index d363282921c9..26cbbe034a58 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
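
Editorial sketch: the RecoverLeaseFSUtils hunks above retry dfs.recoverLease() with growing pauses, probe isFileClosed when available, and warn about possible data loss once the recovery timeout elapses, while CancelableProgressable lets the caller abort the wait. A rough usage sketch follows; it assumes a public entry point recoverFileLease(FileSystem, Path, Configuration, CancelableProgressable), which is not shown in this patch.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.CancelableProgressable;
    import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

    public class LeaseRecoverySketch {
      public static void recoverWal(FileSystem fs, Path wal, Configuration conf,
          AtomicBoolean aborted) throws IOException {
        // progress() returning false cancels the recovery loop, e.g. when the caller is shutting down.
        CancelableProgressable reporter = () -> !aborted.get();
        // Assumed signature; see the note above.
        RecoverLeaseFSUtils.recoverFileLease(fs, wal, conf, reporter);
      }
    }
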
See the NOTICE file * distributed with this work for additional information @@ -57,6 +57,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -240,9 +241,9 @@ public void testExcludeFailedConnectToDatanode() StreamSlowMonitor streamSlowDNsMonitor = excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, - f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, - CHANNEL_CLASS, streamSlowDNsMonitor)) { + try (FanOutOneBlockAsyncDFSOutput output = + FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java index 8ee838449e14..3a9c2979b6cf 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -70,10 +71,10 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class); + HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class); private static final Logger LOG = - LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class); + LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class); private static DistributedFileSystem FS; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java index eff8d8a86b7a..cb936a4e7c65 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java index 592598c8bb44..7a3a6de10f09 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java index e7fce27d60c9..cb5fb4006d3e 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java index 55ef0b72b527..07fc3afbf2ff 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public class TestSendBufSizePredictor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSendBufSizePredictor.class); + HBaseClassTestRule.forClass(TestSendBufSizePredictor.class); @Test public void test() { diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java index a91c95ac4dbc..60a492bd2dc3 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java @@ -110,9 +110,9 @@ public static Configuration getSecuredConfiguration() { /** * Set up configuration for a secure HDFS+HBase cluster. - * @param conf configuration object. + * @param conf configuration object. * @param servicePrincipal service principal used by NN, HM and RS. - * @param spnegoPrincipal SPNEGO principal used by NN web UI. + * @param spnegoPrincipal SPNEGO principal used by NN web UI. */ public static void setSecuredConfiguration(Configuration conf, String servicePrincipal, String spnegoPrincipal) { @@ -156,7 +156,7 @@ private static void setSecuredHadoopConfiguration(Configuration conf, /** * Set up SSL configuration for HDFS NameNode and DataNode. * @param utility a HBaseTestingUtility object. - * @param clazz the caller test class. 
+ * @param clazz the caller test class. * @throws Exception if unable to set up SSL configuration */ public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class clazz) diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java index 3c58d9c5c780..3740cab6937a 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -69,8 +68,8 @@ public void testRecoverLease() throws IOException { Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... the we fall into the longer wait loop). - assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 * - HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); + assertTrue((EnvironmentEdgeManager.currentTime() - startTime) + > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); } /** diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml index 2014710cfa50..93e00df17d4c 100644 --- a/hbase-backup/pom.xml +++ b/hbase-backup/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-backup Apache HBase - Backup Backup for HBase - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - @@ -173,12 +153,34 @@ test + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -213,8 +215,7 @@ lifecycle-mapping - - + diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java index ff1e13f79594..25055fd5e8e6 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.Closeable; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupSet; import org.apache.yetus.audience.InterfaceAudience; @@ -30,8 +28,8 @@ * The administrative API for HBase Backup. Construct an instance and call {@link #close()} * afterwards. *
* <p>
- * BackupAdmin can be used to create backups, restore data from backups and for other - * backup-related operations. + * BackupAdmin can be used to create backups, restore data from backups and for other backup-related + * operations. * @since 2.0 */ @InterfaceAudience.Private @@ -71,9 +69,9 @@ public interface BackupAdmin extends Closeable { /** * Merge backup images command - * @param backupIds array of backup ids of images to be merged - * The resulting backup image will have the same backup id as the most - * recent image from a list of images to be merged + * @param backupIds array of backup ids of images to be merged The resulting backup image will + * have the same backup id as the most recent image from a list of images to be + * merged * @throws IOException exception */ void mergeBackups(String[] backupIds) throws IOException; @@ -120,7 +118,7 @@ public interface BackupAdmin extends Closeable { /** * Add tables to backup set command - * @param name name of backup set. + * @param name name of backup set. * @param tables array of tables to be added to this set. * @throws IOException exception */ @@ -128,7 +126,7 @@ public interface BackupAdmin extends Closeable { /** * Remove tables from backup set - * @param name name of backup set. + * @param name name of backup set. * @param tables array of tables to be removed from this set. * @throws IOException exception */ diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java index e3abb6039970..d710e82c4fd3 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,11 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.client.Connection; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java index f5e213716612..4753003fdf2b 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
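
Editorial sketch: the BackupAdmin interface edited above exposes backup-set management and image merging. A small usage sketch built only from the methods visible in this patch is below; the import path of BackupAdminImpl (the implementation used by the drivers later in the patch) and the set name, table name, and backup ids are assumptions for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; // assumed location
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BackupAdminSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            BackupAdmin admin = new BackupAdminImpl(conn)) {
          // Group tables into a named backup set, then merge two existing images;
          // the merged image keeps the id of the most recent one (see the Javadoc above).
          admin.addToBackupSet("critical", new TableName[] { TableName.valueOf("t1") });
          admin.mergeBackups(new String[] { "backup_1396650096738", "backup_1396650096739" });
        }
      }
    }
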
*/ - package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.backup.impl.BackupManager; @@ -34,16 +32,16 @@ public interface BackupCopyJob extends Configurable { /** * Copy backup data to destination - * @param backupInfo context object + * @param backupInfo context object * @param backupManager backup manager - * @param conf configuration - * @param backupType backup type (FULL or INCREMENTAL) - * @param options array of options (implementation-specific) + * @param conf configuration + * @param backupType backup type (FULL or INCREMENTAL) + * @param options array of options (implementation-specific) * @return result (0 - success, -1 failure ) * @throws IOException exception */ int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf, - BackupType backupType, String[] options) throws IOException; + BackupType backupType, String[] options) throws IOException; /** * Cancel copy job diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 7889f6cf7b3f..547a39c8d623 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,9 +58,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * * Command-line entry point for backup operation - * */ @InterfaceAudience.Private public class BackupDriver extends AbstractHBaseTool { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java index 0550f9bc1473..619cecaeaaac 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; @@ -54,7 +53,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor private Connection connection; private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table - //used by unit test to skip reading backup:system + // used by unit test to skip reading backup:system private boolean checkForFullyBackedUpTables = true; private List fullyBackedUpTables = null; @@ -79,8 +78,7 @@ private Set loadHFileRefs(List tableList) throws IOException connection = ConnectionFactory.createConnection(conf); } try (BackupSystemTable tbl = new BackupSystemTable(connection)) { - Map>[] res = - tbl.readBulkLoadedFiles(null, tableList); + Map>[] res = tbl.readBulkLoadedFiles(null, tableList); secondPrevReadFromBackupTbl = prevReadFromBackupTbl; prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime(); return getFilenameFromBulkLoad(res); @@ -91,6 +89,7 @@ private Set loadHFileRefs(List tableList) throws IOException void 
setCheckForFullyBackedUpTables(boolean b) { checkForFullyBackedUpTables = b; } + @Override public Iterable getDeletableFiles(Iterable files) { if (conf == null) { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index d8a6940362a5..8a8f65951acf 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.IOException; @@ -35,6 +34,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder; @@ -59,7 +59,10 @@ public interface Filter { * Backup session states */ public enum BackupState { - RUNNING, COMPLETE, FAILED, ANY + RUNNING, + COMPLETE, + FAILED, + ANY } /** @@ -67,7 +70,12 @@ public enum BackupState { * BackupState.RUNNING */ public enum BackupPhase { - REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST + REQUEST, + SNAPSHOT, + PREPARE_INCREMENTAL, + SNAPSHOTCOPY, + INCREMENTAL_COPY, + STORE_MANIFEST } /** @@ -137,8 +145,8 @@ public enum BackupPhase { private Map> tableSetTimestampMap; /** - * Previous Region server log timestamps for table set after distributed log roll key - - * table name, value - map of RegionServer hostname -> last log rolled timestamp + * Previous Region server log timestamps for table set after distributed log roll key - table + * name, value - map of RegionServer hostname -> last log rolled timestamp */ private Map> incrTimestampMap; @@ -198,8 +206,7 @@ public Map> getTableSetTimestampMap() { return tableSetTimestampMap; } - public void setTableSetTimestampMap(Map> tableSetTimestampMap) { + public void setTableSetTimestampMap(Map> tableSetTimestampMap) { this.tableSetTimestampMap = tableSetTimestampMap; } @@ -357,8 +364,7 @@ public void setIncrBackupFileList(List incrBackupFileList) { * Set the new region server log timestamps after distributed log roll * @param prevTableSetTimestampMap table timestamp map */ - public void setIncrTimestampMap(Map> prevTableSetTimestampMap) { + public void setIncrTimestampMap(Map> prevTableSetTimestampMap) { this.incrTimestampMap = prevTableSetTimestampMap; } @@ -482,8 +488,8 @@ public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name())); } - context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), - proto.getBackupId())); + context + .setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), proto.getBackupId())); if (proto.hasBackupPhase()) { context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name())); @@ -507,12 +513,12 @@ private static Map toMap(List> getTableSetTimestampMap( - Map map) { + private static Map> + getTableSetTimestampMap(Map map) { Map> tableSetTimestampMap = new HashMap<>(); 
for (Entry entry : map.entrySet()) { - tableSetTimestampMap - .put(TableName.valueOf(entry.getKey()), entry.getValue().getRsTimestampMap()); + tableSetTimestampMap.put(TableName.valueOf(entry.getKey()), + entry.getValue().getRsTimestampMap()); } return tableSetTimestampMap; @@ -549,7 +555,7 @@ public String getShortDescription() { public String getStatusAndProgressAsString() { StringBuilder sb = new StringBuilder(); sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) - .append(" progress: ").append(getProgress()); + .append(" progress: ").append(getProgress()); return sb.toString(); } @@ -567,7 +573,7 @@ public String getTableListAsString() { @Override public int compareTo(BackupInfo o) { Long thisTS = - Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); + Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); return thisTS.compareTo(otherTS); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java index de91fa19c52c..1e2b17145025 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.yetus.audience.InterfaceAudience; @@ -32,7 +30,6 @@ public interface BackupMergeJob extends Configurable { /** * Run backup merge operation. - * * @param backupIds backup image ids * @throws IOException if the backup merge operation fails */ diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java index 191e5025dd70..73f97365adbe 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.backup; @@ -22,7 +21,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -56,7 +54,7 @@ public Optional getRegionObserver() { @Override public void postBulkLoadHFile(ObserverContext ctx, List> stagingFamilyPaths, Map> finalPaths) - throws IOException { + throws IOException { Configuration cfg = ctx.getEnvironment().getConfiguration(); if (finalPaths == null) { // there is no need to record state @@ -67,7 +65,7 @@ public void postBulkLoadHFile(ObserverContext ctx, return; } try (Connection connection = ConnectionFactory.createConnection(cfg); - BackupSystemTable tbl = new BackupSystemTable(connection)) { + BackupSystemTable tbl = new BackupSystemTable(connection)) { List fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); RegionInfo info = ctx.getEnvironment().getRegionInfo(); TableName tableName = info.getTable(); @@ -82,16 +80,17 @@ public void postBulkLoadHFile(ObserverContext ctx, LOG.error("Failed to get tables which have been fully backed up", ioe); } } + @Override public void preCommitStoreFile(final ObserverContext ctx, - final byte[] family, final List> pairs) throws IOException { + final byte[] family, final List> pairs) throws IOException { Configuration cfg = ctx.getEnvironment().getConfiguration(); if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) { LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled"); return; } try (Connection connection = ConnectionFactory.createConnection(cfg); - BackupSystemTable tbl = new BackupSystemTable(connection)) { + BackupSystemTable tbl = new BackupSystemTable(connection)) { List fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); RegionInfo info = ctx.getEnvironment().getRegionInfo(); TableName tableName = info.getTable(); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java index 003c0e793e2f..c9c7a5b61810 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.util.List; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java index 0e0b90c0fc4d..56c454519d81 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import org.apache.hadoop.hbase.HConstants; @@ -45,14 +44,14 @@ public interface BackupRestoreConstants { int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000; /* - * Drivers option list + * Drivers option list */ String OPTION_OVERWRITE = "o"; String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists"; String OPTION_CHECK = "c"; String OPTION_CHECK_DESC = - "Check restore sequence and dependencies only (does not execute the command)"; + "Check restore sequence and dependencies only (does not execute the command)"; String OPTION_SET = "s"; String OPTION_SET_DESC = "Backup set name"; @@ -62,8 +61,8 @@ public interface BackupRestoreConstants { String OPTION_DEBUG_DESC = "Enable debug loggings"; String OPTION_TABLE = "t"; - String OPTION_TABLE_DESC = "Table name. If specified, only backup images," - + " which contain this table will be listed."; + String OPTION_TABLE_DESC = + "Table name. If specified, only backup images," + " which contain this table will be listed."; String OPTION_LIST = "l"; String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated."; @@ -84,37 +83,32 @@ public interface BackupRestoreConstants { String OPTION_KEEP = "k"; String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete"; - String OPTION_TABLE_MAPPING = "m"; - String OPTION_TABLE_MAPPING_DESC = - "A comma separated list of target tables. " - + "If specified, each table in must have a mapping"; + String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. " + + "If specified, each table in must have a mapping"; String OPTION_YARN_QUEUE_NAME = "q"; String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on"; String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on"; String JOB_NAME_CONF_KEY = "mapreduce.job.name"; - String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY - + "=true\n" - + "hbase.master.logcleaner.plugins=" - +"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n" - + "hbase.procedure.master.classes=YOUR_CLASSES," - +"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n" - + "hbase.procedure.regionserver.classes=YOUR_CLASSES," - + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n" - + "hbase.coprocessor.region.classes=YOUR_CLASSES," - + "org.apache.hadoop.hbase.backup.BackupObserver\n" - + "and restart the cluster\n" - + "For more information please see http://hbase.apache.org/book.html#backuprestore\n"; - String ENABLE_BACKUP = "Backup is not enabled. 
To enable backup, "+ - "in hbase-site.xml, set:\n " - + BACKUP_CONFIG_STRING; + String BACKUP_CONFIG_STRING = + BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins=" + + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n" + + "hbase.procedure.master.classes=YOUR_CLASSES," + + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n" + + "hbase.procedure.regionserver.classes=YOUR_CLASSES," + + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n" + + "hbase.coprocessor.region.classes=YOUR_CLASSES," + + "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n" + + "For more information please see http://hbase.apache.org/book.html#backuprestore\n"; + String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n " + + BACKUP_CONFIG_STRING; String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING; /* - * Delimiter in table name list in restore command + * Delimiter in table name list in restore command */ String TABLENAME_DELIMITER_IN_COMMAND = ","; @@ -123,7 +117,24 @@ public interface BackupRestoreConstants { String BACKUPID_PREFIX = "backup_"; enum BackupCommand { - CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, - SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR + CREATE, + CANCEL, + DELETE, + DESCRIBE, + HISTORY, + STATUS, + CONVERT, + MERGE, + STOP, + SHOW, + HELP, + PROGRESS, + SET, + SET_ADD, + SET_REMOVE, + SET_DELETE, + SET_DESCRIBE, + SET_LIST, + REPAIR } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java index b1bc532d6c1e..40bbb4bc7fea 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ /** * Factory implementation for backup/restore related jobs - * */ @InterfaceAudience.Private public final class BackupRestoreFactory { @@ -45,7 +44,7 @@ private BackupRestoreFactory() { */ public static RestoreJob getRestoreJob(Configuration conf) { Class cls = - conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class); + conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class); RestoreJob service = ReflectionUtils.newInstance(cls, conf); service.setConf(conf); return service; @@ -57,9 +56,8 @@ public static RestoreJob getRestoreJob(Configuration conf) { * @return backup copy job instance */ public static BackupCopyJob getBackupCopyJob(Configuration conf) { - Class cls = - conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class, - BackupCopyJob.class); + Class cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, + MapReduceBackupCopyJob.class, BackupCopyJob.class); BackupCopyJob service = ReflectionUtils.newInstance(cls, conf); service.setConf(conf); return service; @@ -71,9 +69,8 @@ public static BackupCopyJob getBackupCopyJob(Configuration conf) { * @return backup merge job instance */ public static BackupMergeJob getBackupMergeJob(Configuration conf) { - Class cls = - conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class, - BackupMergeJob.class); + Class cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, + MapReduceBackupMergeJob.class, BackupMergeJob.class); BackupMergeJob service = ReflectionUtils.newInstance(cls, conf); service.setConf(conf); return service; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java index 50abcc82acc5..01097422e3a1 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; @@ -29,14 +29,14 @@ */ @InterfaceAudience.Private -public class BackupTableInfo { +public class BackupTableInfo { /* - * Table name for backup + * Table name for backup */ private TableName table; /* - * Snapshot name for offline/online snapshot + * Snapshot name for offline/online snapshot */ private String snapshotName = null; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index e0975548ae36..c41a4a182435 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -1,14 +1,13 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.IOException; import java.util.HashMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -52,15 +49,15 @@ private HBackupFileSystem() { * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory * @param backupRootDir backup root directory - * @param backupId backup id - * @param tableName table name + * @param backupId backup id + * @param tableName table name * @return backupPath String for the particular table */ - public static String - getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { + public static String getTableBackupDir(String backupRootDir, String backupId, + TableName tableName) { return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() - + Path.SEPARATOR; + + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + + Path.SEPARATOR; } /** @@ -75,7 +72,7 @@ public static Path getBackupTmpDirPath(String backupRootDir) { /** * Get backup tmp directory for backupId * @param backupRoot backup root - * @param backupId backup id + * @param backupId backup id * @return backup tmp directory path */ public static Path getBackupTmpDirPathForBackupId(String backupRoot, String backupId) { @@ -83,7 +80,7 @@ public static Path getBackupTmpDirPathForBackupId(String backupRoot, String back } public static String getTableBackupDataDir(String backupRootDir, String backupId, - TableName tableName) { + TableName tableName) { return getTableBackupDir(backupRootDir, backupId, tableName) + Path.SEPARATOR + "data"; } @@ -97,8 +94,8 @@ public static Path getBackupPath(String backupRootDir, String backupId) { * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory * @param backupRootPath backup root path - * @param tableName table name - * @param backupId backup Id + * @param tableName table name + * @param backupId backup Id * @return backupPath for the particular table */ public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) { @@ -109,12 +106,12 @@ public static Path getTableBackupPath(TableName tableName, Path backupRootPath, * Given the backup root dir and the backup id, return the log file location for an incremental * backup. 
* @param backupRootDir backup root directory - * @param backupId backup id + * @param backupId backup id * @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738" */ public static String getLogBackupDir(String backupRootDir, String backupId) { return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + HConstants.HREGION_LOGDIR_NAME; + + HConstants.HREGION_LOGDIR_NAME; } public static Path getLogBackupPath(String backupRootDir, String backupId) { @@ -124,37 +121,35 @@ public static Path getLogBackupPath(String backupRootDir, String backupId) { // TODO we do not keep WAL files anymore // Move manifest file to other place private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId) - throws IOException { + throws IOException { FileSystem fs = backupRootPath.getFileSystem(conf); - Path manifestPath = - new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR - + BackupManifest.MANIFEST_FILE_NAME); + Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR + + BackupManifest.MANIFEST_FILE_NAME); if (!fs.exists(manifestPath)) { - String errorMsg = - "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " - + backupId + ". File " + manifestPath + " does not exists. Did " + backupId - + " correspond to previously taken backup ?"; + String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + + " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId + + " correspond to previously taken backup ?"; throw new IOException(errorMsg); } return manifestPath; } - public static BackupManifest - getManifest(Configuration conf, Path backupRootPath, String backupId) throws IOException { + public static BackupManifest getManifest(Configuration conf, Path backupRootPath, String backupId) + throws IOException { BackupManifest manifest = - new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId)); + new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId)); return manifest; } /** * Check whether the backup image path and there is manifest file in the path. * @param backupManifestMap If all the manifests are found, then they are put into this map - * @param tableArray the tables involved + * @param tableArray the tables involved * @throws IOException exception */ public static void checkImageManifestExist(HashMap backupManifestMap, - TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId) - throws IOException { + TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId) + throws IOException { for (TableName tableName : tableArray) { BackupManifest manifest = getManifest(conf, backupRootPath, backupId); backupManifestMap.put(tableName, manifest); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index 433815851a73..b4d73e134fab 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
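
Editorial sketch: the HBackupFileSystem helpers above lay a backup out as root/backupId/namespace/table/ for table data and root/backupId/WALs for the incremental-backup logs. A short sketch reusing the example root and id from the Javadoc above:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.HBackupFileSystem;

    public class BackupLayoutSketch {
      public static void main(String[] args) {
        String root = "hdfs://backup.hbase.org:9000/user/biadmin/backup";
        String backupId = "backup_1396650096738";
        // Prints ".../backup_1396650096738/default/t1_dn/"
        System.out.println(
            HBackupFileSystem.getTableBackupDir(root, backupId, TableName.valueOf("t1_dn")));
        // Prints ".../backup_1396650096738/WALs"
        System.out.println(HBackupFileSystem.getLogBackupDir(root, backupId));
      }
    }
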
See the NOTICE file * distributed with this work for additional information @@ -59,9 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; /** - * * Command-line entry point for restore operation - * */ @InterfaceAudience.Private public class RestoreDriver extends AbstractHBaseTool { @@ -69,10 +67,10 @@ public class RestoreDriver extends AbstractHBaseTool { private CommandLine cmd; private static final String USAGE_STRING = - "Usage: hbase restore [options]\n" - + " backup_path Path to a backup destination root\n" - + " backup_id Backup image ID to restore\n" - + " table(s) Comma-separated list of tables to restore\n"; + "Usage: hbase restore [options]\n" + + " backup_path Path to a backup destination root\n" + + " backup_id Backup image ID to restore\n" + + " table(s) Comma-separated list of tables to restore\n"; private static final String USAGE_FOOTER = ""; @@ -101,19 +99,19 @@ private int parseAndRun(String[] args) throws IOException { boolean overwrite = cmd.hasOption(OPTION_OVERWRITE); if (overwrite) { LOG.debug("Found -overwrite option in restore command, " - + "will overwrite to existing table if any in the restore target"); + + "will overwrite to existing table if any in the restore target"); } // whether to only check the dependencies, false by default boolean check = cmd.hasOption(OPTION_CHECK); if (check) { - LOG.debug("Found -check option in restore command, " - + "will check and verify the dependencies"); + LOG.debug( + "Found -check option in restore command, " + "will check and verify the dependencies"); } if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) { - System.err.println("Options -s and -t are mutaully exclusive,"+ - " you can not specify both of them."); + System.err.println( + "Options -s and -t are mutaully exclusive," + " you can not specify both of them."); printToolUsage(); return -1; } @@ -141,9 +139,9 @@ private int parseAndRun(String[] args) throws IOException { String backupId = remainArgs[1]; String tables; String tableMapping = - cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null; + cmd.hasOption(OPTION_TABLE_MAPPING) ? 
cmd.getOptionValue(OPTION_TABLE_MAPPING) : null; try (final Connection conn = ConnectionFactory.createConnection(conf); - BackupAdmin client = new BackupAdminImpl(conn)) { + BackupAdmin client = new BackupAdminImpl(conn)) { // Check backup set if (cmd.hasOption(OPTION_SET)) { String setName = cmd.getOptionValue(OPTION_SET); @@ -155,8 +153,8 @@ private int parseAndRun(String[] args) throws IOException { return -2; } if (tables == null) { - System.out.println("ERROR: Backup set '" + setName - + "' is either empty or does not exist"); + System.out + .println("ERROR: Backup set '" + setName + "' is either empty or does not exist"); printToolUsage(); return -3; } @@ -167,15 +165,16 @@ private int parseAndRun(String[] args) throws IOException { TableName[] sTableArray = BackupUtils.parseTableNames(tables); TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping); - if (sTableArray != null && tTableArray != null && - (sTableArray.length != tTableArray.length)) { + if ( + sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length) + ) { System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); printToolUsage(); return -4; } - client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, - sTableArray, tTableArray, overwrite)); + client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray, + tTableArray, overwrite)); } catch (Exception e) { LOG.error("Error while running restore backup", e); return -5; @@ -184,7 +183,7 @@ private int parseAndRun(String[] args) throws IOException { } private String getTablesForSet(Connection conn, String name, Configuration conf) - throws IOException { + throws IOException { try (final BackupSystemTable table = new BackupSystemTable(conn)) { List tables = table.describeBackupSet(name); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java index 29b128887780..b014e6693bbc 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; @@ -34,12 +32,12 @@ public interface RestoreJob extends Configurable { /** * Run restore operation - * @param dirPaths path array of WAL log directories - * @param fromTables from tables - * @param toTables to tables + * @param dirPaths path array of WAL log directories + * @param fromTables from tables + * @param toTables to tables * @param fullBackupRestore full backup restore * @throws IOException if running the job fails */ - void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, - boolean fullBackupRestore) throws IOException; + void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore) + throws IOException; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java index a654cce50d5b..eb4786f57869 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index 0d20f37def6c..f580fb0c47bb 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -57,7 +56,7 @@ public class BackupAdminImpl implements BackupAdmin { public final static String CHECK_OK = "Checking backup images: OK"; public final static String CHECK_FAILED = - "Checking backup images: Failed. Some dependencies are missing for restore"; + "Checking backup images: Failed. Some dependencies are missing for restore"; private static final Logger LOG = LoggerFactory.getLogger(BackupAdminImpl.class); private final Connection conn; @@ -107,8 +106,8 @@ public int deleteBackups(String[] backupIds) throws IOException { deleteSessionStarted = true; } catch (IOException e) { LOG.warn("You can not run delete command while active backup session is in progress. 
\n" - + "If there is no active backup session running, run backup repair utility to " - + "restore \nbackup system integrity."); + + "If there is no active backup session running, run backup repair utility to " + + "restore \nbackup system integrity."); return -1; } @@ -158,7 +157,7 @@ public int deleteBackups(String[] backupIds) throws IOException { BackupSystemTable.deleteSnapshot(conn); // We still have record with unfinished delete operation LOG.error("Delete operation failed, please run backup repair utility to restore " - + "backup system integrity", e); + + "backup system integrity", e); throw e; } else { LOG.warn("Delete operation succeeded, there were some errors: ", e); @@ -177,15 +176,15 @@ public int deleteBackups(String[] backupIds) throws IOException { /** * Updates incremental backup set for every backupRoot * @param tablesMap map [backupRoot: {@code Set}] - * @param table backup system table + * @param table backup system table * @throws IOException if a table operation fails */ private void finalizeDelete(Map> tablesMap, BackupSystemTable table) - throws IOException { + throws IOException { for (String backupRoot : tablesMap.keySet()) { Set incrTableSet = table.getIncrementalBackupTableSet(backupRoot); Map> tableMap = - table.getBackupHistoryForTableSet(incrTableSet, backupRoot); + table.getBackupHistoryForTableSet(incrTableSet, backupRoot); for (Map.Entry> entry : tableMap.entrySet()) { if (entry.getValue() == null) { // No more backups for a table @@ -283,10 +282,10 @@ private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOE } private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) - throws IOException { + throws IOException { List tables = info.getTableNames(); - LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables=" - + info.getTableListAsString()); + LOG.debug( + "Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString()); if (tables.contains(tn)) { tables.remove(tn); @@ -306,7 +305,7 @@ private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSys } private List getAffectedBackupSessions(BackupInfo backupInfo, TableName tn, - BackupSystemTable table) throws IOException { + BackupSystemTable table) throws IOException { LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn); long ts = backupInfo.getStartTs(); List list = new ArrayList<>(); @@ -325,7 +324,7 @@ private List getAffectedBackupSessions(BackupInfo backupInfo, TableN list.clear(); } else { LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn - + " added " + info.getBackupId() + " tables=" + info.getTableListAsString()); + + " added " + info.getBackupId() + " tables=" + info.getTableListAsString()); list.add(info); } } @@ -338,7 +337,7 @@ private List getAffectedBackupSessions(BackupInfo backupInfo, TableN * @throws IOException if cleaning up the backup directory fails */ private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf) - throws IOException { + throws IOException { try { // clean up the data at target directory String targetDir = backupInfo.getBackupRootDir(); @@ -349,9 +348,8 @@ private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configurat FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); - Path targetDirPath = - new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(), - backupInfo.getBackupId(), 
table)); + Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(), + backupInfo.getBackupId(), table)); if (outputFs.delete(targetDirPath, true)) { LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); } else { @@ -359,13 +357,13 @@ private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configurat } } catch (IOException e1) { LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table - + "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); + + "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); throw e1; } } private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) - throws IOException { + throws IOException { List history = table.getBackupHistory(); for (BackupInfo info : history) { List tables = info.getTableNames(); @@ -466,7 +464,7 @@ public boolean deleteBackupSet(String name) throws IOException { public void addToBackupSet(String name, TableName[] tables) throws IOException { String[] tableNames = new String[tables.length]; try (final BackupSystemTable table = new BackupSystemTable(conn); - final Admin admin = conn.getAdmin()) { + final Admin admin = conn.getAdmin()) { for (int i = 0; i < tables.length; i++) { tableNames[i] = tables[i].getNameAsString(); if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { @@ -474,8 +472,8 @@ public void addToBackupSet(String name, TableName[] tables) throws IOException { } } table.addToBackupSet(name, tableNames); - LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name - + "' backup set"); + LOG.info( + "Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set"); } } @@ -484,8 +482,8 @@ public void removeFromBackupSet(String name, TableName[] tables) throws IOExcept LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); try (final BackupSystemTable table = new BackupSystemTable(conn)) { table.removeFromBackupSet(name, toStringArray(tables)); - LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name - + "' completed."); + LOG.info( + "Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed."); } } @@ -534,9 +532,9 @@ public String backupTables(BackupRequest request) throws IOException { } if (incrTableSet.isEmpty()) { - String msg = "Incremental backup table set contains no tables. " - + "You need to run full backup first " - + (tableList != null ? "on " + StringUtils.join(tableList, ",") : ""); + String msg = + "Incremental backup table set contains no tables. " + "You need to run full backup first " + + (tableList != null ? "on " + StringUtils.join(tableList, ",") : ""); throw new IOException(msg); } @@ -545,7 +543,7 @@ public String backupTables(BackupRequest request) throws IOException { if (!tableList.isEmpty()) { String extraTables = StringUtils.join(tableList, ","); String msg = "Some tables (" + extraTables + ") haven't gone through full backup. 
" - + "Perform full backup on " + extraTables + " first, " + "then retry the command"; + + "Perform full backup on " + extraTables + " first, " + "then retry the command"; throw new IOException(msg); } } @@ -554,13 +552,13 @@ public String backupTables(BackupRequest request) throws IOException { if (tableList != null && !tableList.isEmpty()) { for (TableName table : tableList) { String targetTableBackupDir = - HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); Path targetTableBackupDirPath = new Path(targetTableBackupDir); FileSystem outputFs = - FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration()); + FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration()); if (outputFs.exists(targetTableBackupDirPath)) { - throw new IOException("Target backup directory " + targetTableBackupDir - + " exists already."); + throw new IOException( + "Target backup directory " + targetTableBackupDir + " exists already."); } outputFs.mkdirs(targetTableBackupDirPath); } @@ -581,8 +579,8 @@ public String backupTables(BackupRequest request) throws IOException { tableList = excludeNonExistingTables(tableList, nonExistingTableList); } else { // Throw exception only in full mode - we try to backup non-existing table - throw new IOException("Non-existing tables found in the table list: " - + nonExistingTableList); + throw new IOException( + "Non-existing tables found in the table list: " + nonExistingTableList); } } } @@ -590,9 +588,9 @@ public String backupTables(BackupRequest request) throws IOException { // update table list BackupRequest.Builder builder = new BackupRequest.Builder(); request = builder.withBackupType(request.getBackupType()).withTableList(tableList) - .withTargetRootDir(request.getTargetRootDir()) - .withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks()) - .withBandwidthPerTasks((int) request.getBandwidth()).build(); + .withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName()) + .withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth()) + .build(); TableBackupClient client; try { @@ -608,7 +606,7 @@ public String backupTables(BackupRequest request) throws IOException { } private List excludeNonExistingTables(List tableList, - List nonExistingTableList) { + List nonExistingTableList) { for (TableName table : nonExistingTableList) { tableList.remove(table); } @@ -619,7 +617,7 @@ private List excludeNonExistingTables(List tableList, public void mergeBackups(String[] backupIds) throws IOException { try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { checkIfValidForMerge(backupIds, sysTable); - //TODO run job on remote cluster + // TODO run job on remote cluster BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration()); job.run(backupIds); } @@ -627,7 +625,6 @@ public void mergeBackups(String[] backupIds) throws IOException { /** * Verifies that backup images are valid for merge. - * *
* <ul>
* <li>All backups MUST be in the same destination</li>
* <li>No FULL backups are allowed - only INCREMENTAL</li>
@@ -636,11 +633,11 @@ public void mergeBackups(String[] backupIds) throws IOException {
* </ul>
* <p>
* @param backupIds list of backup ids - * @param table backup system table + * @param table backup system table * @throws IOException if the backup image is not valid for merge */ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) - throws IOException { + throws IOException { String backupRoot = null; final Set allTables = new HashSet<>(); @@ -656,7 +653,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) backupRoot = bInfo.getBackupRootDir(); } else if (!bInfo.getBackupRootDir().equals(backupRoot)) { throw new IOException("Found different backup destinations in a list of a backup sessions " - + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir()); + + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir()); } if (bInfo.getType() == BackupType.FULL) { throw new IOException("FULL backup image can not be merged for: \n" + bInfo); @@ -664,7 +661,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) if (bInfo.getState() != BackupState.COMPLETE) { throw new IOException("Backup image " + backupId - + " can not be merged becuase of its state: " + bInfo.getState()); + + " can not be merged becuase of its state: " + bInfo.getState()); } allBackups.add(backupId); allTables.addAll(bInfo.getTableNames()); @@ -677,7 +674,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) } } - final long startRangeTime = minTime; + final long startRangeTime = minTime; final long endRangeTime = maxTime; final String backupDest = backupRoot; // Check we have no 'holes' in backup id list @@ -688,7 +685,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) BackupInfo.Filter timeRangeFilter = info -> { long time = info.getStartTs(); - return time >= startRangeTime && time <= endRangeTime ; + return time >= startRangeTime && time <= endRangeTime; }; BackupInfo.Filter tableFilter = info -> { @@ -699,20 +696,20 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL; BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE; - List allInfos = table.getBackupHistory(-1, destinationFilter, - timeRangeFilter, tableFilter, typeFilter, stateFilter); + List allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter, + tableFilter, typeFilter, stateFilter); if (allInfos.size() != allBackups.size()) { - // Yes we have at least one hole in backup image sequence + // Yes we have at least one hole in backup image sequence List missingIds = new ArrayList<>(); - for(BackupInfo info: allInfos) { - if(allBackups.contains(info.getBackupId())) { + for (BackupInfo info : allInfos) { + if (allBackups.contains(info.getBackupId())) { continue; } missingIds.add(info.getBackupId()); } String errMsg = - "Sequence of backup ids has 'holes'. The following backup images must be added:" + - org.apache.hadoop.util.StringUtils.join(",", missingIds); + "Sequence of backup ids has 'holes'. 
The following backup images must be added:" + + org.apache.hadoop.util.StringUtils.join(",", missingIds); throw new IOException(errMsg); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index b0a29e257b07..53295401f761 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC; @@ -44,7 +43,6 @@ import java.io.IOException; import java.net.URI; import java.util.List; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -80,33 +78,32 @@ public final class BackupCommands { public final static String INCORRECT_USAGE = "Incorrect usage"; public final static String TOP_LEVEL_NOT_ALLOWED = - "Top level (root) folder is not allowed to be a backup destination"; + "Top level (root) folder is not allowed to be a backup destination"; public static final String USAGE = "Usage: hbase backup COMMAND [command-specific arguments]\n" - + "where COMMAND is one of:\n" + " create create a new backup image\n" - + " delete delete an existing backup image\n" - + " describe show the detailed information of a backup image\n" - + " history show history of all successful backups\n" - + " progress show the progress of the latest backup request\n" - + " set backup set management\n" - + " repair repair backup system table\n" - + " merge merge backup images\n" - + "Run \'hbase backup COMMAND -h\' to see help message for each command\n"; + + "where COMMAND is one of:\n" + " create create a new backup image\n" + + " delete delete an existing backup image\n" + + " describe show the detailed information of a backup image\n" + + " history show history of all successful backups\n" + + " progress show the progress of the latest backup request\n" + + " set backup set management\n" + " repair repair backup system table\n" + + " merge merge backup images\n" + + "Run \'hbase backup COMMAND -h\' to see help message for each command\n"; public static final String CREATE_CMD_USAGE = - "Usage: hbase backup create [options]\n" - + " type \"full\" to create a full backup image\n" - + " \"incremental\" to create an incremental backup image\n" - + " backup_path Full path to store the backup image\n"; + "Usage: hbase backup create [options]\n" + + " type \"full\" to create a full backup image\n" + + " \"incremental\" to create an incremental backup image\n" + + " backup_path Full path to store the backup image\n"; public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress \n" - + " backup_id Backup image id (optional). If no id specified, the command will show\n" - + " progress for currently running backup session."; + + " backup_id Backup image id (optional). 
If no id specified, the command will show\n" + + " progress for currently running backup session."; public static final String NO_INFO_FOUND = "No info was found for backup id: "; public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found."; - public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe \n" - + " backup_id Backup image id\n"; + public static final String DESCRIBE_CMD_USAGE = + "Usage: hbase backup describe \n" + " backup_id Backup image id\n"; public static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [options]"; @@ -115,14 +112,13 @@ public final class BackupCommands { public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n"; public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n" - + " name Backup set name\n" - + " tables Comma separated list of tables.\n" + "COMMAND is one of:\n" - + " add add tables to a set, create a set if needed\n" - + " remove remove tables from a set\n" - + " list list all backup sets in the system\n" - + " describe describe set\n" + " delete delete backup set\n"; + + " name Backup set name\n" + " tables Comma separated list of tables.\n" + + "COMMAND is one of:\n" + " add add tables to a set, create a set if needed\n" + + " remove remove tables from a set\n" + + " list list all backup sets in the system\n" + " describe describe set\n" + + " delete delete backup set\n"; public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n" - + " backup_ids Comma separated list of backup image ids.\n"; + + " backup_ids Comma separated list of backup image ids.\n"; public static final String USAGE_FOOTER = ""; @@ -281,8 +277,10 @@ public void execute() throws IOException { throw new IOException(INCORRECT_USAGE); } - if (!BackupType.FULL.toString().equalsIgnoreCase(args[1]) - && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) { + if ( + !BackupType.FULL.toString().equalsIgnoreCase(args[1]) + && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1]) + ) { System.out.println("ERROR: invalid backup type: " + args[1]); printUsage(); throw new IOException(INCORRECT_USAGE); @@ -301,8 +299,8 @@ public void execute() throws IOException { // Check if we have both: backup set and list of tables if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) { - System.out.println("ERROR: You can specify either backup set or list" - + " of tables, but not both"); + System.out + .println("ERROR: You can specify either backup set or list" + " of tables, but not both"); printUsage(); throw new IOException(INCORRECT_USAGE); } @@ -315,20 +313,20 @@ public void execute() throws IOException { tables = getTablesForSet(setName, getConf()); if (tables == null) { - System.out.println("ERROR: Backup set '" + setName - + "' is either empty or does not exist"); + System.out + .println("ERROR: Backup set '" + setName + "' is either empty or does not exist"); printUsage(); throw new IOException(INCORRECT_USAGE); } } else { tables = cmdline.getOptionValue(OPTION_TABLE); } - int bandwidth = - cmdline.hasOption(OPTION_BANDWIDTH) ? Integer.parseInt(cmdline - .getOptionValue(OPTION_BANDWIDTH)) : -1; - int workers = - cmdline.hasOption(OPTION_WORKERS) ? Integer.parseInt(cmdline - .getOptionValue(OPTION_WORKERS)) : -1; + int bandwidth = cmdline.hasOption(OPTION_BANDWIDTH) + ? Integer.parseInt(cmdline.getOptionValue(OPTION_BANDWIDTH)) + : -1; + int workers = cmdline.hasOption(OPTION_WORKERS) + ? 
Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS)) + : -1; if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) { String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME); @@ -338,13 +336,11 @@ public void execute() throws IOException { try (BackupAdminImpl admin = new BackupAdminImpl(conn)) { BackupRequest.Builder builder = new BackupRequest.Builder(); - BackupRequest request = - builder - .withBackupType(BackupType.valueOf(args[1].toUpperCase())) - .withTableList( - tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null) - .withTargetRootDir(targetBackupDir).withTotalTasks(workers) - .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build(); + BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase())) + .withTableList( + tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null) + .withTargetRootDir(targetBackupDir).withTotalTasks(workers) + .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build(); String backupId = admin.backupTables(request); System.out.println("Backup session " + backupId + " finished. Status: SUCCESS"); } catch (IOException e) { @@ -506,8 +502,8 @@ public static class ProgressCommand extends Command { public void execute() throws IOException { if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) { - System.out.println("No backup id was specified, " - + "will retrieve the most recent (ongoing) session"); + System.out.println( + "No backup id was specified, " + "will retrieve the most recent (ongoing) session"); } String[] args = cmdline == null ? null : cmdline.getArgs(); if (args != null && args.length > 2) { @@ -601,15 +597,15 @@ public boolean apply(BackupInfo info) { }; List history = null; try (final BackupSystemTable sysTable = new BackupSystemTable(conn); - BackupAdminImpl admin = new BackupAdminImpl(conn)) { + BackupAdminImpl admin = new BackupAdminImpl(conn)) { history = sysTable.getBackupHistory(-1, dateFilter); String[] backupIds = convertToBackupIds(history); int deleted = admin.deleteBackups(backupIds); System.out.println("Deleted " + deleted + " backups. Total older than " + days + " days: " - + backupIds.length); + + backupIds.length); } catch (IOException e) { System.err.println("Delete command FAILED. Please run backup repair tool to restore backup " - + "system integrity"); + + "system integrity"); throw e; } } @@ -631,7 +627,7 @@ private void executeDeleteListOfBackups(CommandLine cmdline) throws IOException System.out.println("Deleted " + deleted + " backups. Total requested: " + backupIds.length); } catch (IOException e) { System.err.println("Delete command FAILED. Please run backup repair tool to restore backup " - + "system integrity"); + + "system integrity"); throw e; } @@ -673,14 +669,14 @@ public void execute() throws IOException { Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + final BackupSystemTable sysTable = new BackupSystemTable(conn)) { // Failed backup BackupInfo backupInfo; List list = sysTable.getBackupInfos(BackupState.RUNNING); if (list.size() == 0) { // No failed sessions found System.out.println("REPAIR status: no failed sessions found." 
- + " Checking failed delete backup operation ..."); + + " Checking failed delete backup operation ..."); repairFailedBackupDeletionIfAny(conn, sysTable); repairFailedBackupMergeIfAny(conn, sysTable); return; @@ -694,10 +690,9 @@ public void execute() throws IOException { // set overall backup status: failed backupInfo.setState(BackupState.FAILED); // compose the backup failed data - String backupFailedData = - "BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs() - + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" - + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg(); + String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts=" + + backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg(); System.out.println(backupFailedData); TableBackupClient.cleanupAndRestoreBackupSystem(conn, backupInfo, conf); // If backup session is updated to FAILED state - means we @@ -709,7 +704,7 @@ public void execute() throws IOException { } private void repairFailedBackupDeletionIfAny(Connection conn, BackupSystemTable sysTable) - throws IOException { + throws IOException { String[] backupIds = sysTable.getListOfBackupIdsFromDeleteOperation(); if (backupIds == null || backupIds.length == 0) { System.out.println("No failed backup DELETE operation found"); @@ -730,7 +725,7 @@ private void repairFailedBackupDeletionIfAny(Connection conn, BackupSystemTable } public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTable sysTable) - throws IOException { + throws IOException { String[] backupIds = sysTable.getListOfBackupIdsFromMergeOperation(); if (backupIds == null || backupIds.length == 0) { @@ -754,9 +749,11 @@ public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTab } boolean res = fs.rename(tmpPath, destPath); if (!res) { - throw new IOException("MERGE repair: failed to rename from "+ tmpPath+" to "+ destPath); + throw new IOException( + "MERGE repair: failed to rename from " + tmpPath + " to " + destPath); } - System.out.println("MERGE repair: renamed from "+ tmpPath+" to "+ destPath+" res="+ res); + System.out + .println("MERGE repair: renamed from " + tmpPath + " to " + destPath + " res=" + res); } else { checkRemoveBackupImages(fs, backupRoot, backupIds); } @@ -773,16 +770,16 @@ public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTab private static void checkRemoveBackupImages(FileSystem fs, String backupRoot, String[] backupIds) throws IOException { String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds); - for (String backupId: backupIds) { + for (String backupId : backupIds) { if (backupId.equals(mergedBackupId)) { continue; } Path path = HBackupFileSystem.getBackupPath(backupRoot, backupId); if (fs.exists(path)) { if (!fs.delete(path, true)) { - System.out.println("MERGE repair removing: "+ path +" - FAILED"); + System.out.println("MERGE repair removing: " + path + " - FAILED"); } else { - System.out.println("MERGE repair removing: "+ path +" - OK"); + System.out.println("MERGE repair removing: " + path + " - OK"); } } } @@ -816,23 +813,23 @@ public void execute() throws IOException { String[] args = cmdline == null ? null : cmdline.getArgs(); if (args == null || (args.length != 2)) { - System.err.println("ERROR: wrong number of arguments: " - + (args == null ? 
null : args.length)); + System.err + .println("ERROR: wrong number of arguments: " + (args == null ? null : args.length)); printUsage(); throw new IOException(INCORRECT_USAGE); } String[] backupIds = args[1].split(","); if (backupIds.length < 2) { - String msg = "ERROR: can not merge a single backup image. "+ - "Number of images must be greater than 1."; + String msg = "ERROR: can not merge a single backup image. " + + "Number of images must be greater than 1."; System.err.println(msg); throw new IOException(msg); } Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdminImpl admin = new BackupAdminImpl(conn)) { + final BackupAdminImpl admin = new BackupAdminImpl(conn)) { admin.mergeBackups(backupIds); } } @@ -889,7 +886,7 @@ public boolean apply(BackupInfo info) { } else { // load from backup FS history = - BackupUtils.getHistory(getConf(), n, backupRootPath, tableNameFilter, tableSetFilter); + BackupUtils.getHistory(getConf(), n, backupRootPath, tableNameFilter, tableSetFilter); } for (BackupInfo info : history) { System.out.println(info.getShortDescription()); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java index 0147c292a276..8dd262cbb88f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.impl; import org.apache.hadoop.hbase.HBaseIOException; @@ -48,7 +47,7 @@ public BackupException(Throwable cause) { /** * Exception for the given backup that has no previous root cause - * @param msg reason why the backup failed + * @param msg reason why the backup failed * @param desc description of the backup that is being failed */ public BackupException(String msg, BackupInfo desc) { @@ -58,9 +57,9 @@ public BackupException(String msg, BackupInfo desc) { /** * Exception for the given backup due to another exception - * @param msg reason why the backup failed + * @param msg reason why the backup failed * @param cause root cause of the failure - * @param desc description of the backup that is being failed + * @param desc description of the backup that is being failed */ public BackupException(String msg, Throwable cause, BackupInfo desc) { super(msg, cause); @@ -68,10 +67,9 @@ public BackupException(String msg, Throwable cause, BackupInfo desc) { } /** - * Exception when the description of the backup cannot be determined, due to some other root - * cause + * Exception when the description of the backup cannot be determined, due to some other root cause * @param message description of what caused the failure - * @param e root cause + * @param e root cause */ public BackupException(String message, Exception e) { super(message, e); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index 08494f0a1e5e..a543b577b7ae 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +59,7 @@ public class BackupManager implements Closeable { // in seconds public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY = - "hbase.backup.exclusive.op.timeout.seconds"; + "hbase.backup.exclusive.op.timeout.seconds"; // In seconds private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600; private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class); @@ -77,10 +76,12 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public BackupManager(Connection conn, Configuration conf) throws IOException { - if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, - BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) { + if ( + !conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, + BackupRestoreConstants.BACKUP_ENABLE_DEFAULT) + ) { throw new BackupException("HBase backup is not enabled. Check your " - + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting."); + + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting."); } this.conf = conf; this.conn = conn; @@ -120,12 +121,13 @@ public static void decorateMasterConfiguration(Configuration conf) { } plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); - conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") + - BackupHFileCleaner.class.getName()); + conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, + (plugins == null ? 
"" : plugins + ",") + BackupHFileCleaner.class.getName()); if (LOG.isDebugEnabled()) { - LOG.debug("Added log cleaner: {}. Added master procedure manager: {}." - +"Added master procedure manager: {}", cleanerClass, masterProcedureClass, - BackupHFileCleaner.class.getName()); + LOG.debug( + "Added log cleaner: {}. Added master procedure manager: {}." + + "Added master procedure manager: {}", + cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName()); } } @@ -163,8 +165,7 @@ public static boolean isBackupEnabled(Configuration conf) { } /** - * Get configuration - * @return configuration + * Get configuration n */ Configuration getConf() { return conf; @@ -186,17 +187,15 @@ public void close() { /** * Creates a backup info based on input backup request. - * @param backupId backup id - * @param type type - * @param tableList table list + * @param backupId backup id + * @param type type + * @param tableList table list * @param targetRootDir root dir - * @param workers number of parallel workers - * @param bandwidth bandwidth per worker in MB per sec - * @return BackupInfo - * @throws BackupException exception + * @param workers number of parallel workers + * @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception */ public BackupInfo createBackupInfo(String backupId, BackupType type, List tableList, - String targetRootDir, int workers, long bandwidth) throws BackupException { + String targetRootDir, int workers, long bandwidth) throws BackupException { if (targetRootDir == null) { throw new BackupException("Wrong backup request parameter: target backup root directory"); } @@ -292,8 +291,8 @@ public ArrayList getAncestors(BackupInfo backupInfo) throws IOExcep BackupImage.Builder builder = BackupImage.newBuilder(); BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) - .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) - .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); + .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) + .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); // Only direct ancestors for a backup are required and not entire history of backup for this // table resulting in verifying all of the previous backups which is unnecessary and backup @@ -320,21 +319,21 @@ public ArrayList getAncestors(BackupInfo backupInfo) throws IOExcep if (BackupManifest.canCoverImage(ancestors, image)) { LOG.debug("Met the backup boundary of the current table set:"); for (BackupImage image1 : ancestors) { - LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir()); + LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir()); } } else { Path logBackupPath = - HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId()); - LOG.debug("Current backup has an incremental backup ancestor, " - + "touching its image manifest in {}" - + " to construct the dependency.", logBackupPath.toString()); + HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId()); + LOG.debug( + "Current backup has an incremental backup ancestor, " + + "touching its image manifest in {}" + " to construct the dependency.", + logBackupPath.toString()); BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); ancestors.add(lastIncrImage); - LOG.debug( - "Last 
dependent incremental backup image: {BackupID={}" + - "BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir()); + LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}", + lastIncrImage.getBackupId(), lastIncrImage.getRootDir()); } } } @@ -345,12 +344,12 @@ public ArrayList getAncestors(BackupInfo backupInfo) throws IOExcep /** * Get the direct ancestors of this backup for one table involved. * @param backupInfo backup info - * @param table table + * @param table table * @return backupImages on the dependency list * @throws IOException exception */ public ArrayList getAncestors(BackupInfo backupInfo, TableName table) - throws IOException { + throws IOException { ArrayList ancestors = getAncestors(backupInfo); ArrayList tableAncestors = new ArrayList<>(); for (BackupImage image : ancestors) { @@ -399,11 +398,13 @@ public void startBackupSession() throws IOException { // Restore the interrupted status Thread.currentThread().interrupt(); } - if (lastWarningOutputTime == 0 - || (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) { + if ( + lastWarningOutputTime == 0 + || (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000 + ) { lastWarningOutputTime = EnvironmentEdgeManager.currentTime(); LOG.warn("Waiting to acquire backup exclusive lock for {}s", - +(lastWarningOutputTime - startTime) / 1000); + +(lastWarningOutputTime - startTime) / 1000); } } else { throw e; @@ -480,8 +481,8 @@ public ArrayList getBackupHistory(boolean completed) throws IOExcept * @param tables tables * @throws IOException exception */ - public void writeRegionServerLogTimestamp(Set tables, - Map newTimestamps) throws IOException { + public void writeRegionServerLogTimestamp(Set tables, Map newTimestamps) + throws IOException { systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir()); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 4d4965dd6576..482b2a266db7 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; @@ -26,7 +25,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -50,9 +48,8 @@ /** * Backup manifest contains all the meta data of a backup image. The manifest info will be bundled * as manifest file together with data. So that each backup image will contain all the info needed - * for restore. BackupManifest is a storage container for BackupImage. - * It is responsible for storing/reading backup image data and has some additional utility methods. - * + * for restore. BackupManifest is a storage container for BackupImage. It is responsible for + * storing/reading backup image data and has some additional utility methods. 
*/ @InterfaceAudience.Private public class BackupManifest { @@ -126,8 +123,8 @@ public BackupImage() { super(); } - private BackupImage(String backupId, BackupType type, String rootDir, - List tableList, long startTs, long completeTs) { + private BackupImage(String backupId, BackupType type, String rootDir, List tableList, + long startTs, long completeTs) { this.backupId = backupId; this.type = type; this.rootDir = rootDir; @@ -149,9 +146,9 @@ static BackupImage fromProto(BackupProtos.BackupImage im) { List ancestorList = im.getAncestorsList(); - BackupType type = - im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL - : BackupType.INCREMENTAL; + BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL + ? BackupType.FULL + : BackupType.INCREMENTAL; BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); for (BackupProtos.BackupImage img : ancestorList) { @@ -187,8 +184,8 @@ BackupProtos.BackupImage toProto() { return builder.build(); } - private static Map> loadIncrementalTimestampMap( - BackupProtos.BackupImage proto) { + private static Map> + loadIncrementalTimestampMap(BackupProtos.BackupImage proto) { List list = proto.getTstMapList(); Map> incrTimeRanges = new HashMap<>(); @@ -221,13 +218,13 @@ private void setIncrementalTimestampMap(BackupProtos.BackupImage.Builder builder TableName key = entry.getKey(); Map value = entry.getValue(); BackupProtos.TableServerTimestamp.Builder tstBuilder = - BackupProtos.TableServerTimestamp.newBuilder(); + BackupProtos.TableServerTimestamp.newBuilder(); tstBuilder.setTableName(ProtobufUtil.toProtoTableName(key)); for (Map.Entry entry2 : value.entrySet()) { String s = entry2.getKey(); BackupProtos.ServerTimestamp.Builder stBuilder = - BackupProtos.ServerTimestamp.newBuilder(); + BackupProtos.ServerTimestamp.newBuilder(); HBaseProtos.ServerName.Builder snBuilder = HBaseProtos.ServerName.newBuilder(); ServerName sn = ServerName.parseServerName(s); snBuilder.setHostName(sn.getHostname()); @@ -378,10 +375,9 @@ private void setIncrTimeRanges(Map> incrTimeRanges) */ public BackupManifest(BackupInfo backup) { BackupImage.Builder builder = BackupImage.newBuilder(); - this.backupImage = - builder.withBackupId(backup.getBackupId()).withType(backup.getType()) - .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) - .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); + this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) + .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) + .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); } /** @@ -393,16 +389,14 @@ public BackupManifest(BackupInfo backup, TableName table) { List tables = new ArrayList(); tables.add(table); BackupImage.Builder builder = BackupImage.newBuilder(); - this.backupImage = - builder.withBackupId(backup.getBackupId()).withType(backup.getType()) - .withRootDir(backup.getBackupRootDir()).withTableList(tables) - .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); + this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) + .withRootDir(backup.getBackupRootDir()).withTableList(tables) + .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); } /** * Construct manifest from a backup directory. 
- * - * @param conf configuration + * @param conf configuration * @param backupPath backup path * @throws IOException if constructing the manifest from the backup directory fails */ @@ -412,7 +406,7 @@ public BackupManifest(Configuration conf, Path backupPath) throws IOException { /** * Construct manifest from a backup directory. - * @param fs the FileSystem + * @param fs the FileSystem * @param backupPath backup path * @throws BackupException exception */ @@ -449,7 +443,7 @@ public BackupManifest(FileSystem fs, Path backupPath) throws BackupException { } this.backupImage = BackupImage.fromProto(proto); LOG.debug("Loaded manifest instance from manifest file: " - + BackupUtils.getPath(subFile.getPath())); + + BackupUtils.getPath(subFile.getPath())); return; } } @@ -480,10 +474,10 @@ public void store(Configuration conf) throws BackupException { byte[] data = backupImage.toProto().toByteArray(); // write the file, overwrite if already exist Path manifestFilePath = - new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), - backupImage.getBackupId()), MANIFEST_FILE_NAME); + new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()), + MANIFEST_FILE_NAME); try (FSDataOutputStream out = - manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) { + manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) { out.write(data); } catch (IOException e) { throw new BackupException(e.getMessage()); @@ -531,8 +525,8 @@ public ArrayList getRestoreDependentList(boolean reverse) { for (BackupImage image : backupImage.getAncestors()) { restoreImages.put(Long.valueOf(image.startTs), image); } - return new ArrayList<>(reverse ? (restoreImages.descendingMap().values()) - : (restoreImages.values())); + return new ArrayList<>( + reverse ? (restoreImages.descendingMap().values()) : (restoreImages.values())); } /** @@ -614,7 +608,7 @@ public static boolean canCoverImage(BackupImage image1, BackupImage image2) { /** * Check whether backup image set could cover a backup image or not. * @param fullImages The backup image set - * @param image The target backup image + * @param image The target backup image * @return true if fullImages can cover image, otherwise false */ public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { @@ -664,8 +658,8 @@ public BackupInfo toBackupInfo() { info.setStartTs(backupImage.getStartTs()); info.setBackupRootDir(backupImage.getRootDir()); if (backupImage.getType() == BackupType.INCREMENTAL) { - info.setHLogTargetDir(BackupUtils.getLogBackupDir(backupImage.getRootDir(), - backupImage.getBackupId())); + info.setHLogTargetDir( + BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId())); } return info; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 88093ba1c9e1..19ddd8141677 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,6 +68,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -232,7 +232,7 @@ private void waitForSystemTable(Admin admin, TableName tableName) throws IOExcep long TIMEOUT = 60000; long startTime = EnvironmentEdgeManager.currentTime(); LOG.debug("Backup table {} is not present and available, waiting for it to become so", - tableName); + tableName); while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) { try { Thread.sleep(100); @@ -274,15 +274,17 @@ public void updateBackupInfo(BackupInfo info) throws IOException { Map readBulkLoadedFiles(String backupId) throws IOException { Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId); try (Table table = connection.getTable(bulkLoadTableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { Result res = null; Map map = new TreeMap<>(Bytes.BYTES_COMPARATOR); while ((res = scanner.next()) != null) { res.advance(); byte[] row = CellUtil.cloneRow(res.listCells().get(0)); for (Cell cell : res.listCells()) { - if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, - BackupSystemTable.PATH_COL.length) == 0) { + if ( + CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, + BackupSystemTable.PATH_COL.length) == 0 + ) { map.put(row, Bytes.toString(CellUtil.cloneValue(cell))); } } @@ -298,11 +300,11 @@ Map readBulkLoadedFiles(String backupId) throws IOException { * @return array of Map of family to List of Paths */ public Map>[] readBulkLoadedFiles(String backupId, List sTableList) - throws IOException { + throws IOException { Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId); Map>[] mapForSrc = new Map[sTableList == null ? 
1 : sTableList.size()]; try (Table table = connection.getTable(bulkLoadTableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { Result res = null; while ((res = scanner.next()) != null) { res.advance(); @@ -310,14 +312,20 @@ public Map>[] readBulkLoadedFiles(String backupId, List> finalPaths) throws IOException { + Map> finalPaths) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size() + " entries"); @@ -388,14 +396,14 @@ public void writePathsPostBulkLoad(TableName tabName, byte[] region, * @param pairs list of paths for hfiles */ public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family, - final List> pairs) throws IOException { + final List> pairs) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug( "write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries"); } try (Table table = connection.getTable(bulkLoadTableName)) { List puts = - BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs); + BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs); table.put(puts); LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName); } @@ -434,7 +442,7 @@ public void deleteBulkLoadedRows(List rows) throws IOException { Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable); Map>>> tblMap = map.get(tTable); try (Table table = connection.getTable(bulkLoadTableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { Result res = null; while ((res = scanner.next()) != null) { res.advance(); @@ -448,14 +456,20 @@ public void deleteBulkLoadedRows(List rows) throws IOException { rows.add(row); String rowStr = Bytes.toString(row); region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr); - if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0, - BackupSystemTable.FAM_COL.length) == 0) { + if ( + CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0, + BackupSystemTable.FAM_COL.length) == 0 + ) { fam = Bytes.toString(CellUtil.cloneValue(cell)); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, - BackupSystemTable.PATH_COL.length) == 0) { + } else if ( + CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, + BackupSystemTable.PATH_COL.length) == 0 + ) { path = Bytes.toString(CellUtil.cloneValue(cell)); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0, - BackupSystemTable.STATE_COL.length) == 0) { + } else if ( + CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0, + BackupSystemTable.STATE_COL.length) == 0 + ) { byte[] state = CellUtil.cloneValue(cell); if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) { raw = true; @@ -489,7 +503,7 @@ public void deleteBulkLoadedRows(List rows) throws IOException { * @param backupId the backup Id */ public void writeBulkLoadedFiles(List sTableList, Map>[] maps, - String backupId) throws IOException { + String backupId) throws IOException { try (Table table = connection.getTable(bulkLoadTableName)) { long ts = EnvironmentEdgeManager.currentTime(); int cnt = 0; @@ -566,7 +580,7 @@ public String readBackupStartCode(String backupRoot) throws IOException { /** * Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte. 
- * @param startCode start code + * @param startCode start code * @param backupRoot root directory path to backup * @throws IOException exception */ @@ -583,7 +597,7 @@ public void writeBackupStartCode(Long startCode, String backupRoot) throws IOExc /** * Exclusive operations are: create, delete, merge * @throws IOException if a table operation fails or an active backup exclusive operation is - * already underway + * already underway */ public void startBackupExclusiveOperation() throws IOException { LOG.debug("Start new backup exclusive operation"); @@ -591,11 +605,15 @@ public void startBackupExclusiveOperation() throws IOException { try (Table table = connection.getTable(tableName)) { Put put = createPutForStartBackupSession(); // First try to put if row does not exist - if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) - .ifNotExists().thenPut(put)) { + if ( + !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) + .ifNotExists().thenPut(put) + ) { // Row exists, try to put if value == ACTIVE_SESSION_NO - if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) - .ifEquals(ACTIVE_SESSION_NO).thenPut(put)) { + if ( + !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) + .ifEquals(ACTIVE_SESSION_NO).thenPut(put) + ) { throw new ExclusiveOperationException(); } } @@ -613,8 +631,10 @@ public void finishBackupExclusiveOperation() throws IOException { try (Table table = connection.getTable(tableName)) { Put put = createPutForStopBackupSession(); - if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) - .ifEquals(ACTIVE_SESSION_YES).thenPut(put)) { + if ( + !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) + .ifEquals(ACTIVE_SESSION_YES).thenPut(put) + ) { throw new IOException("There is no active backup exclusive operation"); } } @@ -633,13 +653,13 @@ private Put createPutForStopBackupSession() { * @throws IOException exception */ public HashMap readRegionServerLastLogRollResult(String backupRoot) - throws IOException { + throws IOException { LOG.trace("read region server last roll log result to backup system table"); Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot); try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { Result res; HashMap rsTimestampMap = new HashMap<>(); while ((res = scanner.next()) != null) { @@ -656,13 +676,13 @@ public HashMap readRegionServerLastLogRollResult(String backupRoot /** * Writes Region Server last roll log result (timestamp) to backup system table table - * @param server Region Server name - * @param ts last log timestamp + * @param server Region Server name + * @param ts last log timestamp * @param backupRoot root directory path to backup * @throws IOException exception */ public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) - throws IOException { + throws IOException { LOG.trace("write region server last roll log result to backup system table"); try (Table table = connection.getTable(tableName)) { @@ -710,7 +730,7 @@ public List getHistory(int n) throws IOException { /** * Get backup history records filtered by list of filters. 
- * @param n max number of records, if n == -1 , then max number is ignored + * @param n max number of records, if n == -1 , then max number is ignored * @param filters list of filters * @return backup records * @throws IOException if getting the backup history fails @@ -793,7 +813,7 @@ public List getBackupHistoryForTable(TableName name) throws IOExcept } public Map> getBackupHistoryForTableSet(Set set, - String backupRoot) throws IOException { + String backupRoot) throws IOException { List history = getBackupHistory(backupRoot); Map> tableHistoryMap = new HashMap<>(); for (Iterator iterator = history.iterator(); iterator.hasNext();) { @@ -829,7 +849,7 @@ public ArrayList getBackupInfos(BackupState state) throws IOExceptio ArrayList list = new ArrayList<>(); try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { Result res; while ((res = scanner.next()) != null) { res.advance(); @@ -847,16 +867,16 @@ public ArrayList getBackupInfos(BackupState state) throws IOExceptio * Write the current timestamps for each regionserver to backup system table after a successful * full or incremental backup. The saved timestamp is of the last log file that was backed up * already. - * @param tables tables + * @param tables tables * @param newTimestamps timestamps - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ - public void writeRegionServerLogTimestamp(Set tables, - Map newTimestamps, String backupRoot) throws IOException { + public void writeRegionServerLogTimestamp(Set tables, Map newTimestamps, + String backupRoot) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("write RS log time stamps to backup system table for tables [" - + StringUtils.join(tables, ",") + "]"); + + StringUtils.join(tables, ",") + "]"); } List puts = new ArrayList<>(); for (TableName table : tables) { @@ -879,7 +899,7 @@ public void writeRegionServerLogTimestamp(Set tables, * @throws IOException exception */ public Map> readLogTimestampMap(String backupRoot) - throws IOException { + throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("read RS log ts from backup system table for root=" + backupRoot); } @@ -888,7 +908,7 @@ public Map> readLogTimestampMap(String backupRoot) Scan scan = createScanForReadLogTimestampMap(backupRoot); try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { Result res; while ((res = scanner.next()) != null) { res.advance(); @@ -899,11 +919,11 @@ public Map> readLogTimestampMap(String backupRoot) byte[] data = CellUtil.cloneValue(cell); if (data == null) { throw new IOException("Data of last backup data from backup system table " - + "is empty. Create a backup first."); + + "is empty. 
Create a backup first."); } if (data != null && data.length > 0) { HashMap lastBackup = - fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); + fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); tableTimestampMap.put(tn, lastBackup); } } @@ -912,11 +932,11 @@ public Map> readLogTimestampMap(String backupRoot) } private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, - Map map) { + Map map) { BackupProtos.TableServerTimestamp.Builder tstBuilder = - BackupProtos.TableServerTimestamp.newBuilder(); + BackupProtos.TableServerTimestamp.newBuilder(); tstBuilder - .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table)); + .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table)); for (Entry entry : map.entrySet()) { BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); @@ -939,7 +959,7 @@ private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName List list = proto.getServerTimestampList(); for (BackupProtos.ServerTimestamp st : list) { ServerName sn = - org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName()); + org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName()); map.put(sn.getHostname() + ":" + sn.getPort(), st.getTimestamp()); } return map; @@ -973,12 +993,12 @@ public Set getIncrementalBackupTableSet(String backupRoot) throws IOE /** * Add tables to global incremental backup set - * @param tables set of tables + * @param tables set of tables * @param backupRoot root directory path to backup * @throws IOException exception */ public void addIncrementalBackupTableSet(Set tables, String backupRoot) - throws IOException { + throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Add incremental backup table set to backup system table. 
ROOT=" + backupRoot + " tables [" + StringUtils.join(tables, " ") + "]"); @@ -1019,7 +1039,7 @@ public boolean hasBackupSessions() throws IOException { Scan scan = createScanForBackupHistory(); scan.setCaching(1); try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { if (scanner.next() != null) { result = true; } @@ -1073,13 +1093,13 @@ public List describeBackupSet(String name) throws IOException { res.advance(); String[] tables = cellValueToBackupSet(res.current()); return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item)) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } } /** * Add backup set (list of tables) - * @param name set name + * @param name set name * @param newTables list of tables, comma-separated * @throws IOException if a table operation fails */ @@ -1105,7 +1125,7 @@ public void addToBackupSet(String name, String[] newTables) throws IOException { /** * Remove tables from backup set (list of tables) - * @param name set name + * @param name set name * @param toRemove list of tables * @throws IOException if a table operation or deleting the backup set fails */ @@ -1132,7 +1152,7 @@ public void removeFromBackupSet(String name, String[] toRemove) throws IOExcepti table.put(put); } else if (disjoint.length == tables.length) { LOG.warn("Backup set '" + name + "' does not contain tables [" - + StringUtils.join(toRemove, " ") + "]"); + + StringUtils.join(toRemove, " ") + "]"); } else { // disjoint.length == 0 and tables.length >0 // Delete backup set LOG.info("Backup set '" + name + "' is empty. Deleting."); @@ -1176,7 +1196,7 @@ public static TableDescriptor getSystemTableDescriptor(Configuration conf) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf)); ColumnFamilyDescriptorBuilder colBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); + ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); colBuilder.setMaxVersions(1); Configuration config = HBaseConfiguration.create(); @@ -1213,10 +1233,10 @@ public static String getSnapshotName(Configuration conf) { */ public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf)); + TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf)); ColumnFamilyDescriptorBuilder colBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); + ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); colBuilder.setMaxVersions(1); Configuration config = HBaseConfiguration.create(); int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY, @@ -1375,11 +1395,11 @@ private BackupInfo cellToBackupInfo(Cell current) throws IOException { /** * Creates Put to write RS last roll log timestamp map * @param table table - * @param smap map, containing RS:ts + * @param smap map, containing RS:ts * @return put operation */ private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, - String backupRoot) { + String backupRoot) { Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap); return put; @@ -1414,12 +1434,12 @@ private String getTableNameForReadLogTimestampMap(byte[] cloneRow) { /** * Creates Put to store RS last log result - * @param server server 
name + * @param server server name * @param timestamp log roll result (timestamp) * @return put operation */ private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, - String backupRoot) { + String backupRoot) { Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"), Bytes.toBytes(timestamp)); @@ -1458,7 +1478,7 @@ private String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { * Creates Put's for bulk load resulting from running LoadIncrementalHFiles */ static List createPutForCommittedBulkload(TableName table, byte[] region, - Map> finalPaths) { + Map> finalPaths) { List puts = new ArrayList<>(); for (Map.Entry> entry : finalPaths.entrySet()) { for (Path path : entry.getValue()) { @@ -1472,8 +1492,8 @@ static List createPutForCommittedBulkload(TableName table, byte[] region, put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file)); put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT); puts.add(put); - LOG.debug( - "writing done bulk path " + file + " for " + table + " " + Bytes.toString(region)); + LOG + .debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region)); } } return puts; @@ -1538,7 +1558,7 @@ public static void deleteSnapshot(Connection conn) throws IOException { * Creates Put's for bulk load resulting from running LoadIncrementalHFiles */ static List createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family, - final List> pairs) { + final List> pairs) { List puts = new ArrayList<>(pairs.size()); for (Pair pair : pairs) { Path path = pair.getSecond(); @@ -1740,8 +1760,8 @@ static String getRegionNameFromOrigBulkLoadRow(String rowStr) { */ static Scan createScanForBulkLoadedFiles(String backupId) { Scan scan = new Scan(); - byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES - : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM); + byte[] startRow = + backupId == null ? 
BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.withStartRow(startRow); @@ -1752,7 +1772,7 @@ static Scan createScanForBulkLoadedFiles(String backupId) { } static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId, - long ts, int idx) { + long ts, int idx) { Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx)); put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName()); put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam); @@ -1798,7 +1818,7 @@ private Delete createDeleteForBackupSet(String name) { /** * Creates Put operation to update backup set content - * @param name backup set's name + * @param name backup set's name * @param tables list of tables * @return put operation */ diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java index af7fd8bb1c89..d5c4ab31c655 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index 6ad409e70b36..fee2e825728e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY; @@ -28,7 +27,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupCopyJob; import org.apache.hadoop.hbase.backup.BackupInfo; @@ -48,7 +46,6 @@ /** * Full table backup implementation - * */ @InterfaceAudience.Private public class FullTableBackupClient extends TableBackupClient { @@ -58,7 +55,7 @@ public FullTableBackupClient() { } public FullTableBackupClient(final Connection conn, final String backupId, BackupRequest request) - throws IOException { + throws IOException { super(conn, backupId, request); } @@ -117,7 +114,7 @@ protected void snapshotCopy(BackupInfo backupInfo) throws Exception { LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] - + " with reason code " + res); + + " with reason code " + res); } conf.unset(JOB_NAME_CONF_KEY); @@ -127,7 +124,6 @@ protected void snapshotCopy(BackupInfo backupInfo) throws Exception { /** * Backup request execution. - * * @throws IOException if the execution of the backup fails */ @Override @@ -163,9 +159,8 @@ public void execute() throws IOException { // SNAPSHOT_TABLES: backupInfo.setPhase(BackupPhase.SNAPSHOT); for (TableName tableName : tableList) { - String snapshotName = - "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" - + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); snapshotTable(admin, tableName, snapshotName); backupInfo.setSnapshotName(tableName, snapshotName); @@ -187,12 +182,11 @@ public void execute() throws IOException { backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps); Map> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); + backupManager.readLogTimestampMap(); backupInfo.setTableSetTimestampMap(newTableSetTimestampMap); Long newStartCode = - BackupUtils.getMinValue(BackupUtils - .getRSLogTimestampMins(newTableSetTimestampMap)); + BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); // backup complete @@ -205,11 +199,9 @@ public void execute() throws IOException { } protected void snapshotTable(Admin admin, TableName tableName, String snapshotName) - throws IOException { - int maxAttempts = - conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS); - int pause = - conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS); + throws IOException { + int maxAttempts = conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS); + int pause = conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS); int attempts = 0; while (attempts++ < maxAttempts) { @@ -218,7 +210,7 @@ protected void snapshotTable(Admin admin, TableName tableName, String snapshotNa return; } catch (IOException ee) { LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName - + ", sleeping for " + pause + "ms", ee); + + ", sleeping for " + pause + "ms", ee); if (attempts < maxAttempts) { try { Thread.sleep(pause); @@ -229,6 +221,6 @@ protected void snapshotTable(Admin 
admin, TableName tableName, String snapshotNa } } } - throw new IOException("Failed to snapshot table "+ tableName); + throw new IOException("Failed to snapshot table " + tableName); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 847837f04424..c92c0747e83c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; @@ -77,11 +76,11 @@ public Map getIncrBackupLogFileMap() throws IOException { LOG.debug("StartCode " + savedStartCode + "for backupID " + backupInfo.getBackupId()); } // get all new log files from .logs and .oldlogs after last TS and before new timestamp - if (savedStartCode == null || previousTimestampMins == null - || previousTimestampMins.isEmpty()) { - throw new IOException( - "Cannot read any previous back up timestamps from backup system table. " - + "In order to create an incremental backup, at least one full backup is needed."); + if ( + savedStartCode == null || previousTimestampMins == null || previousTimestampMins.isEmpty() + ) { + throw new IOException("Cannot read any previous back up timestamps from backup system table. " + + "In order to create an incremental backup, at least one full backup is needed."); } LOG.info("Execute roll log procedure for incremental backup ..."); @@ -103,9 +102,9 @@ public Map getIncrBackupLogFileMap() throws IOException { private List excludeProcV2WALs(List logList) { List list = new ArrayList<>(); - for (int i=0; i < logList.size(); i++) { + for (int i = 0; i < logList.size(); i++) { Path p = new Path(logList.get(i)); - String name = p.getName(); + String name = p.getName(); if (name.startsWith(WALProcedureStore.LOG_PREFIX)) { continue; @@ -119,18 +118,18 @@ private List excludeProcV2WALs(List logList) { /** * For each region server: get all log files newer than the last timestamps but not newer than the * newest timestamps. - * @param olderTimestamps the timestamp for each region server of the last backup. + * @param olderTimestamps the timestamp for each region server of the last backup. * @param newestTimestamps the timestamp for each region server that the backup should lead to. 
- * @param conf the Hadoop and Hbase configuration - * @param savedStartCode the startcode (timestamp) of last successful backup. + * @param conf the Hadoop and Hbase configuration + * @param savedStartCode the startcode (timestamp) of last successful backup. * @return a list of log files to be backed up * @throws IOException exception */ private List getLogFilesForNewBackup(Map olderTimestamps, - Map newestTimestamps, Configuration conf, String savedStartCode) - throws IOException { + Map newestTimestamps, Configuration conf, String savedStartCode) + throws IOException { LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps - + "\n newestTimestamps: " + newestTimestamps); + + "\n newestTimestamps: " + newestTimestamps); Path walRootDir = CommonFSUtils.getWALRootDir(conf); Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME); @@ -191,10 +190,10 @@ private List getLogFilesForNewBackup(Map olderTimestamps, // or RS is down (was decommisioned). In any case, we treat this // log file as eligible for inclusion into incremental backup log list Long ts = newestTimestamps.get(host); - if (ts == null) { + if (ts == null) { LOG.warn("ORPHAN log found: " + log + " host=" + host); LOG.debug("Known hosts (from newestTimestamps):"); - for (String s: newestTimestamps.keySet()) { + for (String s : newestTimestamps.keySet()) { LOG.debug(s); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index 918e99a444fd..0e800ea520b4 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; @@ -53,9 +52,7 @@ import org.slf4j.LoggerFactory; /** - * Incremental backup implementation. - * See the {@link #execute() execute} method. - * + * Incremental backup implementation. See the {@link #execute() execute} method. */ @InterfaceAudience.Private public class IncrementalTableBackupClient extends TableBackupClient { @@ -65,7 +62,7 @@ protected IncrementalTableBackupClient() { } public IncrementalTableBackupClient(final Connection conn, final String backupId, - BackupRequest request) throws IOException { + BackupRequest request) throws IOException { super(conn, backupId, request); } @@ -105,19 +102,19 @@ protected static int getIndex(TableName tbl, List sTableList) { } /* - * Reads bulk load records from backup table, iterates through the records and forms the paths - * for bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination + * Reads bulk load records from backup table, iterates through the records and forms the paths for + * bulk loaded hfiles. 
Copies the bulk loaded hfiles to backup destination * @param sTableList list of tables to be backed up * @return map of table to List of files */ @SuppressWarnings("unchecked") protected Map>[] handleBulkLoad(List sTableList) - throws IOException { + throws IOException { Map>[] mapForSrc = new Map[sTableList.size()]; List activeFiles = new ArrayList<>(); List archiveFiles = new ArrayList<>(); Pair>>>>, List> pair = - backupManager.readBulkloadRows(sTableList); + backupManager.readBulkloadRows(sTableList); Map>>>> map = pair.getFirst(); FileSystem tgtFs; try { @@ -128,8 +125,8 @@ protected Map>[] handleBulkLoad(List sTableList) Path rootdir = CommonFSUtils.getRootDir(conf); Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId); - for (Map.Entry>>>> tblEntry : - map.entrySet()) { + for (Map.Entry>>>> tblEntry : map + .entrySet()) { TableName srcTable = tblEntry.getKey(); int srcIdx = getIndex(srcTable, sTableList); @@ -142,14 +139,14 @@ protected Map>[] handleBulkLoad(List sTableList) } Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable); Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()), - srcTable.getQualifierAsString()); - for (Map.Entry>>> regionEntry : - tblEntry.getValue().entrySet()){ + srcTable.getQualifierAsString()); + for (Map.Entry>>> regionEntry : tblEntry + .getValue().entrySet()) { String regionName = regionEntry.getKey(); Path regionDir = new Path(tblDir, regionName); // map from family to List of hfiles - for (Map.Entry>> famEntry : - regionEntry.getValue().entrySet()) { + for (Map.Entry>> famEntry : regionEntry.getValue() + .entrySet()) { String fam = famEntry.getKey(); Path famDir = new Path(regionDir, fam); List files; @@ -170,7 +167,7 @@ protected Map>[] handleBulkLoad(List sTableList) int idx = file.lastIndexOf("/"); String filename = file; if (idx > 0) { - filename = file.substring(idx+1); + filename = file.substring(idx + 1); } Path p = new Path(famDir, filename); Path tgt = new Path(tgtFam, filename); @@ -183,7 +180,7 @@ protected Map>[] handleBulkLoad(List sTableList) LOG.trace("copying " + p + " to " + tgt); } activeFiles.add(p.toString()); - } else if (fs.exists(archive)){ + } else if (fs.exists(archive)) { LOG.debug("copying archive " + archive + " to " + tgt); archiveFiles.add(archive.toString()); } @@ -199,7 +196,7 @@ protected Map>[] handleBulkLoad(List sTableList) } private void copyBulkLoadedFiles(List activeFiles, List archiveFiles) - throws IOException { + throws IOException { try { // Enable special mode of BackupDistCp conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5); @@ -207,8 +204,8 @@ private void copyBulkLoadedFiles(List activeFiles, List archiveF String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId(); int attempt = 1; while (activeFiles.size() > 0) { - LOG.info("Copy "+ activeFiles.size() + - " active bulk loaded files. Attempt ="+ (attempt++)); + LOG.info( + "Copy " + activeFiles.size() + " active bulk loaded files. 
Attempt =" + (attempt++)); String[] toCopy = new String[activeFiles.size()]; activeFiles.toArray(toCopy); // Active file can be archived during copy operation, @@ -245,7 +242,7 @@ private void copyBulkLoadedFiles(List activeFiles, List archiveF } private void updateFileLists(List activeFiles, List archiveFiles) - throws IOException { + throws IOException { List newlyArchived = new ArrayList<>(); for (String spath : activeFiles) { @@ -269,9 +266,8 @@ public void execute() throws IOException { beginBackup(backupManager, backupInfo); backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL); LOG.debug("For incremental backup, current table set is " - + backupManager.getIncrementalBackupTableSet()); - newTimestamps = - ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap(); + + backupManager.getIncrementalBackupTableSet()); + newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap(); } catch (Exception e) { // fail the overall backup and return failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", @@ -285,8 +281,8 @@ public void execute() throws IOException { BackupUtils.copyTableRegionInfo(conn, backupInfo, conf); // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT convertWALsToHFiles(); - incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, - backupInfo.getBackupRootDir()); + incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() }, + backupInfo.getBackupRootDir()); } catch (Exception e) { String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId; // fail the overall backup and return @@ -298,8 +294,7 @@ public void execute() throws IOException { // After this checkpoint, even if entering cancel process, will let the backup finished try { // Set the previousTimestampMap which is before this current log roll to the manifest. - Map> previousTimestampMap = - backupManager.readLogTimestampMap(); + Map> previousTimestampMap = backupManager.readLogTimestampMap(); backupInfo.setIncrTimestampMap(previousTimestampMap); // The table list in backupInfo is good for both full backup and incremental backup. 
@@ -307,11 +302,11 @@ public void execute() throws IOException { backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps); Map> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); + backupManager.readLogTimestampMap(); backupInfo.setTableSetTimestampMap(newTableSetTimestampMap); Long newStartCode = - BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); + BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); handleBulkLoad(backupInfo.getTableNames()); @@ -345,11 +340,11 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr); if (res != 0) { LOG.error("Copy incremental HFile files failed with return code: " + res + "."); - throw new IOException("Failed copy from " + StringUtils.join(files, ',') - + " to " + backupDest); + throw new IOException( + "Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest); } - LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') - + " to " + backupDest + " finished."); + LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest + + " finished."); } finally { deleteBulkLoadDirectory(); } @@ -398,7 +393,7 @@ protected void walToHFiles(List dirPaths, List tableList) throws // a Map task for each file. We use ';' as separator // because WAL file names contains ',' String dirs = StringUtils.join(dirPaths, ';'); - String jobname = "Incremental_Backup-" + backupId ; + String jobname = "Incremental_Backup-" + backupId; Path bulkOutputPath = getBulkOutputDir(); conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); @@ -410,7 +405,7 @@ protected void walToHFiles(List dirPaths, List tableList) throws try { player.setConf(conf); int result = player.run(playerArgs); - if(result != 0) { + if (result != 0) { throw new IOException("WAL Player failed"); } conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); @@ -419,7 +414,7 @@ protected void walToHFiles(List dirPaths, List tableList) throws throw e; } catch (Exception ee) { throw new IOException("Can not convert from directory " + dirs - + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee); + + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java index 5ec44ee93964..3c0eafadb82a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; @@ -25,7 +24,6 @@ import java.util.HashMap; import java.util.List; import java.util.TreeSet; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -47,7 +45,6 @@ /** * Restore table implementation - * */ @InterfaceAudience.Private public class RestoreTablesClient { @@ -76,7 +73,6 @@ public RestoreTablesClient(Connection conn, RestoreRequest request) { /** * Validate target tables. - * * @param tTableArray target tables * @param isOverwrite overwrite existing table * @throws IOException exception @@ -95,26 +91,25 @@ private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) thr } } else { LOG.info("HBase table " + tableName - + " does not exist. It will be created during restore process"); + + " does not exist. It will be created during restore process"); } } } if (existTableList.size() > 0) { if (!isOverwrite) { - LOG.error("Existing table (" + existTableList - + ") found in the restore target, please add " - + "\"-o\" as overwrite option in the command if you mean" - + " to restore to these existing tables"); - throw new IOException("Existing table found in target while no \"-o\" " - + "as overwrite option found"); + LOG.error("Existing table (" + existTableList + ") found in the restore target, please add " + + "\"-o\" as overwrite option in the command if you mean" + + " to restore to these existing tables"); + throw new IOException( + "Existing table found in target while no \"-o\" " + "as overwrite option found"); } else { if (disabledTableList.size() > 0) { LOG.error("Found offline table in the restore target, " - + "please enable them before restore with \"-overwrite\" option"); + + "please enable them before restore with \"-overwrite\" option"); LOG.info("Offline table list in restore target: " + disabledTableList); throw new IOException( - "Found offline table in the target when restore with \"-overwrite\" option"); + "Found offline table in the target when restore with \"-overwrite\" option"); } } } @@ -122,16 +117,15 @@ private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) thr /** * Restore operation handle each backupImage in array. 
- * - * @param images array BackupImage - * @param sTable table to be restored - * @param tTable table to be restored to + * @param images array BackupImage + * @param sTable table to be restored + * @param tTable table to be restored to * @param truncateIfExists truncate table * @throws IOException exception */ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable, - boolean truncateIfExists) throws IOException { + boolean truncateIfExists) throws IOException { // First image MUST be image of a FULL backup BackupImage image = images[0]; String rootDir = image.getRootDir(); @@ -144,7 +138,7 @@ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTa BackupManifest manifest = HBackupFileSystem.getManifest(conf, backupRoot, backupId); if (manifest.getType() == BackupType.FULL) { LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image " - + tableBackupPath.toString()); + + tableBackupPath.toString()); conf.set(JOB_NAME_CONF_KEY, "Full_Restore-" + backupId + "-" + tTable); restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists, lastIncrBackupId); @@ -164,7 +158,7 @@ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTa for (int i = 1; i < images.length; i++) { BackupImage im = images[i]; String fileBackupDir = - HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable); + HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable); List list = getFilesRecursively(fileBackupDir); dirList.addAll(list); @@ -186,7 +180,7 @@ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTa } private List getFilesRecursively(String fileBackupDir) - throws IllegalArgumentException, IOException { + throws IllegalArgumentException, IOException { FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration()); List list = new ArrayList<>(); RemoteIterator it = fs.listFiles(new Path(fileBackupDir), true); @@ -202,12 +196,12 @@ private List getFilesRecursively(String fileBackupDir) /** * Restore operation. 
Stage 2: resolved Backup Image dependency * @param backupManifestMap : tableName, Manifest - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to * @throws IOException exception */ private void restore(HashMap backupManifestMap, - TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { + TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { TreeSet restoreImageSet = new TreeSet<>(); for (int i = 0; i < sTableArray.length; i++) { @@ -229,8 +223,7 @@ private void restore(HashMap backupManifestMap, LOG.info("Restore includes the following image(s):"); for (BackupImage image : restoreImageSet) { LOG.info("Backup: " + image.getBackupId() + " " - + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), - table)); + + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table)); } } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java index 57f3a50a8eb7..0ca5509262fa 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,10 +44,9 @@ import org.slf4j.LoggerFactory; /** - * Base class for backup operation. Concrete implementation for - * full and incremental backup are delegated to corresponding sub-classes: - * {@link FullTableBackupClient} and {@link IncrementalTableBackupClient} - * + * Base class for backup operation. 
Concrete implementation for full and incremental backup are + * delegated to corresponding sub-classes: {@link FullTableBackupClient} and + * {@link IncrementalTableBackupClient} */ @InterfaceAudience.Private public abstract class TableBackupClient { @@ -72,12 +71,12 @@ public TableBackupClient() { } public TableBackupClient(final Connection conn, final String backupId, BackupRequest request) - throws IOException { + throws IOException { init(conn, backupId, request); } public void init(final Connection conn, final String backupId, BackupRequest request) - throws IOException { + throws IOException { if (request.getBackupType() == BackupType.FULL) { backupManager = new BackupManager(conn, conn.getConfiguration()); } else { @@ -88,9 +87,8 @@ public void init(final Connection conn, final String backupId, BackupRequest req this.conn = conn; this.conf = conn.getConfiguration(); this.fs = CommonFSUtils.getCurrentFileSystem(conf); - backupInfo = - backupManager.createBackupInfo(backupId, request.getBackupType(), tableList, - request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth()); + backupInfo = backupManager.createBackupInfo(backupId, request.getBackupType(), tableList, + request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth()); if (tableList == null || tableList.isEmpty()) { this.tableList = new ArrayList<>(backupInfo.getTables()); } @@ -104,7 +102,7 @@ public void init(final Connection conn, final String backupId, BackupRequest req * @throws IOException exception */ protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo) - throws IOException { + throws IOException { BackupSystemTable.snapshot(conn); backupManager.setBackupInfo(backupInfo); @@ -136,7 +134,7 @@ protected String getMessage(Exception e) { * @throws IOException exception */ protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo, - Configuration conf) throws IOException { + Configuration conf) throws IOException { LOG.debug("Trying to delete snapshot for full backup."); for (String snapshotName : backupInfo.getSnapshotNames()) { if (snapshotName == null) { @@ -148,7 +146,7 @@ protected static void deleteSnapshots(final Connection conn, BackupInfo backupIn admin.deleteSnapshot(snapshotName); } LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId() - + " succeeded."); + + " succeeded."); } } @@ -159,9 +157,8 @@ protected static void deleteSnapshots(final Connection conn, BackupInfo backupIn */ protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException { FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); - Path stagingDir = - new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() - .toString())); + Path stagingDir = new Path( + conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory().toString())); FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir); if (files == null) { return; @@ -177,30 +174,29 @@ protected static void cleanupExportSnapshotLog(Configuration conf) throws IOExce } /** - * Clean up the uncompleted data at target directory if the ongoing backup has already entered - * the copy phase. + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. 
*/ protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) { try { // clean up the uncompleted data at target directory if the ongoing backup has already entered // the copy phase - LOG.debug("Trying to cleanup up target dir. Current backup phase: " - + backupInfo.getPhase()); - if (backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + LOG.debug("Trying to cleanup up target dir. Current backup phase: " + backupInfo.getPhase()); + if ( + backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY) || backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY) - || backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)) { - FileSystem outputFs = - FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); + || backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST) + ) { + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); // now treat one backup as a transaction, clean up data that has been partially copied at // table level for (TableName table : backupInfo.getTables()) { - Path targetDirPath = - new Path(HBackupFileSystem.getTableBackupDir(backupInfo.getBackupRootDir(), - backupInfo.getBackupId(), table)); + Path targetDirPath = new Path(HBackupFileSystem + .getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table)); if (outputFs.delete(targetDirPath, true)) { - LOG.debug("Cleaning up uncompleted backup data at " + targetDirPath.toString() - + " done."); + LOG.debug( + "Cleaning up uncompleted backup data at " + targetDirPath.toString() + " done."); } else { LOG.debug("No data has been copied to " + targetDirPath.toString() + "."); } @@ -216,18 +212,18 @@ protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf } catch (IOException e1) { LOG.error("Cleaning up uncompleted backup data of " + backupInfo.getBackupId() + " at " - + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); + + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); } } /** * Fail the overall backup. * @param backupInfo backup info - * @param e exception + * @param e exception * @throws IOException exception */ protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager, - Exception e, String msg, BackupType type, Configuration conf) throws IOException { + Exception e, String msg, BackupType type, Configuration conf) throws IOException { try { LOG.error(msg + getMessage(e), e); // If this is a cancel exception, then we've already cleaned. 
@@ -238,10 +234,9 @@ protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager // set overall backup status: failed backupInfo.setState(BackupState.FAILED); // compose the backup failed data - String backupFailedData = - "BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs() - + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + backupInfo.getPhase() - + ",failedmessage=" + backupInfo.getFailedMsg(); + String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts=" + + backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg(); LOG.error(backupFailedData); cleanupAndRestoreBackupSystem(conn, backupInfo, conf); // If backup session is updated to FAILED state - means we @@ -256,7 +251,7 @@ protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager } public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo backupInfo, - Configuration conf) throws IOException { + Configuration conf) throws IOException { BackupType type = backupInfo.getType(); // if full backup, then delete HBase snapshots if there already are snapshots taken // and also clean up export snapshot log files if exist @@ -278,7 +273,7 @@ public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo bac * @throws IOException exception */ protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type, - Configuration conf) throws IOException { + Configuration conf) throws IOException { // set the overall backup phase : store manifest backupInfo.setPhase(BackupPhase.STORE_MANIFEST); @@ -365,7 +360,7 @@ protected void cleanupDistCpLog(BackupInfo backupInfo, Configuration conf) throw * @throws IOException exception */ protected void completeBackup(final Connection conn, BackupInfo backupInfo, - BackupManager backupManager, BackupType type, Configuration conf) throws IOException { + BackupManager backupManager, BackupType type, Configuration conf) throws IOException { // set the complete timestamp of the overall backup backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime()); // set overall backup status: complete @@ -376,9 +371,8 @@ protected void completeBackup(final Connection conn, BackupInfo backupInfo, // compose the backup complete data String backupCompleteData = - obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() - + ",completets=" + backupInfo.getCompleteTs() + ",bytescopied=" - + backupInfo.getTotalBytesCopied(); + obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() + ",completets=" + + backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getTotalBytesCopied(); if (LOG.isDebugEnabled()) { LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData); } @@ -404,23 +398,26 @@ protected void completeBackup(final Connection conn, BackupInfo backupInfo, /** * Backup request execution. 
- * * @throws IOException if the execution of the backup fails */ public abstract void execute() throws IOException; protected Stage getTestStage() { - return Stage.valueOf("stage_"+ conf.getInt(BACKUP_TEST_MODE_STAGE, 0)); + return Stage.valueOf("stage_" + conf.getInt(BACKUP_TEST_MODE_STAGE, 0)); } protected void failStageIf(Stage stage) throws IOException { Stage current = getTestStage(); if (current == stage) { - throw new IOException("Failed stage " + stage+" in testing"); + throw new IOException("Failed stage " + stage + " in testing"); } } public enum Stage { - stage_0, stage_1, stage_2, stage_3, stage_4 + stage_0, + stage_1, + stage_2, + stage_3, + stage_4 } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java index 3cc8bcf74091..5dca48788855 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -125,29 +124,27 @@ public BackupInfo getBackupInfo() { /** * Update the ongoing backup with new progress. - * @param backupInfo backup info + * @param backupInfo backup info * @param newProgress progress * @param bytesCopied bytes copied * @throws NoNodeException exception */ - static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, - int newProgress, long bytesCopied) throws IOException { + static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress, + long bytesCopied) throws IOException { // compose the new backup progress data, using fake number for now String backupProgressData = newProgress + "%"; backupInfo.setProgress(newProgress); backupManager.updateBackupInfo(backupInfo); LOG.debug("Backup progress data \"" + backupProgressData - + "\" has been updated to backup system table for " + backupInfo.getBackupId()); + + "\" has been updated to backup system table for " + backupInfo.getBackupId()); } /** - * Extends DistCp for progress updating to backup system table - * during backup. Using DistCpV2 (MAPREDUCE-2765). - * Simply extend it and override execute() method to get the - * Job reference for progress updating. - * Only the argument "src1, [src2, [...]] dst" is supported, - * no more DistCp options. + * Extends DistCp for progress updating to backup system table during backup. Using DistCpV2 + * (MAPREDUCE-2765). Simply extend it and override execute() method to get the Job reference for + * progress updating. Only the argument "src1, [src2, [...]] dst" is supported, no more DistCp + * options. 
*/ class BackupDistCp extends DistCp { @@ -156,14 +153,12 @@ class BackupDistCp extends DistCp { private BackupManager backupManager; public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backupInfo, - BackupManager backupManager) throws Exception { + BackupManager backupManager) throws Exception { super(conf, options); this.backupInfo = backupInfo; this.backupManager = backupManager; } - - @Override public Job execute() throws Exception { @@ -188,43 +183,41 @@ public Job execute() throws Exception { long totalSrcLgth = 0; for (Path aSrc : srcs) { - totalSrcLgth += - BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc); + totalSrcLgth += BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc); } // Async call job = super.execute(); // Update the copy progress to system table every 0.5s if progress value changed - int progressReportFreq = - MapReduceBackupCopyJob.this.getConf().getInt("hbase.backup.progressreport.frequency", - 500); + int progressReportFreq = MapReduceBackupCopyJob.this.getConf() + .getInt("hbase.backup.progressreport.frequency", 500); float lastProgress = progressDone; while (!job.isComplete()) { float newProgress = - progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); + progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); if (newProgress > lastProgress) { BigDecimal progressData = - new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); + new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); String newProgressStr = progressData + "%"; LOG.info("Progress: " + newProgressStr); updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied); LOG.debug("Backup progress data updated to backup system table: \"Progress: " - + newProgressStr + ".\""); + + newProgressStr + ".\""); lastProgress = newProgress; } Thread.sleep(progressReportFreq); } // update the progress data after copy job complete float newProgress = - progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); + progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); BigDecimal progressData = - new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); + new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); String newProgressStr = progressData + "%"; LOG.info("Progress: " + newProgressStr + " subTask: " + subTaskPercntgInWholeTask - + " mapProgress: " + job.mapProgress()); + + " mapProgress: " + job.mapProgress()); // accumulate the overall backup progress progressDone = newProgress; @@ -232,7 +225,7 @@ public Job execute() throws Exception { updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied); LOG.debug("Backup progress data updated to backup system table: \"Progress: " - + newProgressStr + " - " + bytesCopied + " bytes copied.\""); + + newProgressStr + " - " + bytesCopied + " bytes copied.\""); } catch (Throwable t) { LOG.error(t.toString(), t); throw t; @@ -241,8 +234,8 @@ public Job execute() throws Exception { String jobID = job.getJobID().toString(); job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); - LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " - + job.isSuccessful()); + LOG.debug( + "DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " + job.isSuccessful()); Counters ctrs = job.getCounters(); LOG.debug(Objects.toString(ctrs)); if (job.isComplete() && 
!job.isSuccessful()) { @@ -252,11 +245,11 @@ public Job execute() throws Exception { return job; } - private Field getInputOptionsField(Class classDistCp) throws IOException{ + private Field getInputOptionsField(Class classDistCp) throws IOException { Field f = null; try { f = classDistCp.getDeclaredField("inputOptions"); - } catch(Exception e) { + } catch (Exception e) { // Haddop 3 try { f = classDistCp.getDeclaredField("context"); @@ -268,7 +261,7 @@ private Field getInputOptionsField(Class classDistCp) throws IOException{ } @SuppressWarnings("unchecked") - private List getSourcePaths(Field fieldInputOptions) throws IOException{ + private List getSourcePaths(Field fieldInputOptions) throws IOException { Object options; try { options = fieldInputOptions.get(this); @@ -282,9 +275,8 @@ private List getSourcePaths(Field fieldInputOptions) throws IOException{ return (List) methodGetSourcePaths.invoke(options); } - } catch (IllegalArgumentException | IllegalAccessException | - ClassNotFoundException | NoSuchMethodException | - SecurityException | InvocationTargetException e) { + } catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException + | NoSuchMethodException | SecurityException | InvocationTargetException e) { throw new IOException(e); } @@ -321,8 +313,8 @@ protected Path createInputFileListing(Job job) throws IOException { cfg.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, fileListingPath.toString()); cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, totalRecords); } catch (NoSuchFieldException | SecurityException | IllegalArgumentException - | IllegalAccessException | NoSuchMethodException | ClassNotFoundException - | InvocationTargetException e) { + | IllegalAccessException | NoSuchMethodException | ClassNotFoundException + | InvocationTargetException e) { throw new IOException(e); } return fileListingPath; @@ -340,8 +332,8 @@ private Text getKey(Path path) { } private List getSourceFiles() throws NoSuchFieldException, SecurityException, - IllegalArgumentException, IllegalAccessException, NoSuchMethodException, - ClassNotFoundException, InvocationTargetException, IOException { + IllegalArgumentException, IllegalAccessException, NoSuchMethodException, + ClassNotFoundException, InvocationTargetException, IOException { Field options = null; try { options = DistCp.class.getDeclaredField("inputOptions"); @@ -352,8 +344,6 @@ private List getSourceFiles() throws NoSuchFieldException, SecurityExcepti return getSourcePaths(options); } - - private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException { FileSystem fs = pathToListFile.getFileSystem(conf); fs.delete(pathToListFile, false); @@ -367,15 +357,15 @@ private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException { /** * Do backup copy based on different types. 
- * @param context The backup info - * @param conf The hadoop configuration + * @param context The backup info + * @param conf The hadoop configuration * @param copyType The backup copy type - * @param options Options for customized ExportSnapshot or DistCp + * @param options Options for customized ExportSnapshot or DistCp * @throws Exception exception */ @Override public int copy(BackupInfo context, BackupManager backupManager, Configuration conf, - BackupType copyType, String[] options) throws IOException { + BackupType copyType, String[] options) throws IOException { int res = 0; try { @@ -391,7 +381,7 @@ public int copy(BackupInfo context, BackupManager backupManager, Configuration c setSubTaskPercntgInWholeTask(1f); BackupDistCp distcp = - new BackupDistCp(new Configuration(conf), null, context, backupManager); + new BackupDistCp(new Configuration(conf), null, context, backupManager); // Handle a special case where the source file is a single file. // In this case, distcp will not create the target dir. It just take the // target as a file name and copy source file to the target (as a file name). diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java index 375f34b48306..9a65ed929d7f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.backup.mapreduce; import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -52,9 +53,8 @@ import org.slf4j.LoggerFactory; /** - * MapReduce implementation of {@link BackupMergeJob} - * Must be initialized with configuration of a backup destination cluster - * + * MapReduce implementation of {@link BackupMergeJob} Must be initialized with configuration of a + * backup destination cluster */ @InterfaceAudience.Private public class MapReduceBackupMergeJob implements BackupMergeJob { @@ -119,9 +119,8 @@ public void run(String[] backupIds) throws IOException { Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds); String dirs = StringUtils.join(dirPaths, ","); - Path bulkOutputPath = - BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]), - getConf(), false); + Path bulkOutputPath = BackupUtils.getBulkOutputDir( + BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false); // Delete content if exists if (fs.exists(bulkOutputPath)) { if (!fs.delete(bulkOutputPath, true)) { @@ -136,7 +135,7 @@ public void run(String[] backupIds) throws IOException { int result = player.run(playerArgs); if (!succeeded(result)) { throw new IOException("Can not merge backup images for " + dirs - + " (check Hadoop/MR and HBase logs). Player return code =" + result); + + " (check Hadoop/MR and HBase logs). 
Player return code =" + result); } // Add to processed table list processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath)); @@ -149,14 +148,14 @@ public void run(String[] backupIds) throws IOException { // PHASE 2 (modification of a backup file system) // Move existing mergedBackupId data into tmp directory // we will need it later in case of a failure - Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, - mergedBackupId); + Path tmpBackupDir = + HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId); Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId); if (!fs.rename(backupDirPath, tmpBackupDir)) { - throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir); + throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir); } else { - LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir); + LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir); } // Move new data into backup dest for (Pair tn : processedTableList) { @@ -170,7 +169,7 @@ public void run(String[] backupIds) throws IOException { // Delete tmp dir (Rename back during repair) if (!fs.delete(tmpBackupDir, true)) { // WARN and ignore - LOG.warn("Could not delete tmp dir: "+ tmpBackupDir); + LOG.warn("Could not delete tmp dir: " + tmpBackupDir); } // Delete old data deleteBackupImages(backupsToDelete, conn, fs, backupRoot); @@ -193,8 +192,7 @@ public void run(String[] backupIds) throws IOException { } else { // backup repair must be run throw new IOException( - "Backup merge operation failed, run backup repair tool to restore system's integrity", - e); + "Backup merge operation failed, run backup repair tool to restore system's integrity", e); } } finally { table.close(); @@ -204,13 +202,13 @@ public void run(String[] backupIds) throws IOException { /** * Copy meta data to of a backup session - * @param fs file system - * @param tmpBackupDir temp backup directory, where meta is locaed + * @param fs file system + * @param tmpBackupDir temp backup directory, where meta is locaed * @param backupDirPath new path for backup * @throws IOException exception */ protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath) - throws IOException { + throws IOException { RemoteIterator it = fs.listFiles(tmpBackupDir, true); List toKeep = new ArrayList(); while (it.hasNext()) { @@ -220,8 +218,10 @@ protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath } // Keep meta String fileName = p.toString(); - if (fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0 - || fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0) { + if ( + fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0 + || fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0 + ) { toKeep.add(p); } } @@ -234,8 +234,8 @@ protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath /** * Copy file in DFS from p to newPath - * @param fs file system - * @param p old path + * @param fs file system + * @param p old path * @param newPath new path * @throws IOException exception */ @@ -249,12 +249,12 @@ protected void copyFile(FileSystem fs, Path p, Path newPath) throws IOException } } -/** - * Converts path before copying - * @param p path - * @param backupDirPath backup root - * @return converted path - */ + /** + * Converts path before copying + * @param p path + * @param backupDirPath backup root + * @return converted path + */ protected Path convertToDest(Path p, Path 
backupDirPath) { String backupId = backupDirPath.getName(); Stack stack = new Stack(); @@ -300,16 +300,16 @@ protected void cleanupBulkLoadDirs(FileSystem fs, List pathList) throws IO } protected void updateBackupManifest(String backupRoot, String mergedBackupId, - List backupsToDelete) throws IllegalArgumentException, IOException { + List backupsToDelete) throws IllegalArgumentException, IOException { BackupManifest manifest = - HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId); + HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId); manifest.getBackupImage().removeAncestors(backupsToDelete); // save back manifest.store(conf); } protected void deleteBackupImages(List backupIds, Connection conn, FileSystem fs, - String backupRoot) throws IOException { + String backupRoot) throws IOException { // Delete from backup system table try (BackupSystemTable table = new BackupSystemTable(conn)) { for (String backupId : backupIds) { @@ -339,24 +339,24 @@ protected List getBackupIdsToDelete(String[] backupIds, String mergedBac } protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath, - TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException { + TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException { Path dest = - new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName)); + new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName)); FileStatus[] fsts = fs.listStatus(bulkOutputPath); for (FileStatus fst : fsts) { if (fst.isDirectory()) { - String family = fst.getPath().getName(); + String family = fst.getPath().getName(); Path newDst = new Path(dest, family); if (fs.exists(newDst)) { if (!fs.delete(newDst, true)) { - throw new IOException("failed to delete :"+ newDst); + throw new IOException("failed to delete :" + newDst); } } else { fs.mkdirs(dest); } boolean result = fs.rename(fst.getPath(), dest); - LOG.debug("MoveData from "+ fst.getPath() +" to "+ dest+" result="+ result); + LOG.debug("MoveData from " + fst.getPath() + " to " + dest + " result=" + result); } } } @@ -365,7 +365,7 @@ protected TableName[] getTableNamesInBackupImages(String[] backupIds) throws IOE Set allSet = new HashSet<>(); try (Connection conn = ConnectionFactory.createConnection(conf); - BackupSystemTable table = new BackupSystemTable(conn)) { + BackupSystemTable table = new BackupSystemTable(conn)) { for (String backupId : backupIds) { BackupInfo bInfo = table.readBackupInfo(backupId); @@ -378,12 +378,12 @@ protected TableName[] getTableNamesInBackupImages(String[] backupIds) throws IOE } protected Path[] findInputDirectories(FileSystem fs, String backupRoot, TableName tableName, - String[] backupIds) throws IOException { + String[] backupIds) throws IOException { List dirs = new ArrayList<>(); for (String backupId : backupIds) { Path fileBackupDirPath = - new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName)); + new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName)); if (fs.exists(fileBackupDirPath)) { dirs.add(fileBackupDirPath); } else { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index b8d520c530c7..766a99d778b8 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java 
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.mapreduce; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; @@ -69,17 +68,15 @@ protected MapReduceHFileSplitterJob(final Configuration c) { } /** - * A mapper that just writes out cells. This one can be used together with - * {@link CellSortReducer} + * A mapper that just writes out cells. This one can be used together with {@link CellSortReducer} */ - static class HFileCellMapper extends - Mapper { + static class HFileCellMapper extends Mapper { @Override public void map(NullWritable key, Cell value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), - new MapReduceExtendedCell(value)); + new MapReduceExtendedCell(value)); } @Override @@ -100,9 +97,8 @@ public Job createSubmittableJob(String[] args) throws IOException { String tabName = args[1]; conf.setStrings(TABLES_KEY, tabName); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = - Job.getInstance(conf, - conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); + Job job = Job.getInstance(conf, + conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(MapReduceHFileSplitterJob.class); job.setInputFormatClass(HFileInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); @@ -116,8 +112,8 @@ public Job createSubmittableJob(String[] args) throws IOException { FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); try (Connection conn = ConnectionFactory.createConnection(conf); - Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + Table table = conn.getTable(tableName); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); } LOG.debug("success configuring load incremental job"); @@ -145,9 +141,9 @@ private void usage(final String errorMsg) { System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println("Other options:"); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the HFile splitter"); + + "=jobName - use the specified mapreduce job name for the HFile splitter"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); } /** diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java index 9daa282ffad4..e6046bf5fb92 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java @@ -1,13 +1,13 @@ -/** +/* * 
Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,13 +34,10 @@ import org.slf4j.LoggerFactory; /** - * MapReduce implementation of {@link RestoreJob} - * - * For backup restore, it runs {@link MapReduceHFileSplitterJob} job and creates - * HFiles which are aligned with a region boundaries of a table being - * restored. - * - * The resulting HFiles then are loaded using HBase bulk load tool {@link BulkLoadHFiles}. + * MapReduce implementation of {@link RestoreJob} For backup restore, it runs + * {@link MapReduceHFileSplitterJob} job and creates HFiles which are aligned with a region + * boundaries of a table being restored. The resulting HFiles then are loaded using HBase bulk load + * tool {@link BulkLoadHFiles}. */ @InterfaceAudience.Private public class MapReduceRestoreJob implements RestoreJob { @@ -54,7 +51,7 @@ public MapReduceRestoreJob() { @Override public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNames, - boolean fullBackupRestore) throws IOException { + boolean fullBackupRestore) throws IOException { String bulkOutputConfKey; player = new MapReduceHFileSplitterJob(); @@ -65,24 +62,21 @@ public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNam if (LOG.isDebugEnabled()) { LOG.debug("Restore " + (fullBackupRestore ? "full" : "incremental") - + " backup from directory " + dirs + " from hbase tables " - + StringUtils.join(tableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND) - + " to tables " - + StringUtils.join(newTableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND)); + + " backup from directory " + dirs + " from hbase tables " + + StringUtils.join(tableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND) + + " to tables " + + StringUtils.join(newTableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND)); } for (int i = 0; i < tableNames.length; i++) { LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]); - Path bulkOutputPath = - BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), - getConf()); + Path bulkOutputPath = BackupUtils + .getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf()); Configuration conf = getConf(); conf.set(bulkOutputConfKey, bulkOutputPath.toString()); - String[] playerArgs = { - dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i] - .getNameAsString() - }; + String[] playerArgs = { dirs, + fullBackupRestore ? 
newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() }; int result; try { @@ -97,18 +91,18 @@ public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNam } if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) { - throw new IOException("Can not restore from backup directory " + dirs + - " (check Hadoop and HBase logs). Bulk loader returns null"); + throw new IOException("Can not restore from backup directory " + dirs + + " (check Hadoop and HBase logs). Bulk loader returns null"); } } else { throw new IOException("Can not restore from backup directory " + dirs - + " (check Hadoop/MR and HBase logs). Player return code =" + result); + + " (check Hadoop/MR and HBase logs). Player return code =" + result); } LOG.debug("Restore Job finished:" + result); } catch (Exception e) { LOG.error(e.toString(), e); - throw new IOException("Can not restore from backup directory " + dirs - + " (check Hadoop and HBase logs) ", e); + throw new IOException( + "Can not restore from backup directory " + dirs + " (check Hadoop and HBase logs) ", e); } } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 79404b34e6de..f3ddda499b0f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; @@ -62,8 +62,7 @@ public BackupLogCleaner() { @Override public void init(Map params) { - MasterServices master = (MasterServices) MapUtils.getObject(params, - HMaster.MASTER); + MasterServices master = (MasterServices) MapUtils.getObject(params, HMaster.MASTER); if (master != null) { conn = master.getConnection(); if (getConf() == null) { @@ -79,7 +78,6 @@ public void init(Map params) { } } - private Map getServersToOldestBackupMapping(List backups) throws IOException { Map serverAddressToLastBackupMap = new HashMap<>(); @@ -136,8 +134,10 @@ public Iterable getDeletableFiles(Iterable files) { Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath())); long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName()); - if (!addressToLastBackupMap.containsKey(walServerAddress) - || addressToLastBackupMap.get(walServerAddress) >= walTimestamp) { + if ( + !addressToLastBackupMap.containsKey(walServerAddress) + || addressToLastBackupMap.get(walServerAddress) >= walTimestamp + ) { filteredFiles.add(file); } } catch (Exception ex) { @@ -147,8 +147,8 @@ public Iterable getDeletableFiles(Iterable files) { } } - LOG - .info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), filteredFiles.size()); + LOG.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), + filteredFiles.size()); return filteredFiles; } @@ -156,8 +156,10 @@ public Iterable getDeletableFiles(Iterable files) { public void setConf(Configuration config) { // If backup is disabled, keep all members null 
super.setConf(config); - if (!config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, - BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) { + if ( + !config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, + BackupRestoreConstants.BACKUP_ENABLE_DEFAULT) + ) { LOG.warn("Backup is disabled - allowing all wals to be deleted"); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java index 32e3e23fdafb..12b0bda56152 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.master; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ThreadPoolExecutor; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; @@ -61,7 +59,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager { public static final String BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis"; public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis"; public static final String BACKUP_POOL_THREAD_NUMBER_KEY = - "hbase.backup.logroll.pool.thread.number"; + "hbase.backup.logroll.pool.thread.number"; public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500; public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000; @@ -82,26 +80,24 @@ public boolean isStopped() { @Override public void initialize(MasterServices master, MetricsMaster metricsMaster) - throws IOException, UnsupportedOperationException { + throws IOException, UnsupportedOperationException { this.master = master; this.done = false; // setup the default procedure coordinator String name = master.getServerName().toString(); - // get the configuration for the coordinator Configuration conf = master.getConfiguration(); long wakeFrequency = conf.getInt(BACKUP_WAKE_MILLIS_KEY, BACKUP_WAKE_MILLIS_DEFAULT); - long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY,BACKUP_TIMEOUT_MILLIS_DEFAULT); - int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, - BACKUP_POOL_THREAD_NUMBER_DEFAULT); + long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); + int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, BACKUP_POOL_THREAD_NUMBER_DEFAULT); // setup the default procedure coordinator ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, opThreads); ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(master); ProcedureCoordinatorRpcs comms = - coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name); + coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name); this.coordinator = new ProcedureCoordinator(comms, tpool, timeoutMillis, wakeFrequency); } @@ -115,7 +111,7 @@ public String getProcedureSignature() { public void execProcedure(ProcedureDescription desc) throws IOException { if (!isBackupEnabled()) { LOG.warn("Backup is not enabled. 
Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY - + " setting"); + + " setting"); return; } this.done = false; @@ -149,12 +145,12 @@ public void execProcedure(ProcedureDescription desc) throws IOException { this.done = true; } catch (InterruptedException e) { ForeignException ee = - new ForeignException("Interrupted while waiting for roll log procdure to finish", e); + new ForeignException("Interrupted while waiting for roll log procdure to finish", e); monitor.receive(ee); Thread.currentThread().interrupt(); } catch (ForeignException e) { ForeignException ee = - new ForeignException("Exception while waiting for roll log procdure to finish", e); + new ForeignException("Exception while waiting for roll log procdure to finish", e); monitor.receive(ee); } monitor.rethrowException(); @@ -162,7 +158,7 @@ public void execProcedure(ProcedureDescription desc) throws IOException { @Override public void checkPermissions(ProcedureDescription desc, AccessChecker accessChecker, User user) - throws IOException { + throws IOException { // TODO: what permissions checks are needed here? } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java index 575be3945171..4802e8b3ad63 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.regionserver; import java.util.HashMap; import java.util.List; import java.util.concurrent.Callable; - import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.client.Connection; @@ -50,10 +48,10 @@ public class LogRollBackupSubprocedure extends Subprocedure { private String backupRoot; public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member, - ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - LogRollBackupSubprocedurePool taskManager, byte[] data) { + ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, + LogRollBackupSubprocedurePool taskManager, byte[] data) { super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener, - wakeFrequency, timeout); + wakeFrequency, timeout); LOG.info("Constructing a LogRollBackupSubprocedure."); this.rss = rss; this.taskManager = taskManager; @@ -91,7 +89,7 @@ public Void call() throws Exception { } LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum - + " highest: " + highest + " on " + rss.getServerName()); + + " highest: " + highest + " on " + rss.getServerName()); ((HRegionServer) rss).getWalRoller().requestRollAll(); long start = EnvironmentEdgeManager.currentTime(); while (!((HRegionServer) rss).getWalRoller().walRollFinished()) { @@ -99,20 +97,20 @@ public Void call() throws Exception { } LOG.debug("log roll took " + (EnvironmentEdgeManager.currentTime() - start)); LOG.info("After roll log in backup subprocedure, current log number: " + fsWAL.getFilenum() - + " on " + rss.getServerName()); + + " on " + rss.getServerName()); Connection connection = rss.getConnection(); try (final BackupSystemTable table = new BackupSystemTable(connection)) { // sanity check, good for testing HashMap serverTimestampMap = - table.readRegionServerLastLogRollResult(backupRoot); + table.readRegionServerLastLogRollResult(backupRoot); String host = rss.getServerName().getHostname(); int port = rss.getServerName().getPort(); String server = host + ":" + port; Long sts = serverTimestampMap.get(host); if (sts != null && sts > highest) { - LOG.warn("Won't update server's last roll log result: current=" + sts + " new=" - + highest); + LOG + .warn("Won't update server's last roll log result: current=" + sts + " new=" + highest); return null; } // write the log number to backup system table. diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java index ef126d7c52d9..d6c39d7ed54f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.regionserver; import java.io.Closeable; @@ -28,19 +27,18 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** - * Handle running each of the individual tasks for completing a backup procedure on a region - * server. + * Handle running each of the individual tasks for completing a backup procedure on a region server. */ @InterfaceAudience.Private public class LogRollBackupSubprocedurePool implements Closeable, Abortable { @@ -58,9 +56,8 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable { public LogRollBackupSubprocedurePool(String name, Configuration conf) { // configure the executor service - long keepAlive = - conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, - LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); + long keepAlive = conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, + LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS); this.name = name; executor = @@ -94,7 +91,7 @@ public boolean waitForOutstandingTasks() throws ForeignException { } catch (InterruptedException e) { if (aborted) { throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!", - e); + e); } Thread.currentThread().interrupt(); } catch (ExecutionException e) { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index f09e71005598..95b65f8e69cb 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.regionserver; import java.io.IOException; import java.util.concurrent.ThreadPoolExecutor; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupManager; @@ -53,7 +51,7 @@ @InterfaceAudience.Private public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager { private static final Logger LOG = - LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class); + LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class); /** Conf key for number of request threads to start backup on region servers */ public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads"; @@ -86,7 +84,7 @@ public LogRollRegionServerProcedureManager() { public void start() { if (!BackupManager.isBackupEnabled(rss.getConfiguration())) { LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY - + " setting"); + + " setting"); return; } this.memberRpcs.start(rss.getServerName().toString(), member); @@ -122,7 +120,7 @@ public Subprocedure buildSubprocedure(byte[] data) { // don't run a backup if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName() - + ", because stopping/stopped!"); + + ", because stopping/stopped!"); } LOG.info("Attempting to run a roll log procedure for backup."); @@ -130,12 +128,12 @@ public Subprocedure buildSubprocedure(byte[] data) { Configuration conf = rss.getConfiguration(); long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); long wakeMillis = - conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT); + conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT); LogRollBackupSubprocedurePool taskManager = - new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); + new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis, - taskManager, data); + taskManager, data); } /** @@ -153,12 +151,12 @@ public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; if (!BackupManager.isBackupEnabled(rss.getConfiguration())) { LOG.warn("Backup is not enabled. 
Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY - + " setting"); + + " setting"); return; } ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(rss); this.memberRpcs = coordManager - .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); + .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); // read in the backup handler configuration properties Configuration conf = rss.getConfiguration(); @@ -166,7 +164,7 @@ public void initialize(RegionServerServices rss) throws KeeperException { int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT); // create the actual cohort member ThreadPoolExecutor pool = - ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); + ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder()); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java index 47bb12bb76e5..4228000d1966 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.util; import java.util.List; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java index 90bb4fdcc203..4b4ebd361a64 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.util; import java.io.FileNotFoundException; @@ -83,8 +82,8 @@ private BackupUtils() { * @param rsLogTimestampMap timestamp map * @return the min timestamp of each RS */ - public static Map getRSLogTimestampMins( - Map> rsLogTimestampMap) { + public static Map + getRSLogTimestampMins(Map> rsLogTimestampMap) { if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) { return null; } @@ -114,13 +113,13 @@ public static Map getRSLogTimestampMins( /** * copy out Table RegionInfo into incremental backup image need to consider move this logic into * HBackupFileSystem - * @param conn connection + * @param conn connection * @param backupInfo backup info - * @param conf configuration + * @param conf configuration * @throws IOException exception */ public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); @@ -140,8 +139,8 @@ public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, C FSTableDescriptors descriptors = new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf)); descriptors.createTableDescriptorForTableDirectory(target, orig, false); - LOG.debug("Attempting to copy table info for:" + table + " target: " + target + - " descriptor: " + orig); + LOG.debug("Attempting to copy table info for:" + table + " target: " + target + + " descriptor: " + orig); LOG.debug("Finished copying tableinfo."); List regions = MetaTableAccessor.getTableRegions(conn, table); // For each region, write the region info to disk @@ -161,7 +160,7 @@ public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, C * Write the .regioninfo file on-disk. */ public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs, - final Path regionInfoDir, RegionInfo regionInfo) throws IOException { + final Path regionInfoDir, RegionInfo regionInfo) throws IOException { final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo); Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR); // First check to get the permissions @@ -219,7 +218,7 @@ public static String getUniqueWALFileNamePart(Path p) { /** * Get the total length of files under the given directory recursively. 
- * @param fs The hadoop file system + * @param fs The hadoop file system * @param dir The target directory * @return the total length of files * @throws IOException exception @@ -241,13 +240,13 @@ public static long getFilesLength(FileSystem fs, Path dir) throws IOException { /** * Get list of all old WAL files (WALs and archive) - * @param c configuration + * @param c configuration * @param hostTimestampMap {host,timestamp} map * @return list of WAL files * @throws IOException exception */ public static List getWALFilesOlderThan(final Configuration c, - final HashMap hostTimestampMap) throws IOException { + final HashMap hostTimestampMap) throws IOException { Path walRootDir = CommonFSUtils.getWALRootDir(c); Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -292,7 +291,7 @@ public static TableName[] parseTableNames(String tables) { /** * Check whether the backup path exist * @param backupStr backup - * @param conf configuration + * @param conf configuration * @return Yes if path exists * @throws IOException exception */ @@ -313,7 +312,7 @@ public static boolean checkPathExist(String backupStr, Configuration conf) throw /** * Check target path first, confirm it doesn't exist before backup * @param backupRootPath backup destination path - * @param conf configuration + * @param conf configuration * @throws IOException exception */ public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException { @@ -325,8 +324,7 @@ public static void checkTargetDir(String backupRootPath, Configuration conf) thr String newMsg = null; if (expMsg.contains("No FileSystem for scheme")) { newMsg = - "Unsupported filesystem scheme found in the backup target url. Error Message: " - + expMsg; + "Unsupported filesystem scheme found in the backup target url. 
Error Message: " + expMsg; LOG.error(newMsg); throw new IOException(newMsg); } else { @@ -390,7 +388,7 @@ public static Long getCreationTime(Path p) throws IOException { } public static List getFiles(FileSystem fs, Path rootDir, List files, - PathFilter filter) throws IOException { + PathFilter filter) throws IOException { RemoteIterator it = fs.listFiles(rootDir, true); while (it.hasNext()) { @@ -414,7 +412,7 @@ public static void cleanupBackupData(BackupInfo context, Configuration conf) thr /** * Clean up directories which are generated when DistCp copying hlogs * @param backupInfo backup info - * @param conf configuration + * @param conf configuration * @throws IOException exception */ private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException { @@ -449,9 +447,8 @@ private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); for (TableName table : backupInfo.getTables()) { - Path targetDirPath = - new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), - table)); + Path targetDirPath = new Path( + getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table)); if (outputFs.delete(targetDirPath, true)) { LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); } else { @@ -468,7 +465,7 @@ private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true); } catch (IOException e1) { LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at " - + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); + + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); } } @@ -477,15 +474,15 @@ private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) * which is also where the backup manifest file is. return value look like: * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" * @param backupRootDir backup root directory - * @param backupId backup id - * @param tableName table name + * @param backupId backup id + * @param tableName table name * @return backupPath String for the particular table */ public static String getTableBackupDir(String backupRootDir, String backupId, - TableName tableName) { + TableName tableName) { return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() - + Path.SEPARATOR; + + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + + Path.SEPARATOR; } /** @@ -510,13 +507,13 @@ public static ArrayList sortHistoryListDesc(ArrayList hi * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. - * @param fs file system - * @param dir directory + * @param fs file system + * @param dir directory * @param filter path filter * @return null if dir is empty or doesn't exist, otherwise FileStatus array */ public static FileStatus[] listStatus(final FileSystem fs, final Path dir, - final PathFilter filter) throws IOException { + final PathFilter filter) throws IOException { FileStatus[] status = null; try { status = filter == null ? 
fs.listStatus(dir) : fs.listStatus(dir, filter); @@ -535,8 +532,8 @@ public static FileStatus[] listStatus(final FileSystem fs, final Path dir, } /** - * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the - * 'path' component of a Path's URI: e.g. If a Path is + * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path' + * component of a Path's URI: e.g. If a Path is * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, this method returns * /hbase_trunk/TestTable/compaction.dir. This method is useful if you want to print * out a Path without qualifying Filesystem instance. @@ -551,16 +548,16 @@ public static String getPath(Path p) { * Given the backup root dir and the backup id, return the log file location for an incremental * backup. * @param backupRootDir backup root directory - * @param backupId backup id + * @param backupId backup id * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" */ public static String getLogBackupDir(String backupRootDir, String backupId) { return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + HConstants.HREGION_LOGDIR_NAME; + + HConstants.HREGION_LOGDIR_NAME; } private static List getHistory(Configuration conf, Path backupRootPath) - throws IOException { + throws IOException { // Get all (n) history from backup root destination FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf); @@ -605,7 +602,7 @@ private long getTimestamp(String backupId) { } public static List getHistory(Configuration conf, int n, Path backupRootPath, - BackupInfo.Filter... filters) throws IOException { + BackupInfo.Filter... filters) throws IOException { List infos = getHistory(conf, backupRootPath); List ret = new ArrayList<>(); for (BackupInfo info : infos) { @@ -627,7 +624,7 @@ public static List getHistory(Configuration conf, int n, Path backup } public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs) - throws IOException { + throws IOException { Path backupPath = new Path(backupRootPath, backupId); RemoteIterator it = fs.listFiles(backupPath, true); @@ -646,24 +643,24 @@ public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, Fi /** * Create restore request. 
* @param backupRootDir backup root dir - * @param backupId backup id - * @param check check only - * @param fromTables table list from - * @param toTables table list to - * @param isOverwrite overwrite data + * @param backupId backup id + * @param check check only + * @param fromTables table list from + * @param toTables table list to + * @param isOverwrite overwrite data * @return request obkect */ public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId, - boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) { + boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) { RestoreRequest.Builder builder = new RestoreRequest.Builder(); RestoreRequest request = - builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check) - .withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build(); + builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check) + .withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build(); return request; } public static boolean validate(HashMap backupManifestMap, - Configuration conf) throws IOException { + Configuration conf) throws IOException { boolean isValid = true; for (Entry manifestEntry : backupManifestMap.entrySet()) { @@ -678,7 +675,7 @@ public static boolean validate(HashMap backupManifest LOG.info("Dependent image(s) from old to new:"); for (BackupImage image : imageSet) { String imageDir = - HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); if (!BackupUtils.checkPathExist(imageDir, conf)) { LOG.error("ERROR: backup image does not exist: " + imageDir); isValid = false; @@ -691,13 +688,12 @@ public static boolean validate(HashMap backupManifest } public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit) - throws IOException { + throws IOException { FileSystem fs = FileSystem.get(conf); - String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, - fs.getHomeDirectory() + "/hbase-staging"); - Path path = - new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-" - + EnvironmentEdgeManager.currentTime()); + String tmp = + conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging"); + Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-" + + EnvironmentEdgeManager.currentTime()); if (deleteOnExit) { fs.deleteOnExit(path); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java index dafed11fe834..e660ec7b157e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.util; import java.io.FileNotFoundException; @@ -75,7 +74,7 @@ public class RestoreTool { private final HashMap snapshotMap = new HashMap<>(); public RestoreTool(Configuration conf, final Path backupRootPath, final String backupId) - throws IOException { + throws IOException { this.conf = conf; this.backupRootPath = backupRootPath; this.backupId = backupId; @@ -91,8 +90,8 @@ public RestoreTool(Configuration conf, final Path backupRootPath, final String b */ Path getTableArchivePath(TableName tableName) throws IOException { Path baseDir = - new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), - HConstants.HFILE_ARCHIVE_DIRECTORY); + new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), + HConstants.HFILE_ARCHIVE_DIRECTORY); Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString()); @@ -142,16 +141,16 @@ void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException { * During incremental backup operation. Call WalPlayer to replay WAL in backup image Currently * tableNames and newTablesNames only contain single table, will be expanded to multiple tables in * the future - * @param conn HBase connection + * @param conn HBase connection * @param tableBackupPath backup path - * @param logDirs : incremental backup folders, which contains WAL - * @param tableNames : source tableNames(table names were backuped) - * @param newTableNames : target tableNames(table names to be restored to) - * @param incrBackupId incremental backup Id + * @param logDirs : incremental backup folders, which contains WAL + * @param tableNames : source tableNames(table names were backuped) + * @param newTableNames : target tableNames(table names to be restored to) + * @param incrBackupId incremental backup Id * @throws IOException exception */ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs, - TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException { + TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException { try (Admin admin = conn.getAdmin()) { if (tableNames.length != newTableNames.length) { throw new IOException("Number of source tables and target tables does not match!"); @@ -163,7 +162,7 @@ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[ for (TableName tableName : newTableNames) { if (!admin.tableExists(tableName)) { throw new IOException("HBase table " + tableName - + " does not exist. Create the table first, e.g. by restoring a full backup."); + + " does not exist. Create the table first, e.g. 
by restoring a full backup."); } } // adjust table schema @@ -179,7 +178,7 @@ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[ TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName); List families = Arrays.asList(tableDescriptor.getColumnFamilies()); List existingFamilies = - Arrays.asList(newTableDescriptor.getColumnFamilies()); + Arrays.asList(newTableDescriptor.getColumnFamilies()); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor); boolean schemaChangeNeeded = false; for (ColumnFamilyDescriptor family : families) { @@ -206,8 +205,7 @@ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[ } public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName, - TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) - throws IOException { + TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) throws IOException { createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists, lastIncrBackupId); } @@ -216,21 +214,20 @@ public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName ta * Returns value represent path for path to backup table snapshot directory: * "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot" * @param backupRootPath backup root path - * @param tableName table name - * @param backupId backup Id + * @param tableName table name + * @param backupId backup Id * @return path for snapshot */ Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) { return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), - HConstants.SNAPSHOT_DIR_NAME); + HConstants.SNAPSHOT_DIR_NAME); } /** * Returns value represent path for: * ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/ - * snapshot_1396650097621_namespace_table" - * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo, - * .data.manifest (trunk) + * snapshot_1396650097621_namespace_table" this path contains .snapshotinfo, .tabledesc (0.96 and + * 0.98) this path contains .snapshotinfo, .data.manifest (trunk) * @param tableName table name * @return path to table info * @throws IOException exception @@ -241,7 +238,7 @@ Path getTableInfoPath(TableName tableName) throws IOException { // can't build the path directly as the timestamp values are different FileStatus[] snapshots = fs.listStatus(tableSnapShotPath, - new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); + new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); for (FileStatus snapshot : snapshots) { tableInfoPath = snapshot.getPath(); // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; @@ -264,28 +261,27 @@ TableDescriptor getTableDesc(TableName tableName) throws IOException { TableDescriptor tableDescriptor = manifest.getTableDescriptor(); if (!tableDescriptor.getTableName().equals(tableName)) { LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " - + tableInfoPath.toString()); - LOG.error("tableDescriptor.getNameAsString() = " - + tableDescriptor.getTableName().getNameAsString()); + + tableInfoPath.toString()); + LOG.error( + "tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString()); throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName - + " under tableInfoPath: " + tableInfoPath.toString()); + + " under 
tableInfoPath: " + tableInfoPath.toString()); } return tableDescriptor; } private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName, - String lastIncrBackupId) throws IOException { + String lastIncrBackupId) throws IOException { if (lastIncrBackupId != null) { String target = - BackupUtils.getTableBackupDir(backupRootPath.toString(), - lastIncrBackupId, tableName); + BackupUtils.getTableBackupDir(backupRootPath.toString(), lastIncrBackupId, tableName); return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target)); } return null; } private void createAndRestoreTable(Connection conn, TableName tableName, TableName newTableName, - Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException { + Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException { if (newTableName == null) { newTableName = tableName; } @@ -304,7 +300,7 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa // check whether snapshot dir already recorded for target table if (snapshotMap.get(tableName) != null) { SnapshotDescription desc = - SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath); + SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath); SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc); tableDescriptor = manifest.getTableDescriptor(); } else { @@ -315,8 +311,8 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost"); } } else { - throw new IOException("Table snapshot directory: " + - tableSnapshotPath + " does not exist."); + throw new IOException( + "Table snapshot directory: " + tableSnapshotPath + " does not exist."); } } @@ -326,15 +322,15 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa // find table descriptor but no archive dir means the table is empty, create table and exit if (LOG.isDebugEnabled()) { LOG.debug("find table descriptor but no archive dir for table " + tableName - + ", will only create table"); + + ", will only create table"); } tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor); checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor, truncateIfExists); return; } else { - throw new IllegalStateException("Cannot restore hbase table because directory '" - + " tableArchivePath is null."); + throw new IllegalStateException( + "Cannot restore hbase table because directory '" + " tableArchivePath is null."); } } @@ -356,7 +352,8 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf); Path[] paths = new Path[regionPathList.size()]; regionPathList.toArray(paths); - restoreService.run(paths, new TableName[]{tableName}, new TableName[] {newTableName}, true); + restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName }, + true); } catch (Exception e) { LOG.error(e.toString(), e); @@ -430,9 +427,11 @@ byte[][] generateBoundaryKeys(ArrayList regionDirList) throws IOException // start to parse hfile inside one family dir Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); for (Path hfile : hfiles) { - if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".") + if ( + hfile.getName().startsWith("_") || hfile.getName().startsWith(".") || 
StoreFileInfo.isReference(hfile.getName()) - || HFileLink.isHFileLink(hfile.getName())) { + || HFileLink.isHFileLink(hfile.getName()) + ) { continue; } HFile.Reader reader = HFile.createReader(fs, hfile, conf); @@ -441,7 +440,7 @@ byte[][] generateBoundaryKeys(ArrayList regionDirList) throws IOException first = reader.getFirstRowKey().get(); last = reader.getLastRowKey().get(); LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" - + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0; @@ -460,24 +459,24 @@ byte[][] generateBoundaryKeys(ArrayList regionDirList) throws IOException /** * Prepare the table for bulkload, most codes copied from {@code createTable} method in * {@code BulkLoadHFilesTool}. - * @param conn connection - * @param tableBackupPath path - * @param tableName table name - * @param targetTableName target table name - * @param regionDirList region directory list - * @param htd table descriptor + * @param conn connection + * @param tableBackupPath path + * @param tableName table name + * @param targetTableName target table name + * @param regionDirList region directory list + * @param htd table descriptor * @param truncateIfExists truncates table if exists * @throws IOException exception */ private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName, - TableName targetTableName, ArrayList regionDirList, TableDescriptor htd, - boolean truncateIfExists) throws IOException { + TableName targetTableName, ArrayList regionDirList, TableDescriptor htd, + boolean truncateIfExists) throws IOException { try (Admin admin = conn.getAdmin()) { boolean createNew = false; if (admin.tableExists(targetTableName)) { if (truncateIfExists) { - LOG.info("Truncating exising target table '" + targetTableName - + "', preserving region splits"); + LOG.info( + "Truncating exising target table '" + targetTableName + "', preserving region splits"); admin.disableTable(targetTableName); admin.truncateTable(targetTableName, true); } else { @@ -497,7 +496,7 @@ private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableNam // create table using table descriptor and region boundaries admin.createTable(htd, keys); } - } catch (NamespaceNotFoundException e){ + } catch (NamespaceNotFoundException e) { LOG.warn("There was no namespace and the same will be created"); String namespaceAsString = targetTableName.getNamespaceAsString(); LOG.info("Creating target namespace '" + namespaceAsString + "'"); @@ -519,7 +518,7 @@ private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableNam } if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) { throw new IOException("Time out " + TABLE_AVAILABILITY_WAIT_TIME + "ms expired, table " - + targetTableName + " is still not available"); + + targetTableName + " is still not available"); } } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index 8a06425d2224..9246c74172fe 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one 
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -111,8 +110,8 @@ static class IncrementalTableBackupClientForTest extends IncrementalTableBackupC
 public IncrementalTableBackupClientForTest() {
 }
- public IncrementalTableBackupClientForTest(Connection conn,
- String backupId, BackupRequest request) throws IOException {
+ public IncrementalTableBackupClientForTest(Connection conn, String backupId,
+ BackupRequest request) throws IOException {
 super(conn, backupId, request);
 }
@@ -127,13 +126,13 @@ public void execute() throws IOException {
 failStageIf(Stage.stage_1);
 backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
 LOG.debug("For incremental backup, current table set is "
- + backupManager.getIncrementalBackupTableSet());
+ + backupManager.getIncrementalBackupTableSet());
 newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
 // copy out the table and region info files for each table
 BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
 // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
 convertWALsToHFiles();
- incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
+ incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
 backupInfo.getBackupRootDir());
 failStageIf(Stage.stage_2);
@@ -142,7 +141,7 @@ public void execute() throws IOException {
 // After this checkpoint, even if entering cancel process, will let the backup finished
 // Set the previousTimestampMap which is before this current log roll to the manifest.
 Map> previousTimestampMap =
- backupManager.readLogTimestampMap();
+ backupManager.readLogTimestampMap();
 backupInfo.setIncrTimestampMap(previousTimestampMap);
 // The table list in backupInfo is good for both full backup and incremental backup.
@@ -151,10 +150,10 @@ public void execute() throws IOException { failStageIf(Stage.stage_3); Map> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); + backupManager.readLogTimestampMap(); Long newStartCode = - BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); + BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); handleBulkLoad(backupInfo.getTableNames()); @@ -176,7 +175,7 @@ public FullTableBackupClientForTest() { } public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request) - throws IOException { + throws IOException { super(conn, backupId, request); } @@ -215,9 +214,8 @@ public void execute() throws IOException { // SNAPSHOT_TABLES: backupInfo.setPhase(BackupPhase.SNAPSHOT); for (TableName tableName : tableList) { - String snapshotName = - "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" - + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); snapshotTable(admin, tableName, snapshotName); backupInfo.setSnapshotName(tableName, snapshotName); @@ -239,11 +237,10 @@ public void execute() throws IOException { backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps); Map> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); + backupManager.readLogTimestampMap(); Long newStartCode = - BackupUtils.getMinValue(BackupUtils - .getRSLogTimestampMins(newTableSetTimestampMap)); + BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); failStageIf(Stage.stage_4); // backup complete @@ -251,7 +248,7 @@ public void execute() throws IOException { } catch (Exception e) { - if(autoRestoreOnFailure) { + if (autoRestoreOnFailure) { failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ", BackupType.FULL, conf); } @@ -261,13 +258,13 @@ public void execute() throws IOException { } public static void setUpHelper() throws Exception { - BACKUP_ROOT_DIR = Path.SEPARATOR +"backupUT"; + BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT"; BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT"; if (secure) { // set the always on security provider UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(), - HadoopSecurityEnabledUserProviderForTesting.class); + HadoopSecurityEnabledUserProviderForTesting.class); // setup configuration SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); } @@ -299,23 +296,21 @@ public static void setUpHelper() throws Exception { TEST_UTIL.startMiniMapReduceCluster(); BACKUP_ROOT_DIR = - new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), - BACKUP_ROOT_DIR).toString(); + new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR) + .toString(); LOG.info("ROOTDIR " + BACKUP_ROOT_DIR); if (useSecondCluster) { - BACKUP_REMOTE_ROOT_DIR = - new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) - + BACKUP_REMOTE_ROOT_DIR).toString(); + BACKUP_REMOTE_ROOT_DIR = new Path( + new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) + BACKUP_REMOTE_ROOT_DIR) + .toString(); LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR); } createTables(); 
populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1); } - /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass @@ -327,7 +322,6 @@ public static void setUp() throws Exception { setUpHelper(); } - private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) { Iterator> it = masterConf.iterator(); while (it.hasNext()) { @@ -341,7 +335,7 @@ private static void populateFromMasterConfig(Configuration masterConf, Configura */ @AfterClass public static void tearDown() throws Exception { - try{ + try { SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin()); } catch (Exception e) { } @@ -356,7 +350,7 @@ public static void tearDown() throws Exception { } Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows) - throws IOException { + throws IOException { Table t = conn.getTable(table); Put p1; for (int i = 0; i < numRows; i++) { @@ -367,17 +361,16 @@ Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, i return t; } - protected BackupRequest createBackupRequest(BackupType type, - List tables, String path) { + protected BackupRequest createBackupRequest(BackupType type, List tables, + String path) { BackupRequest.Builder builder = new BackupRequest.Builder(); - BackupRequest request = builder.withBackupType(type) - .withTableList(tables) - .withTargetRootDir(path).build(); + BackupRequest request = + builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build(); return request; } protected String backupTables(BackupType type, List tables, String path) - throws IOException { + throws IOException { Connection conn = null; BackupAdmin badmin = null; String backupId; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java index e4c74cfb85f3..b56a15d3a4a8 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,13 +35,12 @@ public class TestBackupBoundaryTests extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupBoundaryTests.class); + HBaseClassTestRule.forClass(TestBackupBoundaryTests.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupBoundaryTests.class); /** * Verify that full backup is created on a single empty table correctly. - * * @throws Exception if doing the full backup fails */ @Test @@ -53,7 +52,6 @@ public void testFullBackupSingleEmpty() throws Exception { /** * Verify that full backup is created on multiple empty tables correctly. - * * @throws Exception if doing the full backup fails */ @Test @@ -66,7 +64,6 @@ public void testFullBackupMultipleEmpty() throws Exception { /** * Verify that full backup fails on a single table that does not exist. 
- * * @throws Exception if doing the full backup fails */ @Test(expected = IOException.class) @@ -78,7 +75,6 @@ public void testFullBackupSingleDNE() throws Exception { /** * Verify that full backup fails on multiple tables that do not exist. - * * @throws Exception if doing the full backup fails */ @Test(expected = IOException.class) @@ -90,7 +86,6 @@ public void testFullBackupMultipleDNE() throws Exception { /** * Verify that full backup fails on tableset containing real and fake tables. - * * @throws Exception if doing the full backup fails */ @Test(expected = IOException.class) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java index acde21e3eb5a..77bc9bdb9b2f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestBackupCommandLineTool { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupCommandLineTool.class); + HBaseClassTestRule.forClass(TestBackupCommandLineTool.class); private final static String USAGE_DESCRIBE = "Usage: hbase backup describe "; private final static String USAGE_CREATE = "Usage: hbase backup create"; @@ -86,7 +86,6 @@ public void testBackupDriverDescribeHelp() throws Exception { assertTrue(output.indexOf(USAGE_DESCRIBE) >= 0); } - @Test public void testBackupDriverCreateTopLevelBackupDest() throws Exception { String[] args = new String[] { "create", "full", "hdfs://localhost:1020", "-t", "t1" }; @@ -107,7 +106,6 @@ public void testBackupDriverCreateHelp() throws Exception { assertTrue(output.indexOf(USAGE_CREATE) >= 0); assertTrue(output.indexOf(BackupRestoreConstants.OPTION_TABLE_LIST_DESC) > 0); - baos = new ByteArrayOutputStream(); System.setOut(new PrintStream(baos)); args = new String[] { "create", "-h" }; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java index bc8b346175a6..0c4d44d489d8 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,14 +45,13 @@ public class TestBackupDelete extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupDelete.class); + HBaseClassTestRule.forClass(TestBackupDelete.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupDelete.class); /** * Verify that full backup is created on a single table with data correctly. Verify that history * works as expected. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -80,7 +79,6 @@ public void testBackupDelete() throws Exception { /** * Verify that full backup is created on a single table with data correctly. 
Verify that history * works as expected. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -116,7 +114,7 @@ public void testBackupPurgeOldBackupsCommand() throws Exception { // time - 2 days @Override public long currentTime() { - return System.currentTimeMillis() - 2 * 24 * 3600 * 1000 ; + return System.currentTimeMillis() - 2 * 24 * 3600 * 1000; } }); String backupId = fullTableBackup(tableList); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java index f649b921b272..2798e1a16f0d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,14 +42,13 @@ public class TestBackupDeleteRestore extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupDeleteRestore.class); + HBaseClassTestRule.forClass(TestBackupDeleteRestore.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupDeleteRestore.class); /** * Verify that load data- backup - delete some data - restore works as expected - deleted data get * restored. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -72,8 +71,8 @@ public void testBackupDeleteRestore() throws Exception { TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = null;// new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, true)); + client.restore( + BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, true)); int numRowsAfterRestore = TEST_UTIL.countRows(table1); assertEquals(numRows, numRowsAfterRestore); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java index 2ab6f55f5b06..12c8d5c4065c 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,11 +57,11 @@ * tests should have their own classes and extend this one */ @Category(LargeTests.class) -public class TestBackupDeleteWithFailures extends TestBackupBase{ +public class TestBackupDeleteWithFailures extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupDeleteWithFailures.class); + HBaseClassTestRule.forClass(TestBackupDeleteWithFailures.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupDeleteWithFailures.class); @@ -75,7 +75,7 @@ public enum Failure { public static class MasterSnapshotObserver implements MasterCoprocessor, MasterObserver { List failures = new ArrayList<>(); - public void setFailures(Failure ... f) { + public void setFailures(Failure... f) { failures.clear(); for (int i = 0; i < f.length; i++) { failures.add(f[i]); @@ -89,8 +89,8 @@ public Optional getMasterObserver() { @Override public void preSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) + throws IOException { if (failures.contains(Failure.PRE_SNAPSHOT_FAILURE)) { throw new IOException("preSnapshot"); } @@ -98,7 +98,7 @@ public void preSnapshot(final ObserverContext ctx, @Override public void preDeleteSnapshot(ObserverContext ctx, - SnapshotDescription snapshot) throws IOException { + SnapshotDescription snapshot) throws IOException { if (failures.contains(Failure.PRE_DELETE_SNAPSHOT_FAILURE)) { throw new IOException("preDeleteSnapshot"); } @@ -106,7 +106,7 @@ public void preDeleteSnapshot(ObserverContext ctx, @Override public void postDeleteSnapshot(ObserverContext ctx, - SnapshotDescription snapshot) throws IOException { + SnapshotDescription snapshot) throws IOException { if (failures.contains(Failure.POST_DELETE_SNAPSHOT_FAILURE)) { throw new IOException("postDeleteSnapshot"); } @@ -115,22 +115,20 @@ public void postDeleteSnapshot(ObserverContext ctx /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass public static void setUp() throws Exception { TEST_UTIL = new HBaseTestingUtil(); conf1 = TEST_UTIL.getConfiguration(); - conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSnapshotObserver.class.getName()); + conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MasterSnapshotObserver.class.getName()); conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); setUpHelper(); } private MasterSnapshotObserver getMasterSnapshotObserver() { return TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost() - .findCoprocessor(MasterSnapshotObserver.class); + .findCoprocessor(MasterSnapshotObserver.class); } @Test @@ -140,9 +138,9 @@ public void testBackupDeleteWithFailures() throws Exception { testBackupDeleteWithFailuresAfter(1, Failure.PRE_SNAPSHOT_FAILURE); } - private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures) - throws Exception { - LOG.info("test repair backup delete on a single table with data and failures "+ failures[0]); + private void testBackupDeleteWithFailuresAfter(int expected, Failure... 
failures) + throws Exception { + LOG.info("test repair backup delete on a single table with data and failures " + failures[0]); List tableList = Lists.newArrayList(table1); String backupId = fullTableBackup(tableList); assertTrue(checkSucceeded(backupId)); @@ -161,8 +159,8 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures observer.setFailures(failures); try { getBackupAdmin().deleteBackups(backupIds); - } catch(IOException e) { - if(expected != 1) { + } catch (IOException e) { + if (expected != 1) { assertTrue(false); } } @@ -173,7 +171,7 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures String[] ids = table.getListOfBackupIdsFromDeleteOperation(); // Verify that we still have delete record in backup system table - if(expected == 1) { + if (expected == 1) { assertTrue(ids.length == 1); assertTrue(ids[0].equals(backupId)); } else { @@ -181,7 +179,7 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures } // Now run repair command to repair "failed" delete operation - String[] args = new String[] {"repair"}; + String[] args = new String[] { "repair" }; observer.setFailures(Failure.NO_FAILURES); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java index 6ab3d04feff4..7ce039fd6668 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,13 +43,12 @@ public class TestBackupDescribe extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupDescribe.class); + HBaseClassTestRule.forClass(TestBackupDescribe.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupDescribe.class); /** * Verify that describe works as expected if incorrect backup Id is supplied. - * * @throws Exception if creating the {@link BackupDriver} fails */ @Test diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java index 8393087477df..307440a10ed7 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public class TestBackupHFileCleaner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupHFileCleaner.class); + HBaseClassTestRule.forClass(TestBackupHFileCleaner.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupHFileCleaner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -121,14 +121,15 @@ public void testGetDeletableFiles() throws IOException { found = true; } } - assertTrue("Cleaner should allow to delete this file as there is no hfile reference " - + "for it.", found); + assertTrue( + "Cleaner should allow to delete this file as there is no hfile reference " + "for it.", + found); // 4. Add the file as bulk load List list = new ArrayList<>(1); list.add(file); try (Connection conn = ConnectionFactory.createConnection(conf); - BackupSystemTable sysTbl = new BackupSystemTable(conn)) { + BackupSystemTable sysTbl = new BackupSystemTable(conn)) { List sTableList = new ArrayList<>(); sTableList.add(tableName); Map>[] maps = new Map[1]; @@ -146,7 +147,8 @@ public void testGetDeletableFiles() throws IOException { found = true; } } - assertFalse("Cleaner should not allow to delete this file as there is a hfile reference " - + "for it.", found); + assertFalse( + "Cleaner should not allow to delete this file as there is a hfile reference " + "for it.", + found); } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java index 91bd185b872c..e044b42a0bd9 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicLongArray; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index 1a8638c3b7dc..c34f6be43b5e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,12 +43,9 @@ public class TestBackupMerge extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupMerge.class); - - private static final Logger LOG = - LoggerFactory.getLogger(TestBackupMerge.class); - + HBaseClassTestRule.forClass(TestBackupMerge.class); + private static final Logger LOG = LoggerFactory.getLogger(TestBackupMerge.class); @Test public void TestIncBackupMergeRestore() throws Exception { @@ -59,7 +56,6 @@ public void TestIncBackupMergeRestore() throws Exception { List tables = Lists.newArrayList(table1, table2); // Set custom Merge Job implementation - Connection conn = ConnectionFactory.createConnection(conf1); Admin admin = conn.getAdmin(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java index 538488b4c4e4..36cecd3faf58 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupMultipleDeletes.class); + HBaseClassTestRule.forClass(TestBackupMultipleDeletes.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupMultipleDeletes.class); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java index a0369890f3fa..239e1409bdd8 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,11 +40,10 @@ public class TestBackupRepair extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupRepair.class); + HBaseClassTestRule.forClass(TestBackupRepair.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupRepair.class); - @Test public void testFullBackupWithFailuresAndRestore() throws Exception { @@ -52,7 +51,7 @@ public void testFullBackupWithFailuresAndRestore() throws Exception { conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, FullTableBackupClientForTest.class.getName()); - int maxStage = Stage.values().length -1; + int maxStage = Stage.values().length - 1; // Fail stage in loop between 0 and 4 inclusive for (int stage = 0; stage < maxStage; stage++) { LOG.info("Running stage " + stage); @@ -65,23 +64,22 @@ public void runBackupAndFailAtStageWithRestore(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertFalse(ret == 0); // Now run restore - args = new String[] {"repair"}; + args = new String[] { "repair" }; - ret = ToolRunner.run(conf1, new BackupDriver(), args); + ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); List backups = table.getBackupHistory(); int after = table.getBackupHistory().size(); - assertTrue(after == before +1); + assertTrue(after == before + 1); for (BackupInfo data : backups) { String backupId = data.getBackupId(); assertFalse(checkSucceeded(backupId)); @@ -91,5 +89,4 @@ public void runBackupAndFailAtStageWithRestore(int stage) throws Exception { } } - } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java index 4526070106d1..fa624250929d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestBackupShowHistory extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupShowHistory.class); + HBaseClassTestRule.forClass(TestBackupShowHistory.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupShowHistory.class); @@ -60,7 +60,6 @@ private boolean findBackup(List history, String backupId) { /** * Verify that full backup is created on a single table with data correctly. Verify that history * works as expected. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -121,8 +120,7 @@ public void testBackupHistory() throws Exception { assertTrue(success); history = - BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), tableNameFilter, - tableSetFilter); + BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), tableNameFilter, tableSetFilter); assertTrue(history.size() > 0); success = true; for (BackupInfo info : history) { @@ -134,8 +132,7 @@ public void testBackupHistory() throws Exception { assertTrue(success); args = - new String[] { "history", "-n", "10", "-p", BACKUP_ROOT_DIR, - "-t", "table1", "-s", "backup" }; + new String[] { "history", "-n", "10", "-p", BACKUP_ROOT_DIR, "-t", "table1", "-s", "backup" }; // Run backup ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java index 1aa267b67bbd..83cc19578ade 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,25 +34,27 @@ public class TestBackupSmallTests extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupSmallTests.class); + HBaseClassTestRule.forClass(TestBackupSmallTests.class); private static final UserGroupInformation DIANA = - UserGroupInformation.createUserForTesting("diana", new String[] {}); + UserGroupInformation.createUserForTesting("diana", new String[] {}); private static final String PERMISSION_TEST_PATH = Path.SEPARATOR + "permissionUT"; - @Test public void testBackupPathIsAccessible() throws Exception { + @Test + public void testBackupPathIsAccessible() throws Exception { Path path = new Path(PERMISSION_TEST_PATH); FileSystem fs = FileSystem.get(TEST_UTIL.getConnection().getConfiguration()); fs.mkdirs(path); } - @Test(expected = IOException.class) public void testBackupPathIsNotAccessible() throws Exception { + @Test(expected = IOException.class) + public void testBackupPathIsNotAccessible() throws Exception { Path path = new Path(PERMISSION_TEST_PATH); FileSystem rootFs = FileSystem.get(TEST_UTIL.getConnection().getConfiguration()); rootFs.mkdirs(path.getParent()); rootFs.setPermission(path.getParent(), FsPermission.createImmutable((short) 000)); FileSystem fs = - DFSTestUtil.getFileSystemAs(DIANA, TEST_UTIL.getConnection().getConfiguration()); + DFSTestUtil.getFileSystemAs(DIANA, TEST_UTIL.getConnection().getConfiguration()); fs.mkdirs(path); } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java index 6d2091ea697c..1a1e5dbf1cc1 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,12 @@ public class TestBackupStatusProgress extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupStatusProgress.class); + HBaseClassTestRule.forClass(TestBackupStatusProgress.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupStatusProgress.class); /** * Verify that full backup is created on a single table with data correctly. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index e5a6679a97ac..db9d63bca943 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -60,7 +61,7 @@ public class TestBackupSystemTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupSystemTable.class); + HBaseClassTestRule.forClass(TestBackupSystemTable.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); protected static Configuration conf = UTIL.getConfiguration(); @@ -354,8 +355,8 @@ public void testBackupSetAddExists() throws IOException { String[] addTables = new String[] { "table4", "table5", "table6" }; table.addToBackupSet(setName, addTables); - Set expectedTables = new HashSet<>(Arrays.asList("table1", "table2", "table3", - "table4", "table5", "table6")); + Set expectedTables = + new HashSet<>(Arrays.asList("table1", "table2", "table3", "table4", "table5", "table6")); List tnames = table.describeBackupSet(setName); assertTrue(tnames != null); @@ -377,8 +378,8 @@ public void testBackupSetAddExistsIntersects() throws IOException { String[] addTables = new String[] { "table3", "table4", "table5", "table6" }; table.addToBackupSet(setName, addTables); - Set expectedTables = new HashSet<>(Arrays.asList("table1", "table2", "table3", - "table4", "table5", "table6")); + Set expectedTables = + new HashSet<>(Arrays.asList("table1", "table2", "table3", "table4", "table5", "table6")); List tnames = table.describeBackupSet(setName); assertTrue(tnames != null); @@ -471,15 +472,14 @@ public void testBackupSetList() throws IOException { private boolean compare(BackupInfo one, BackupInfo two) { return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType()) - && one.getBackupRootDir().equals(two.getBackupRootDir()) - && one.getStartTs() == two.getStartTs() && one.getCompleteTs() == two.getCompleteTs(); + && one.getBackupRootDir().equals(two.getBackupRootDir()) + && one.getStartTs() == two.getStartTs() && one.getCompleteTs() == two.getCompleteTs(); } private BackupInfo createBackupInfo() { - BackupInfo ctxt = - new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] { - TableName.valueOf("t1"), TableName.valueOf("t2"), 
TableName.valueOf("t3") }, - "/hbase/backup"); + BackupInfo ctxt = new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, + new TableName[] { TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") }, + "/hbase/backup"); ctxt.setStartTs(EnvironmentEdgeManager.currentTime()); ctxt.setCompleteTs(EnvironmentEdgeManager.currentTime() + 1); return ctxt; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java index fddbec2e31e6..7bf7a55c599c 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.security.PrivilegedAction; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -39,7 +38,7 @@ public class TestBackupUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupUtils.class); + HBaseClassTestRule.forClass(TestBackupUtils.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupUtils.class); protected static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -50,11 +49,12 @@ public void TestGetBulkOutputDir() { // Create a user who is not the current user String fooUserName = "foo1234"; String fooGroupName = "group1"; - UserGroupInformation - ugi = UserGroupInformation.createUserForTesting(fooUserName, new String[]{fooGroupName}); + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting(fooUserName, new String[] { fooGroupName }); // Get user's home directory Path fooHomeDirectory = ugi.doAs(new PrivilegedAction() { - @Override public Path run() { + @Override + public Path run() { try (FileSystem fs = FileSystem.get(conf)) { return fs.getHomeDirectory(); } catch (IOException ioe) { @@ -65,7 +65,8 @@ public void TestGetBulkOutputDir() { }); Path bulkOutputDir = ugi.doAs(new PrivilegedAction() { - @Override public Path run() { + @Override + public Path run() { try { return BackupUtils.getBulkOutputDir("test", conf, false); } catch (IOException ioe) { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java index 28624338f3a3..7cec06799742 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestFullBackup extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFullBackup.class); + HBaseClassTestRule.forClass(TestFullBackup.class); private static final Logger LOG = LoggerFactory.getLogger(TestFullBackup.class); @@ -44,9 +44,8 @@ public void testFullBackupMultipleCommand() throws Exception { LOG.info("test full backup on a multiple tables with data: command-line"); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java index 7a3aec46a9a5..af6ce077e051 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,13 +39,12 @@ public class TestFullBackupSet extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFullBackupSet.class); + HBaseClassTestRule.forClass(TestFullBackupSet.class); private static final Logger LOG = LoggerFactory.getLogger(TestFullBackupSet.class); /** * Verify that full backup is created on a single table with data correctly. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -74,9 +73,8 @@ public void testFullBackupSetExist() throws Exception { LOG.info("backup complete"); // Restore from set into other table - args = - new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", - table1_restore.getNameAsString(), "-o" }; + args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", + table1_restore.getNameAsString(), "-o" }; // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java index 3543133734e5..98e05cc5a128 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFullBackupSetRestoreSet.class); + HBaseClassTestRule.forClass(TestFullBackupSetRestoreSet.class); private static final Logger LOG = LoggerFactory.getLogger(TestFullBackupSetRestoreSet.class); @@ -70,9 +70,8 @@ public void testFullRestoreSetToOtherTable() throws Exception { LOG.info("backup complete"); // Restore from set into other table - args = - new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", - table1_restore.getNameAsString(), "-o" }; + args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", + table1_restore.getNameAsString(), "-o" }; // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java index bf3a9896e548..1536fd1841fb 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class TestFullBackupWithFailures extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFullBackupWithFailures.class); + HBaseClassTestRule.forClass(TestFullBackupWithFailures.class); private static final Logger LOG = LoggerFactory.getLogger(TestFullBackupWithFailures.class); @@ -48,7 +48,7 @@ public class TestFullBackupWithFailures extends TestBackupBase { public void testFullBackupWithFailures() throws Exception { conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, FullTableBackupClientForTest.class.getName()); - int maxStage = Stage.values().length -1; + int maxStage = Stage.values().length - 1; // Fail stages between 0 and 4 inclusive for (int stage = 0; stage <= maxStage; stage++) { LOG.info("Running stage " + stage); @@ -61,16 +61,15 @@ public void runBackupAndFailAtStage(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertFalse(ret == 0); List backups = table.getBackupHistory(); int after = table.getBackupHistory().size(); - assertTrue(after == before +1); + assertTrue(after == before + 1); for (BackupInfo data : backups) { String backupId = data.getBackupId(); assertFalse(checkSucceeded(backupId)); @@ -80,5 +79,4 @@ public void runBackupAndFailAtStage(int stage) throws Exception { } } - } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index f5ad0d7b827e..385a6b3c5193 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,13 +42,12 @@ public class TestFullRestore extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFullRestore.class); + HBaseClassTestRule.forClass(TestFullRestore.class); private static final Logger LOG = LoggerFactory.getLogger(TestFullRestore.class); /** * Verify that a single table is restored to a new table. - * * @throws Exception if doing the backup, restoring it or an operation on the tables fails */ @Test @@ -64,8 +63,8 @@ public void testFullRestoreSingle() throws Exception { TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, false)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, + tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); @@ -81,9 +80,8 @@ public void testFullRestoreSingleCommand() throws Exception { LOG.info("backup complete"); assertTrue(checkSucceeded(backupId)); // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", - table1_restore.getNameAsString() }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", + table1_restore.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); @@ -103,20 +101,18 @@ public void testFullRestoreCheckCommand() throws Exception { LOG.info("backup complete"); assertTrue(checkSucceeded(backupId)); // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", - table1_restore.getNameAsString(), "-c" }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", + table1_restore.getNameAsString(), "-c" }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - //Verify that table has not been restored + // Verify that table has not been restored Admin hba = TEST_UTIL.getAdmin(); assertFalse(hba.tableExists(table1_restore)); } /** * Verify that multiple tables are restored to new tables. - * * @throws Exception if doing the backup, restoring it or an operation on the tables fails */ @Test @@ -141,7 +137,6 @@ public void testFullRestoreMultiple() throws Exception { /** * Verify that multiple tables are restored to new tables. 
- * * @throws Exception if doing the backup, restoring it or an operation on the tables fails */ @Test @@ -155,9 +150,8 @@ public void testFullRestoreMultipleCommand() throws Exception { TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(restore_tableset, ","), - "-m", StringUtils.join(tablemap, ",") }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", + StringUtils.join(restore_tableset, ","), "-m", StringUtils.join(tablemap, ",") }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); @@ -172,7 +166,6 @@ public void testFullRestoreMultipleCommand() throws Exception { /** * Verify that a single table is restored using overwrite. - * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -186,13 +179,12 @@ public void testFullRestoreSingleOverwrite() throws Exception { TableName[] tableset = new TableName[] { table1 }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, null, true)); + client.restore( + BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, null, true)); } /** * Verify that a single table is restored using overwrite. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -205,7 +197,7 @@ public void testFullRestoreSingleOverwriteCommand() throws Exception { TableName[] tableset = new TableName[] { table1 }; // restore [tableMapping] String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(tableset, ","), "-o" }; + new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(tableset, ","), "-o" }; // Run restore int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); @@ -217,7 +209,6 @@ public void testFullRestoreSingleOverwriteCommand() throws Exception { /** * Verify that multiple tables are restored to new tables using overwrite. - * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -236,7 +227,6 @@ public void testFullRestoreMultipleOverwrite() throws Exception { /** * Verify that multiple tables are restored to new tables using overwrite. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -249,9 +239,8 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception { TableName[] restore_tableset = new TableName[] { table2, table3 }; // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", - StringUtils.join(restore_tableset, ","), "-o" }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", + StringUtils.join(restore_tableset, ","), "-o" }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); @@ -264,7 +253,6 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception { /** * Verify that restore fails on a single table that does not exist. 
- * * @throws Exception if doing the backup or restoring it fails */ @Test(expected = IOException.class) @@ -279,13 +267,12 @@ public void testFullRestoreSingleDNE() throws Exception { TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; TableName[] tablemap = new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, false)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, + tablemap, false)); } /** * Verify that restore fails on a single table that does not exist. - * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -299,9 +286,8 @@ public void testFullRestoreSingleDNECommand() throws Exception { TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; TableName[] tablemap = new TableName[] { table1_restore }; - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, StringUtils.join(tableset, ","), "-m", - StringUtils.join(tablemap, ",") }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, StringUtils.join(tableset, ","), "-m", + StringUtils.join(tablemap, ",") }; // Run restore int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret != 0); @@ -309,7 +295,6 @@ public void testFullRestoreSingleDNECommand() throws Exception { /** * Verify that restore fails on multiple tables that do not exist. - * * @throws Exception if doing the backup or restoring it fails */ @Test(expected = IOException.class) @@ -321,7 +306,7 @@ public void testFullRestoreMultipleDNE() throws Exception { assertTrue(checkSucceeded(backupId)); TableName[] restore_tableset = - new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; + new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; BackupAdmin client = getBackupAdmin(); client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, @@ -330,7 +315,6 @@ public void testFullRestoreMultipleDNE() throws Exception { /** * Verify that restore fails on multiple tables that do not exist. 
- * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -342,11 +326,10 @@ public void testFullRestoreMultipleDNECommand() throws Exception { assertTrue(checkSucceeded(backupId)); TableName[] restore_tableset = - new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; + new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, StringUtils.join(restore_tableset, ","), "-m", - StringUtils.join(tablemap, ",") }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), "-m", StringUtils.join(tablemap, ",") }; // Run restore int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret != 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index ea552b7945a3..90fbba2bf0ae 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -57,7 +57,7 @@ public class TestIncrementalBackup extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementalBackup.class); + HBaseClassTestRule.forClass(TestIncrementalBackup.class); private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class); @@ -87,7 +87,8 @@ public void TestIncBackupRestore() throws Exception { TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true) - .setMobThreshold(5L).build()).build(); + .setMobThreshold(5L).build()) + .build(); TEST_UTIL.getAdmin().modifyTable(newTable1Desc); try (Connection conn = ConnectionFactory.createConnection(conf1)) { @@ -104,7 +105,7 @@ public void TestIncBackupRestore() throws Exception { Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS); LOG.debug("writing " + ADD_ROWS + " rows to " + table1); Assert.assertEquals(HBaseTestingUtil.countRows(t1), - NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); LOG.debug("written " + ADD_ROWS + " rows to " + table1); // additionally, insert rows to MOB cf int NB_ROWS_MOB = 111; @@ -112,7 +113,7 @@ public void TestIncBackupRestore() throws Exception { LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF"); t1.close(); Assert.assertEquals(HBaseTestingUtil.countRows(t1), - NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB); + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB); Table t2 = conn.getTable(table2); Put p2; for (int i = 0; i < 5; i++) { @@ -174,7 +175,7 @@ public void TestIncBackupRestore() throws Exception { LOG.debug("Restoring full " + backupIdFull); client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, - tablesRestoreFull, tablesMapFull, true)); + tablesRestoreFull, tablesMapFull, true)); // #6.1 - check tables for full restore Admin hAdmin = TEST_UTIL.getAdmin(); @@ -194,8 +195,8 @@ public void TestIncBackupRestore() throws Exception { // #7 - restore incremental backup for multiple tables, with overwrite TableName[] tablesRestoreIncMultiple = new TableName[] { table1, 
table2 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, - false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); hTable = conn.getTable(table1_restore); LOG.debug("After incremental restore: " + hTable.getDescriptor()); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java index 837de4dd6166..a5eec87fb06b 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,11 +41,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * 1. Create table t1, t2 - * 2. Load data to t1, t2 - * 3 Full backup t1, t2 - * 4 Delete t2 - * 5 Load data to t1 + * 1. Create table t1, t2 2. Load data to t1, t2 3 Full backup t1, t2 4 Delete t2 5 Load data to t1 * 6 Incremental backup t1 */ @Category(LargeTests.class) @@ -53,7 +49,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementalBackupDeleteTable.class); + HBaseClassTestRule.forClass(TestIncrementalBackupDeleteTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupDeleteTable.class); @@ -120,8 +116,8 @@ public void testIncBackupDeleteTable() throws Exception { // #6 - restore incremental backup for table1 TableName[] tablesRestoreIncMultiple = new TableName[] { table1 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, - false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); hTable = conn.getTable(table1_restore); Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java index 1bde63ba5527..1ece1770489b 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,13 +56,16 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementalBackupMergeWithFailures.class); + HBaseClassTestRule.forClass(TestIncrementalBackupMergeWithFailures.class); private static final Logger LOG = - LoggerFactory.getLogger(TestIncrementalBackupMergeWithFailures.class); + LoggerFactory.getLogger(TestIncrementalBackupMergeWithFailures.class); enum FailurePhase { - PHASE1, PHASE2, PHASE3, PHASE4 + PHASE1, + PHASE2, + PHASE3, + PHASE4 } public final static String FAILURE_PHASE_KEY = "failurePhase"; @@ -82,8 +85,7 @@ public void setConf(Configuration conf) { } /** - * This is the exact copy of parent's run() with injections - * of different types of failures + * This is the exact copy of parent's run() with injections of different types of failures */ @Override public void run(String[] backupIds) throws IOException { @@ -128,9 +130,8 @@ public void run(String[] backupIds) throws IOException { // Find input directories for table Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds); String dirs = StringUtils.join(dirPaths, ","); - Path bulkOutputPath = - BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]), - getConf(), false); + Path bulkOutputPath = BackupUtils.getBulkOutputDir( + BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false); // Delete content if exists if (fs.exists(bulkOutputPath)) { if (!fs.delete(bulkOutputPath, true)) { @@ -150,7 +151,7 @@ public void run(String[] backupIds) throws IOException { processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath)); } else { throw new IOException("Can not merge backup images for " + dirs - + " (check Hadoop/MR and HBase logs). Player return code =" + result); + + " (check Hadoop/MR and HBase logs). 
Player return code =" + result); } LOG.debug("Merge Job finished:" + result); } @@ -163,13 +164,13 @@ public void run(String[] backupIds) throws IOException { // (modification of a backup file system) // Move existing mergedBackupId data into tmp directory // we will need it later in case of a failure - Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, - mergedBackupId); + Path tmpBackupDir = + HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId); Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId); if (!fs.rename(backupDirPath, tmpBackupDir)) { - throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir); + throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir); } else { - LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir); + LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir); } // Move new data into backup dest for (Pair tn : processedTableList) { @@ -184,7 +185,7 @@ public void run(String[] backupIds) throws IOException { // Delete tmp dir (Rename back during repair) if (!fs.delete(tmpBackupDir, true)) { // WARN and ignore - LOG.warn("Could not delete tmp dir: "+ tmpBackupDir); + LOG.warn("Could not delete tmp dir: " + tmpBackupDir); } // Delete old data deleteBackupImages(backupsToDelete, conn, fs, backupRoot); @@ -206,8 +207,8 @@ public void run(String[] backupIds) throws IOException { } else { // backup repair must be run throw new IOException( - "Backup merge operation failed, run backup repair tool to restore system's integrity", - e); + "Backup merge operation failed, run backup repair tool to restore system's integrity", + e); } } finally { table.close(); @@ -274,7 +275,7 @@ public void TestIncBackupMergeRestore() throws Exception { request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); String backupIdIncMultiple2 = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple2)); - // #4 Merge backup images with failures + // #4 Merge backup images with failures for (FailurePhase phase : FailurePhase.values()) { Configuration conf = conn.getConfiguration(); @@ -287,14 +288,14 @@ public void TestIncBackupMergeRestore() throws Exception { Assert.fail("Expected IOException"); } catch (IOException e) { BackupSystemTable table = new BackupSystemTable(conn); - if(phase.ordinal() < FailurePhase.PHASE4.ordinal()) { + if (phase.ordinal() < FailurePhase.PHASE4.ordinal()) { // No need to repair: // Both Merge and backup exclusive operations are finished assertFalse(table.isMergeInProgress()); try { table.finishBackupExclusiveOperation(); Assert.fail("IOException is expected"); - } catch(IOException ee) { + } catch (IOException ee) { // Expected } } else { @@ -303,14 +304,14 @@ public void TestIncBackupMergeRestore() throws Exception { try { table.startBackupExclusiveOperation(); Assert.fail("IOException is expected"); - } catch(IOException ee) { + } catch (IOException ee) { // Expected - clean up before proceeding - //table.finishMergeOperation(); - //table.finishBackupExclusiveOperation(); + // table.finishMergeOperation(); + // table.finishBackupExclusiveOperation(); } } table.close(); - LOG.debug("Expected :"+ e.getMessage()); + LOG.debug("Expected :" + e.getMessage()); } } // Now merge w/o failures diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java index 60aa635045a7..28fb025bb32a 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,19 +45,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * 1. Create table t1 - * 2. Load data to t1 - * 3 Full backup t1 - * 4 Load data to t1 - * 5 bulk load into t1 - * 6 Incremental backup t1 + * 1. Create table t1 2. Load data to t1 3 Full backup t1 4 Load data to t1 5 bulk load into t1 6 + * Incremental backup t1 */ @Category(LargeTests.class) public class TestIncrementalBackupWithBulkLoad extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementalBackupWithBulkLoad.class); + HBaseClassTestRule.forClass(TestIncrementalBackupWithBulkLoad.class); private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupDeleteTable.class); @@ -92,11 +88,11 @@ public void TestIncBackupDeleteTable() throws Exception { int NB_ROWS2 = 20; LOG.debug("bulk loading into " + testName); - int actual = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, - qualName, false, null, new byte[][][] { - new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, - }, true, false, true, NB_ROWS_IN_BATCH*2, NB_ROWS2); + int actual = + TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, false, null, + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }, + true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2); // #3 - incremental backup for table1 tables = Lists.newArrayList(table1); @@ -105,11 +101,11 @@ public void TestIncBackupDeleteTable() throws Exception { assertTrue(checkSucceeded(backupIdIncMultiple)); // #4 bulk load again LOG.debug("bulk loading into " + testName); - int actual1 = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, - qualName, false, null, - new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") }, - new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, }, - true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2); + int actual1 = + TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, false, null, + new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") }, + new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, }, + true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2); // #5 - incremental backup for table1 tables = Lists.newArrayList(table1); @@ -123,9 +119,9 @@ public void TestIncBackupDeleteTable() throws Exception { // #6 - restore incremental backup for table1 TableName[] tablesRestoreIncMultiple = new TableName[] { table1 }; - //TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, - false, tablesRestoreIncMultiple, tablesRestoreIncMultiple, true)); + // TableName[] tablesMapIncMultiple = new TableName[] { 
table1_restore }; + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, false, + tablesRestoreIncMultiple, tablesRestoreIncMultiple, true)); Table hTable = conn.getTable(table1); Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1); @@ -133,10 +129,10 @@ public void TestIncBackupDeleteTable() throws Exception { backupIdFull = client.backupTables(request); try (final BackupSystemTable table = new BackupSystemTable(conn)) { - Pair>>>>, List> pair - = table.readBulkloadRows(tables); + Pair>>>>, + List> pair = table.readBulkloadRows(tables); assertTrue("map still has " + pair.getSecond().size() + " entries", - pair.getSecond().isEmpty()); + pair.getSecond().isEmpty()); } assertTrue(checkSucceeded(backupIdFull)); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java index 00b13ba8dbf8..c8d536564188 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java @@ -58,10 +58,10 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementalBackupWithFailures.class); + HBaseClassTestRule.forClass(TestIncrementalBackupWithFailures.class); private static final Logger LOG = - LoggerFactory.getLogger(TestIncrementalBackupWithFailures.class); + LoggerFactory.getLogger(TestIncrementalBackupWithFailures.class); @Parameterized.Parameters public static Collection data() { @@ -127,11 +127,10 @@ public void testIncBackupRestore() throws Exception { } - private void incrementalBackupWithFailures() throws Exception { conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, IncrementalTableBackupClientForTest.class.getName()); - int maxStage = Stage.values().length -1; + int maxStage = Stage.values().length - 1; // Fail stages between 0 and 4 inclusive for (int stage = 0; stage <= maxStage; stage++) { LOG.info("Running stage " + stage); @@ -144,18 +143,17 @@ private void runBackupAndFailAtStage(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertFalse(ret == 0); List backups = table.getBackupHistory(); int after = table.getBackupHistory().size(); - assertTrue(after == before +1); + assertTrue(after == before + 1); for (BackupInfo data : backups) { - if(data.getType() == BackupType.FULL) { + if (data.getType() == BackupType.FULL) { assertTrue(data.getState() == BackupState.COMPLETE); } else { assertTrue(data.getState() == BackupState.FAILED); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index 4150d3fd2fc5..a148ab232dc2 100644 --- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -53,13 +53,12 @@ public class TestRemoteBackup extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteBackup.class); + HBaseClassTestRule.forClass(TestRemoteBackup.class); private static final Logger LOG = LoggerFactory.getLogger(TestRemoteBackup.class); /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass @@ -73,7 +72,6 @@ public static void setUp() throws Exception { /** * Verify that a remote full backup is created on a single table with data correctly. - * * @throws Exception if an operation on the table fails */ @Test @@ -118,7 +116,7 @@ public void testFullBackupRemote() throws Exception { latch.countDown(); String backupId = - backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); + backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); assertTrue(checkSucceeded(backupId)); LOG.info("backup complete " + backupId); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index 8dd4f7924703..ce8c6497c9ef 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,13 +37,12 @@ public class TestRemoteRestore extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteRestore.class); + HBaseClassTestRule.forClass(TestRemoteRestore.class); private static final Logger LOG = LoggerFactory.getLogger(TestRemoteRestore.class); /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass @@ -56,20 +55,18 @@ public static void setUp() throws Exception { /** * Verify that a remote restore on a single table is successful. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test public void testFullRestoreRemote() throws Exception { LOG.info("test remote full backup on a single table"); String backupId = - backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR); + backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR); LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - getBackupAdmin().restore( - BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset, - tablemap, false)); + getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, + false, tableset, tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java index 62a1f8f294cf..93345fd17059 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRepairAfterFailedDelete.class); + HBaseClassTestRule.forClass(TestRepairAfterFailedDelete.class); private static final Logger LOG = LoggerFactory.getLogger(TestRepairAfterFailedDelete.class); @@ -83,7 +83,7 @@ public void testRepairBackupDelete() throws Exception { table.startDeleteOperation(backupIds); // Now run repair command to repair "failed" delete operation - String[] args = new String[] {"repair"}; + String[] args = new String[] { "repair" }; // Run restore int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index a6808cd69dc3..7b49558031e8 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,13 +36,12 @@ public class TestRestoreBoundaryTests extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreBoundaryTests.class); + HBaseClassTestRule.forClass(TestRestoreBoundaryTests.class); private static final Logger LOG = LoggerFactory.getLogger(TestRestoreBoundaryTests.class); /** * Verify that a single empty table is restored to a new table. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -52,9 +51,8 @@ public void testFullRestoreSingleEmpty() throws Exception { LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - getBackupAdmin().restore( - BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, - false)); + getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + tableset, tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); @@ -62,7 +60,6 @@ public void testFullRestoreSingleEmpty() throws Exception { /** * Verify that multiple tables are restored to new tables. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -73,9 +70,8 @@ public void testFullRestoreMultipleEmpty() throws Exception { String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { table2, table3 }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - getBackupAdmin().restore( - BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, - tablemap, false)); + getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + restore_tableset, tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table2_restore)); assertTrue(hba.tableExists(table3_restore)); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java index bd295122a289..f626dec5875d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,16 +32,15 @@ public class TestSystemTableSnapshot extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSystemTableSnapshot.class); + HBaseClassTestRule.forClass(TestSystemTableSnapshot.class); private static final Logger LOG = LoggerFactory.getLogger(TestSystemTableSnapshot.class); /** * Verify backup system table snapshot. - * * @throws Exception if an operation on the table fails */ - // @Test + // @Test public void _testBackupRestoreSystemTable() throws Exception { LOG.info("test snapshot system table"); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java index 5363b1a44b4f..2b0f9c0cba5f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.util.HashMap; import java.util.List; import java.util.Map; @@ -40,6 +41,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -48,7 +50,7 @@ public class TestBackupLogCleaner extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupLogCleaner.class); + HBaseClassTestRule.forClass(TestBackupLogCleaner.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupLogCleaner.class); @@ -120,8 +122,8 @@ public void testBackupLogCleaner() throws Exception { // #3 - incremental backup for multiple tables List tableSetIncList = Lists.newArrayList(table1, table2, table3); - String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, - BACKUP_ROOT_DIR); + String backupIdIncMultiple = + backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR); assertTrue(checkSucceeded(backupIdIncMultiple)); deletable = cleaner.getDeletableFiles(newWalFiles); diff --git a/hbase-balancer/pom.xml b/hbase-balancer/pom.xml index 9a0250bb9b89..e8d904338572 100644 --- a/hbase-balancer/pom.xml +++ b/hbase-balancer/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,35 +30,6 @@ hbase-balancer Apache HBase - Balancer HBase Balancer Support - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -152,13 +122,44 @@ test + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -184,8 +185,7 @@ lifecycle-mapping - - + diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java index df30ebbec0a5..006ff2e731d4 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.favored; import static org.apache.hadoop.hbase.ServerName.NON_STARTCODE; @@ -79,7 +77,7 @@ public class FavoredNodeAssignmentHelper { // region server entries might not match with that is in servers. 
private Map regionServerToRackMap; private List servers; - public static final byte [] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn"); + public static final byte[] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn"); public final static short FAVORED_NODES_NUM = 3; public final static short MAX_ATTEMPTS_FN_GENERATION = 10; @@ -88,7 +86,7 @@ public FavoredNodeAssignmentHelper(final List servers, Configuration } public FavoredNodeAssignmentHelper(final List servers, - final RackManager rackManager) { + final RackManager rackManager) { this.servers = servers; this.rackManager = rackManager; this.rackToRegionServerMap = new HashMap<>(); @@ -121,7 +119,7 @@ public void initialize() { /** * Update meta table with favored nodes info * @param regionToFavoredNodes map of RegionInfo's to their favored nodes - * @param connection connection to be used + * @param connection connection to be used */ public static void updateMetaWithFavoredNodesInfo( Map> regionToFavoredNodes, Connection connection) @@ -187,8 +185,7 @@ public static ServerName[] getFavoredNodesList(byte[] favoredNodes) throws IOExc } /** - * @param serverAddrList - * @return PB'ed bytes of {@link FavoredNodes} generated by the server list. + * n * @return PB'ed bytes of {@link FavoredNodes} generated by the server list. */ public static byte[] getFavoredNodes(List serverAddrList) { FavoredNodes.Builder f = FavoredNodes.newBuilder(); @@ -212,12 +209,12 @@ public static byte[] getFavoredNodes(List serverAddrList) { // placement could be r2:s5, , r4:s5, r1:s5, r2:s6, ... // The regions should be distributed proportionately to the racksizes public void placePrimaryRSAsRoundRobin(Map> assignmentMap, - Map primaryRSMap, List regions) { + Map primaryRSMap, List regions) { List rackList = new ArrayList<>(rackToRegionServerMap.size()); rackList.addAll(rackToRegionServerMap.keySet()); int rackIndex = ThreadLocalRandom.current().nextInt(rackList.size()); int maxRackSize = 0; - for (Map.Entry> r : rackToRegionServerMap.entrySet()) { + for (Map.Entry> r : rackToRegionServerMap.entrySet()) { if (r.getValue().size() > maxRackSize) { maxRackSize = r.getValue().size(); } @@ -234,7 +231,7 @@ public void placePrimaryRSAsRoundRobin(Map> assignm // Get the server list for the current rack currentServerList = rackToRegionServerMap.get(rackName); - if (serverIndex >= currentServerList.size()) { //not enough machines in this rack + if (serverIndex >= currentServerList.size()) { // not enough machines in this rack if (numIterations % rackList.size() == 0) { if (++serverIndex >= maxRackSize) serverIndex = 0; } @@ -268,8 +265,8 @@ public void placePrimaryRSAsRoundRobin(Map> assignm } } - public Map placeSecondaryAndTertiaryRS( - Map primaryRSMap) { + public Map + placeSecondaryAndTertiaryRS(Map primaryRSMap) { Map secondaryAndTertiaryMap = new HashMap<>(); for (Map.Entry entry : primaryRSMap.entrySet()) { // Get the target region and its primary region server rack @@ -281,11 +278,11 @@ public Map placeSecondaryAndTertiaryRS( if (favoredNodes != null) { secondaryAndTertiaryMap.put(regionInfo, favoredNodes); LOG.debug("Place the secondary and tertiary region server for region " - + regionInfo.getRegionNameAsString()); + + regionInfo.getRegionNameAsString()); } } catch (Exception e) { - LOG.warn("Cannot place the favored nodes for region " + - regionInfo.getRegionNameAsString() + " because " + e, e); + LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + + " because " + e, e); continue; } } @@ -293,7 +290,7 @@ public Map 
placeSecondaryAndTertiaryRS( } public ServerName[] getSecondaryAndTertiary(RegionInfo regionInfo, ServerName primaryRS) - throws IOException { + throws IOException { ServerName[] favoredNodes;// Get the rack for the primary region server String primaryRack = getRackOfServer(primaryRS); @@ -306,8 +303,8 @@ public ServerName[] getSecondaryAndTertiary(RegionInfo regionInfo, ServerName pr return favoredNodes; } - private Map> mapRSToPrimaries( - Map primaryRSMap) { + private Map> + mapRSToPrimaries(Map primaryRSMap) { Map> primaryServerMap = new HashMap<>(); for (Entry e : primaryRSMap.entrySet()) { Set currentSet = primaryServerMap.get(e.getValue()); @@ -321,16 +318,13 @@ private Map> mapRSToPrimaries( } /** - * For regions that share the primary, avoid placing the secondary and tertiary - * on a same RS. Used for generating new assignments for the - * primary/secondary/tertiary RegionServers - * @param primaryRSMap - * @return the map of regions to the servers the region-files should be hosted on + * For regions that share the primary, avoid placing the secondary and tertiary on a same RS. Used + * for generating new assignments for the primary/secondary/tertiary RegionServers n * @return the + * map of regions to the servers the region-files should be hosted on */ - public Map placeSecondaryAndTertiaryWithRestrictions( - Map primaryRSMap) { - Map> serverToPrimaries = - mapRSToPrimaries(primaryRSMap); + public Map + placeSecondaryAndTertiaryWithRestrictions(Map primaryRSMap) { + Map> serverToPrimaries = mapRSToPrimaries(primaryRSMap); Map secondaryAndTertiaryMap = new HashMap<>(); for (Entry entry : primaryRSMap.entrySet()) { @@ -346,17 +340,17 @@ public Map placeSecondaryAndTertiaryWithRestrictions( // from the same rack favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack); } else { - favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, - secondaryAndTertiaryMap, primaryRack, primaryRS, regionInfo); + favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, secondaryAndTertiaryMap, + primaryRack, primaryRS, regionInfo); } if (favoredNodes != null) { secondaryAndTertiaryMap.put(regionInfo, favoredNodes); LOG.debug("Place the secondary and tertiary region server for region " - + regionInfo.getRegionNameAsString()); + + regionInfo.getRegionNameAsString()); } } catch (Exception e) { - LOG.warn("Cannot place the favored nodes for region " - + regionInfo.getRegionNameAsString() + " because " + e, e); + LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + + " because " + e, e); continue; } } @@ -364,9 +358,9 @@ public Map placeSecondaryAndTertiaryWithRestrictions( } private ServerName[] multiRackCaseWithRestrictions( - Map> serverToPrimaries, - Map secondaryAndTertiaryMap, - String primaryRack, ServerName primaryRS, RegionInfo regionInfo) throws IOException { + Map> serverToPrimaries, + Map secondaryAndTertiaryMap, String primaryRack, ServerName primaryRS, + RegionInfo regionInfo) throws IOException { // Random to choose the secondary and tertiary region server // from another rack to place the secondary and tertiary // Random to choose one rack except for the current rack @@ -398,8 +392,7 @@ private ServerName[] multiRackCaseWithRestrictions( } } } - if (skipServerSet.size() + 2 <= serverSet.size()) - break; + if (skipServerSet.size() + 2 <= serverSet.size()) break; skipServerSet.clear(); rackSkipSet.add(secondaryRack); // we used all racks @@ -421,9 +414,8 @@ private ServerName[] multiRackCaseWithRestrictions( ServerName 
tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet); if (secondaryRS == null || tertiaryRS == null) { - LOG.error("Cannot place the secondary and tertiary" - + " region server for region " - + regionInfo.getRegionNameAsString()); + LOG.error("Cannot place the secondary and tertiary" + " region server for region " + + regionInfo.getRegionNameAsString()); } // Create the secondary and tertiary pair favoredNodes = new ServerName[2]; @@ -452,9 +444,8 @@ private ServerName[] multiRackCaseWithRestrictions( return favoredNodes; } - private ServerName[] singleRackCase(RegionInfo regionInfo, - ServerName primaryRS, - String primaryRack) throws IOException { + private ServerName[] singleRackCase(RegionInfo regionInfo, ServerName primaryRS, + String primaryRack) throws IOException { // Single rack case: have to pick the secondary and tertiary // from the same rack List serverList = getServersFromRack(primaryRack); @@ -465,45 +456,43 @@ private ServerName[] singleRackCase(RegionInfo regionInfo, } else { // Randomly select two region servers from the server list and make sure // they are not overlap with the primary region server; - Set serverSkipSet = new HashSet<>(); - serverSkipSet.add(primaryRS); + Set serverSkipSet = new HashSet<>(); + serverSkipSet.add(primaryRS); - // Place the secondary RS - ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet); - // Skip the secondary for the tertiary placement - serverSkipSet.add(secondaryRS); - ServerName tertiaryRS = getOneRandomServer(primaryRack, serverSkipSet); + // Place the secondary RS + ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet); + // Skip the secondary for the tertiary placement + serverSkipSet.add(secondaryRS); + ServerName tertiaryRS = getOneRandomServer(primaryRack, serverSkipSet); - if (secondaryRS == null || tertiaryRS == null) { - LOG.error("Cannot place the secondary, tertiary favored node for region " + - regionInfo.getRegionNameAsString()); - } - // Create the secondary and tertiary pair - ServerName[] favoredNodes = new ServerName[2]; - favoredNodes[0] = secondaryRS; - favoredNodes[1] = tertiaryRS; - return favoredNodes; + if (secondaryRS == null || tertiaryRS == null) { + LOG.error("Cannot place the secondary, tertiary favored node for region " + + regionInfo.getRegionNameAsString()); + } + // Create the secondary and tertiary pair + ServerName[] favoredNodes = new ServerName[2]; + favoredNodes[0] = secondaryRS; + favoredNodes[1] = tertiaryRS; + return favoredNodes; } } /** - * Place secondary and tertiary nodes in a multi rack case. - * If there are only two racks, then we try the place the secondary - * and tertiary on different rack than primary. But if the other rack has - * only one region server, then we place primary and tertiary on one rack - * and secondary on another. The aim is two distribute the three favored nodes - * on >= 2 racks. - * TODO: see how we can use generateMissingFavoredNodeMultiRack API here - * @param regionInfo Region for which we are trying to generate FN - * @param primaryRS The primary favored node. + * Place secondary and tertiary nodes in a multi rack case. If there are only two racks, then we + * try the place the secondary and tertiary on different rack than primary. But if the other rack + * has only one region server, then we place primary and tertiary on one rack and secondary on + * another. The aim is two distribute the three favored nodes on >= 2 racks. 
TODO: see how we can + * use generateMissingFavoredNodeMultiRack API here + * @param regionInfo Region for which we are trying to generate FN + * @param primaryRS The primary favored node. * @param primaryRack The rack of the primary favored node. * @return Array containing secondary and tertiary favored nodes. * @throws IOException Signals that an I/O exception has occurred. */ private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS, - String primaryRack) throws IOException { + String primaryRack) throws IOException { - ListfavoredNodes = Lists.newArrayList(primaryRS); + List favoredNodes = Lists.newArrayList(primaryRS); // Create the secondary and tertiary pair ServerName secondaryRS = generateMissingFavoredNodeMultiRack(favoredNodes); favoredNodes.add(secondaryRS); @@ -523,7 +512,7 @@ private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS, tertiaryRS = generateMissingFavoredNode(Lists.newArrayList(primaryRS, secondaryRS)); } } - return new ServerName[]{ secondaryRS, tertiaryRS }; + return new ServerName[] { secondaryRS, tertiaryRS }; } public boolean canPlaceFavoredNodes() { @@ -540,15 +529,13 @@ private List getServersFromRack(String rack) { /** * Gets a random server from the specified rack and skips anything specified. - - * @param rack rack from a server is needed + * @param rack rack from a server is needed * @param skipServerSet the server shouldn't belong to this set */ protected ServerName getOneRandomServer(String rack, Set skipServerSet) { // Is the rack valid? Do we recognize it? - if (rack == null || getServersFromRack(rack) == null || - getServersFromRack(rack).isEmpty()) { + if (rack == null || getServersFromRack(rack) == null || getServersFromRack(rack).isEmpty()) { return null; } @@ -615,12 +602,12 @@ public static String getFavoredNodesAsString(List nodes) { } /* - * Generates a missing favored node based on the input favored nodes. This helps to generate - * new FN when there is already 2 FN and we need a third one. For eg, while generating new FN - * for split daughters after inheriting 2 FN from the parent. If the cluster has only one rack - * it generates from the same rack. If the cluster has multiple racks, then it ensures the new - * FN respects the rack constraints similar to HDFS. For eg: if there are 3 FN, they will be - * spread across 2 racks. + * Generates a missing favored node based on the input favored nodes. This helps to generate new + * FN when there is already 2 FN and we need a third one. For eg, while generating new FN for + * split daughters after inheriting 2 FN from the parent. If the cluster has only one rack it + * generates from the same rack. If the cluster has multiple racks, then it ensures the new FN + * respects the rack constraints similar to HDFS. For eg: if there are 3 FN, they will be spread + * across 2 racks. */ public ServerName generateMissingFavoredNode(List favoredNodes) throws IOException { if (this.uniqueRackList.size() == 1) { @@ -631,7 +618,7 @@ public ServerName generateMissingFavoredNode(List favoredNodes) thro } public ServerName generateMissingFavoredNode(List favoredNodes, - List excludeNodes) throws IOException { + List excludeNodes) throws IOException { if (this.uniqueRackList.size() == 1) { return generateMissingFavoredNodeSingleRack(favoredNodes, excludeNodes); } else { @@ -644,7 +631,7 @@ public ServerName generateMissingFavoredNode(List favoredNodes, * when we would like to find a replacement node. 
*/ private ServerName generateMissingFavoredNodeSingleRack(List favoredNodes, - List excludeNodes) throws IOException { + List excludeNodes) throws IOException { ServerName newServer = null; Set excludeFNSet = Sets.newHashSet(favoredNodes); if (excludeNodes != null && excludeNodes.size() > 0) { @@ -657,20 +644,19 @@ private ServerName generateMissingFavoredNodeSingleRack(List favored } private ServerName generateMissingFavoredNodeMultiRack(List favoredNodes) - throws IOException { + throws IOException { return generateMissingFavoredNodeMultiRack(favoredNodes, null); } /* - * Generates a missing FN based on the input favoredNodes and also the nodes to be skipped. - * - * Get the current layout of favored nodes arrangement and nodes to be excluded and get a - * random node that goes with HDFS block placement. Eg: If the existing nodes are on one rack, - * generate one from another rack. We exclude as much as possible so the random selection - * has more chance to generate a node within a few iterations, ideally 1. + * Generates a missing FN based on the input favoredNodes and also the nodes to be skipped. Get + * the current layout of favored nodes arrangement and nodes to be excluded and get a random node + * that goes with HDFS block placement. Eg: If the existing nodes are on one rack, generate one + * from another rack. We exclude as much as possible so the random selection has more chance to + * generate a node within a few iterations, ideally 1. */ private ServerName generateMissingFavoredNodeMultiRack(List favoredNodes, - List excludeNodes) throws IOException { + List excludeNodes) throws IOException { Set racks = Sets.newHashSet(); Map> rackToFNMapping = new HashMap<>(); @@ -692,8 +678,8 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN Set skipRackSet = Sets.newHashSet(); /* - * If both the FN are from the same rack, then we don't want to generate another FN on the - * same rack. If that rack fails, the region would be unavailable. + * If both the FN are from the same rack, then we don't want to generate another FN on the same + * rack. If that rack fails, the region would be unavailable. */ if (racks.size() == 1 && favoredNodes.size() > 1) { skipRackSet.add(racks.iterator().next()); @@ -704,8 +690,10 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN * reduce the number of iterations for FN selection. 
*/ for (String rack : racks) { - if (getServersFromRack(rack) != null && - rackToFNMapping.get(rack).size() == getServersFromRack(rack).size()) { + if ( + getServersFromRack(rack) != null + && rackToFNMapping.get(rack).size() == getServersFromRack(rack).size() + ) { skipRackSet.add(rack); } } @@ -730,23 +718,22 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN if (newServer == null) { if (LOG.isTraceEnabled()) { - LOG.trace(String.format("Unable to generate additional favored nodes for %s after " + LOG.trace(String.format( + "Unable to generate additional favored nodes for %s after " + "considering racks %s and skip rack %s with a unique rack list of %s and rack " + "to RS map of %s and RS to rack map of %s", StringUtils.join(favoredNodes, ","), randomRacks, skipRackSet, uniqueRackList, rackToRegionServerMap, regionServerToRackMap)); } - throw new IOException(" Unable to generate additional favored nodes for " - + StringUtils.join(favoredNodes, ",")); + throw new IOException( + " Unable to generate additional favored nodes for " + StringUtils.join(favoredNodes, ",")); } return newServer; } /* - * Generate favored nodes for a region. - * - * Choose a random server as primary and then choose secondary and tertiary FN so its spread - * across two racks. + * Generate favored nodes for a region. Choose a random server as primary and then choose + * secondary and tertiary FN so its spread across two racks. */ public List generateFavoredNodes(RegionInfo hri) throws IOException { @@ -757,7 +744,7 @@ public List generateFavoredNodes(RegionInfo hri) throws IOException Map primaryRSMap = new HashMap<>(1); primaryRSMap.put(hri, primary); Map secondaryAndTertiaryRSMap = - placeSecondaryAndTertiaryRS(primaryRSMap); + placeSecondaryAndTertiaryRS(primaryRSMap); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(hri); if (secondaryAndTertiaryNodes != null && secondaryAndTertiaryNodes.length == 2) { for (ServerName sn : secondaryAndTertiaryNodes) { @@ -770,8 +757,7 @@ public List generateFavoredNodes(RegionInfo hri) throws IOException } public Map> generateFavoredNodesRoundRobin( - Map> assignmentMap, List regions) - throws IOException { + Map> assignmentMap, List regions) throws IOException { if (regions.size() > 0) { if (canPlaceFavoredNodes()) { @@ -790,8 +776,8 @@ public Map> generateFavoredNodesRoundRobin( /* * Generate favored nodes for a set of regions when we know where they are currently hosted. 
*/ - private Map> generateFavoredNodes( - Map primaryRSMap) { + private Map> + generateFavoredNodes(Map primaryRSMap) { Map> generatedFavNodes = new HashMap<>(); Map secondaryAndTertiaryRSMap = @@ -801,16 +787,14 @@ private Map> generateFavoredNodes( List favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM); RegionInfo region = entry.getKey(); ServerName primarySN = entry.getValue(); - favoredNodesForRegion.add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), - NON_STARTCODE)); + favoredNodesForRegion + .add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), NON_STARTCODE)); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); if (secondaryAndTertiaryNodes != null) { - favoredNodesForRegion.add(ServerName.valueOf( - secondaryAndTertiaryNodes[0].getHostname(), secondaryAndTertiaryNodes[0].getPort(), - NON_STARTCODE)); - favoredNodesForRegion.add(ServerName.valueOf( - secondaryAndTertiaryNodes[1].getHostname(), secondaryAndTertiaryNodes[1].getPort(), - NON_STARTCODE)); + favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), + secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); + favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), + secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); } generatedFavNodes.put(region, favoredNodesForRegion); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index d67e7b4066a5..ad369ed2a7f1 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.favored; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.PRIMARY; @@ -48,18 +47,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that - * assigns favored nodes for each region. There is a Primary RegionServer that hosts - * the region, and then there is Secondary and Tertiary RegionServers. Currently, the - * favored nodes information is used in creating HDFS files - the Primary RegionServer - * passes the primary, secondary, tertiary node addresses as hints to the - * DistributedFileSystem API for creating files on the filesystem. These nodes are - * treated as hints by the HDFS to place the blocks of the file. This alleviates the - * problem to do with reading from remote nodes (since we can make the Secondary - * RegionServer as the new Primary RegionServer) after a region is recovered. This - * should help provide consistent read latencies for the regions even when their - * primary region servers die. - * + * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that assigns favored + * nodes for each region. There is a Primary RegionServer that hosts the region, and then there is + * Secondary and Tertiary RegionServers. 
Currently, the favored nodes information is used in + * creating HDFS files - the Primary RegionServer passes the primary, secondary, tertiary node + * addresses as hints to the DistributedFileSystem API for creating files on the filesystem. These + * nodes are treated as hints by the HDFS to place the blocks of the file. This alleviates the + * problem to do with reading from remote nodes (since we can make the Secondary RegionServer as the + * new Primary RegionServer) after a region is recovered. This should help provide consistent read + * latencies for the regions even when their primary region servers die. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements FavoredNodesPromoter { @@ -74,7 +70,7 @@ public void setFavoredNodesManager(FavoredNodesManager fnm) { @Override protected List balanceTable(TableName tableName, - Map> loadOfOneTable) { + Map> loadOfOneTable) { // TODO. Look at is whether Stochastic loadbalancer can be integrated with this List plans = new ArrayList<>(); Map serverNameWithoutCodeToServerName = new HashMap<>(); @@ -101,8 +97,10 @@ protected List balanceTable(TableName tableName, ServerName destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(0)); if (destination == null) { // check whether the region is on secondary/tertiary - if (currentServerWithoutStartCode.equals(favoredNodes.get(1)) - || currentServerWithoutStartCode.equals(favoredNodes.get(2))) { + if ( + currentServerWithoutStartCode.equals(favoredNodes.get(1)) + || currentServerWithoutStartCode.equals(favoredNodes.get(2)) + ) { continue; } // the region is currently on none of the favored nodes @@ -136,42 +134,42 @@ protected List balanceTable(TableName tableName, @Override @NonNull public Map> roundRobinAssignment(List regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { Map> assignmentMap; try { FavoredNodeAssignmentHelper assignmentHelper = - new FavoredNodeAssignmentHelper(servers, rackManager); + new FavoredNodeAssignmentHelper(servers, rackManager); assignmentHelper.initialize(); if (!assignmentHelper.canPlaceFavoredNodes()) { return super.roundRobinAssignment(regions, servers); } // Segregate the regions into two types: // 1. The regions that have favored node assignment, and where at least - // one of the favored node is still alive. In this case, try to adhere - // to the current favored nodes assignment as much as possible - i.e., - // if the current primary is gone, then make the secondary or tertiary - // as the new host for the region (based on their current load). - // Note that we don't change the favored - // node assignments here (even though one or more favored node is currently - // down). It is up to the balanceCluster to do this hard work. The HDFS - // can handle the fact that some nodes in the favored nodes hint is down - // It'd allocate some other DNs. In combination with stale settings for HDFS, - // we should be just fine. + // one of the favored node is still alive. In this case, try to adhere + // to the current favored nodes assignment as much as possible - i.e., + // if the current primary is gone, then make the secondary or tertiary + // as the new host for the region (based on their current load). + // Note that we don't change the favored + // node assignments here (even though one or more favored node is currently + // down). It is up to the balanceCluster to do this hard work. 
The HDFS + // can handle the fact that some nodes in the favored nodes hint is down + // It'd allocate some other DNs. In combination with stale settings for HDFS, + // we should be just fine. // 2. The regions that currently don't have favored node assignment. We will - // need to come up with favored nodes assignments for them. The corner case - // in (1) above is that all the nodes are unavailable and in that case, we - // will note that this region doesn't have favored nodes. - Pair>, List> segregatedRegions = - segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); - Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); + // need to come up with favored nodes assignments for them. The corner case + // in (1) above is that all the nodes are unavailable and in that case, we + // will note that this region doesn't have favored nodes. + Pair>, List> segregatedRegions = + segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); + Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); List regionsWithNoFavoredNodes = segregatedRegions.getSecond(); assignmentMap = new HashMap<>(); roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes); // merge the assignment maps assignmentMap.putAll(regionsWithFavoredNodesMap); } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + - " Falling back to regular assignment"); + LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + + " Falling back to regular assignment"); assignmentMap = super.roundRobinAssignment(regions, servers); } return assignmentMap; @@ -179,14 +177,16 @@ public Map> roundRobinAssignment(List r @Override public ServerName randomAssignment(RegionInfo regionInfo, List servers) - throws HBaseIOException { + throws HBaseIOException { try { FavoredNodeAssignmentHelper assignmentHelper = - new FavoredNodeAssignmentHelper(servers, rackManager); + new FavoredNodeAssignmentHelper(servers, rackManager); assignmentHelper.initialize(); ServerName primary = super.randomAssignment(regionInfo, servers); - if (!FavoredNodesManager.isFavoredNodeApplicable(regionInfo) - || !assignmentHelper.canPlaceFavoredNodes()) { + if ( + !FavoredNodesManager.isFavoredNodeApplicable(regionInfo) + || !assignmentHelper.canPlaceFavoredNodes() + ) { return primary; } List favoredNodes = fnm.getFavoredNodes(regionInfo); @@ -208,8 +208,8 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); return primary; } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + - " Falling back to regular assignment"); + LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + + " Falling back to regular assignment"); return super.randomAssignment(regionInfo, servers); } } @@ -230,7 +230,7 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve ServerName serverWithLegitStartCode = availableServersContains(availableServers, s); if (serverWithLegitStartCode != null) { FavoredNodesPlan.Position position = - FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); if (Position.PRIMARY.equals(position)) { primaryHost = serverWithLegitStartCode; } else if (Position.SECONDARY.equals(position)) { @@ -240,11 +240,11 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve } } 
} - assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, - primaryHost, secondaryHost, tertiaryHost); + assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost, + secondaryHost, tertiaryHost); } if (primaryHost == null && secondaryHost == null && tertiaryHost == null) { - //all favored nodes unavailable + // all favored nodes unavailable regionsWithNoFavoredNodes.add(region); } } @@ -263,9 +263,9 @@ private ServerName availableServersContains(List servers, ServerName return null; } - private void assignRegionToAvailableFavoredNode(Map> assignmentMapForFavoredNodes, RegionInfo region, ServerName primaryHost, - ServerName secondaryHost, ServerName tertiaryHost) { + private void assignRegionToAvailableFavoredNode( + Map> assignmentMapForFavoredNodes, RegionInfo region, + ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) { if (primaryHost != null) { addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost); } else if (secondaryHost != null && tertiaryHost != null) { @@ -311,11 +311,11 @@ private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelp } private void assignSecondaryAndTertiaryNodesForRegion( - FavoredNodeAssignmentHelper assignmentHelper, - List regions, Map primaryRSMap) throws IOException { + FavoredNodeAssignmentHelper assignmentHelper, List regions, + Map primaryRSMap) throws IOException { // figure the secondary and tertiary RSs Map secondaryAndTertiaryRSMap = - assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap); + assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap); Map> regionFNMap = Maps.newHashMap(); // now record all the assignments so that we can serve queries later @@ -324,14 +324,14 @@ private void assignSecondaryAndTertiaryNodesForRegion( // We don't care about the startcode; but only the hostname really List favoredNodesForRegion = new ArrayList<>(3); ServerName sn = primaryRSMap.get(region); - favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - ServerName.NON_STARTCODE)); + favoredNodesForRegion + .add(ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE)); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); if (secondaryAndTertiaryNodes != null) { favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), - secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); + secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), - secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); + secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); } regionFNMap.put(region, favoredNodesForRegion); } @@ -339,18 +339,16 @@ private void assignSecondaryAndTertiaryNodesForRegion( } /* - * Generate Favored Nodes for daughters during region split. - * - * If the parent does not have FN, regenerates them for the daughters. - * - * If the parent has FN, inherit two FN from parent for each daughter and generate the remaining. - * The primary FN for both the daughters should be the same as parent. Inherit the secondary - * FN from the parent but keep it different for each daughter. Choose the remaining FN - * randomly. This would give us better distribution over a period of time after enough splits. + * Generate Favored Nodes for daughters during region split. If the parent does not have FN, + * regenerates them for the daughters. 
If the parent has FN, inherit two FN from parent for each + * daughter and generate the remaining. The primary FN for both the daughters should be the same + * as parent. Inherit the secondary FN from the parent but keep it different for each daughter. + * Choose the remaining FN randomly. This would give us better distribution over a period of time + * after enough splits. */ @Override public void generateFavoredNodesForDaughter(List servers, RegionInfo parent, - RegionInfo regionA, RegionInfo regionB) throws IOException { + RegionInfo regionA, RegionInfo regionB) throws IOException { Map> result = new HashMap<>(); FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); @@ -359,7 +357,7 @@ public void generateFavoredNodesForDaughter(List servers, RegionInfo List parentFavoredNodes = getFavoredNodes(parent); if (parentFavoredNodes == null) { LOG.debug("Unable to find favored nodes for parent, " + parent - + " generating new favored nodes for daughter"); + + " generating new favored nodes for daughter"); result.put(regionA, helper.generateFavoredNodes(regionA)); result.put(regionB, helper.generateFavoredNodes(regionB)); @@ -367,12 +365,12 @@ public void generateFavoredNodesForDaughter(List servers, RegionInfo // Lets get the primary and secondary from parent for regionA Set regionAFN = - getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY); + getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY); result.put(regionA, Lists.newArrayList(regionAFN)); // Lets get the primary and tertiary from parent for regionB Set regionBFN = - getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY); + getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY); result.put(regionB, Lists.newArrayList(regionBFN)); } @@ -380,8 +378,7 @@ public void generateFavoredNodesForDaughter(List servers, RegionInfo } private Set getInheritedFNForDaughter(FavoredNodeAssignmentHelper helper, - List parentFavoredNodes, Position primary, Position secondary) - throws IOException { + List parentFavoredNodes, Position primary, Position secondary) throws IOException { Set daughterFN = Sets.newLinkedHashSet(); if (parentFavoredNodes.size() >= primary.ordinal()) { @@ -400,12 +397,12 @@ private Set getInheritedFNForDaughter(FavoredNodeAssignmentHelper he } /* - * Generate favored nodes for a region during merge. Choose the FN from one of the sources to - * keep it simple. + * Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep + * it simple. */ @Override - public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) - throws IOException { + public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) + throws IOException { Map> regionFNMap = Maps.newHashMap(); regionFNMap.put(merged, getFavoredNodes(mergeParents[0])); fnm.updateFavoredNodes(regionFNMap); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java index d56e9e6b0404..82b5ba194c9d 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java @@ -97,8 +97,8 @@ public synchronized List getFavoredNodes(RegionInfo regionInfo) { } /** - * Favored nodes are not applicable for system tables. 
We will use this to check before - * we apply any favored nodes logic on a region. + * Favored nodes are not applicable for system tables. We will use this to check before we apply + * any favored nodes logic on a region. */ public static boolean isFavoredNodeApplicable(RegionInfo regionInfo) { return !regionInfo.getTable().isSystemTable(); @@ -113,9 +113,9 @@ public static Set filterNonFNApplicableRegions(Collection getFavoredNodesWithDNPort(RegionInfo regionInfo) { if (getFavoredNodes(regionInfo) == null) { @@ -124,14 +124,14 @@ public synchronized List getFavoredNodesWithDNPort(RegionInfo region List fnWithDNPort = Lists.newArrayList(); for (ServerName sn : getFavoredNodes(regionInfo)) { - fnWithDNPort.add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, - NON_STARTCODE)); + fnWithDNPort + .add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, NON_STARTCODE)); } return fnWithDNPort; } public synchronized void updateFavoredNodes(Map> regionFNMap) - throws IOException { + throws IOException { Map> regionToFavoredNodes = new HashMap<>(); for (Map.Entry> entry : regionFNMap.entrySet()) { RegionInfo regionInfo = entry.getKey(); @@ -147,13 +147,13 @@ public synchronized void updateFavoredNodes(Map> re if (!isFavoredNodeApplicable(regionInfo)) { throw new IOException("Can't update FN for a un-applicable region: " - + regionInfo.getRegionNameAsString() + " with " + servers); + + regionInfo.getRegionNameAsString() + " with " + servers); } if (servers.size() != FAVORED_NODES_NUM) { - throw new IOException("At least " + FAVORED_NODES_NUM - + " favored nodes should be present for region : " + regionInfo.getEncodedName() - + " current FN servers:" + servers); + throw new IOException( + "At least " + FAVORED_NODES_NUM + " favored nodes should be present for region : " + + regionInfo.getEncodedName() + " current FN servers:" + servers); } List serversWithNoStartCodes = Lists.newArrayList(); @@ -161,8 +161,8 @@ public synchronized void updateFavoredNodes(Map> re if (sn.getStartcode() == NON_STARTCODE) { serversWithNoStartCodes.add(sn); } else { - serversWithNoStartCodes.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - NON_STARTCODE)); + serversWithNoStartCodes + .add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE)); } } regionToFavoredNodes.put(regionInfo, serversWithNoStartCodes); @@ -170,7 +170,7 @@ public synchronized void updateFavoredNodes(Map> re // Lets do a bulk update to meta since that reduces the RPC's FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, - provider.getConnection()); + provider.getConnection()); deleteFavoredNodesForRegions(regionToFavoredNodes.keySet()); for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { @@ -191,8 +191,7 @@ private synchronized void addToReplicaLoad(RegionInfo hri, List serv regionList.add(hri); primaryRSToRegionMap.put(serverToUse, regionList); - serverToUse = ServerName - .valueOf(servers.get(SECONDARY.ordinal()).getAddress(), NON_STARTCODE); + serverToUse = ServerName.valueOf(servers.get(SECONDARY.ordinal()).getAddress(), NON_STARTCODE); regionList = secondaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); @@ -200,8 +199,7 @@ private synchronized void addToReplicaLoad(RegionInfo hri, List serv regionList.add(hri); secondaryRSToRegionMap.put(serverToUse, regionList); - serverToUse = ServerName.valueOf(servers.get(TERTIARY.ordinal()).getAddress(), - NON_STARTCODE); + serverToUse = 
ServerName.valueOf(servers.get(TERTIARY.ordinal()).getAddress(), NON_STARTCODE); regionList = teritiaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java index 4481021f1bac..224a32c222af 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,11 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This class contains the mapping information between each region name and - * its favored region server list. Used by FavoredNodeLoadBalancer set - * of classes and from unit tests (hence the class is public) - * - * All the access to this class is thread-safe. + * This class contains the mapping information between each region name and its favored region + * server list. Used by FavoredNodeLoadBalancer set of classes and from unit tests (hence the class + * is public) All the access to this class is thread-safe. */ @InterfaceAudience.Private public class FavoredNodesPlan { @@ -82,18 +79,19 @@ public List getFavoredNodes(RegionInfo region) { } /** - * Return the position of the server in the favoredNodes list. Assumes the - * favoredNodes list is of size 3. - * @return position + * Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of + * size 3. n */ - public static Position getFavoredServerPosition( - List favoredNodes, ServerName server) { - if (favoredNodes == null || server == null || - favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + public static Position getFavoredServerPosition(List favoredNodes, + ServerName server) { + if ( + favoredNodes == null || server == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + ) { return null; } for (Position p : Position.values()) { - if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()),server)) { + if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()), server)) { return p; } } @@ -105,8 +103,8 @@ public static Position getFavoredServerPosition( */ public Map> getAssignmentMap() { // Make a deep copy so changes don't harm our copy of favoredNodesMap. - return this.favoredNodesMap.entrySet().stream(). - collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList(v.getValue()))); + return this.favoredNodesMap.entrySet().stream() + .collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList(v.getValue()))); } public int size() { @@ -125,7 +123,7 @@ public boolean equals(Object o) { return false; } // To compare the map from object o is identical to current assignment map. 
- Map> comparedMap = ((FavoredNodesPlan)o).favoredNodesMap; + Map> comparedMap = ((FavoredNodesPlan) o).favoredNodesMap; // compare the size if (comparedMap.size() != this.favoredNodesMap.size()) { @@ -133,8 +131,7 @@ public boolean equals(Object o) { } // compare each element in the assignment map - for (Map.Entry> entry : - comparedMap.entrySet()) { + for (Map.Entry> entry : comparedMap.entrySet()) { List serverList = this.favoredNodesMap.get(entry.getKey()); if (serverList == null && entry.getValue() != null) { return false; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java index ba7af6682abd..912d3120952a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java @@ -19,10 +19,9 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface FavoredNodesPromoter { @@ -30,11 +29,11 @@ public interface FavoredNodesPromoter { /* Try and assign regions even if favored nodes are dead */ String FAVORED_ALWAYS_ASSIGN_REGIONS = "hbase.favored.assignment.always.assign"; - void generateFavoredNodesForDaughter(List servers, - RegionInfo parent, RegionInfo hriA, RegionInfo hriB) throws IOException; + void generateFavoredNodesForDaughter(List servers, RegionInfo parent, RegionInfo hriA, + RegionInfo hriB) throws IOException; - void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) - throws IOException; + void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) + throws IOException; List getFavoredNodes(RegionInfo regionInfo); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java index 2a7600079d7c..a419fedd8bdb 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java @@ -17,16 +17,17 @@ */ package org.apache.hadoop.hbase.favored; -import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Addressing; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; /** - * This class differs from ServerName in that start code is always ignored. This is because - * start code, ServerName.NON_STARTCODE is used to persist favored nodes and keeping this separate - * from {@link ServerName} is much cleaner. This should only be used by Favored node specific - * classes and should not be used outside favored nodes. + * This class differs from ServerName in that start code is always ignored. This is because start + * code, ServerName.NON_STARTCODE is used to persist favored nodes and keeping this separate from + * {@link ServerName} is much cleaner. 
This should only be used by Favored node specific classes and + * should not be used outside favored nodes. */ @InterfaceAudience.Private class StartcodeAgnosticServerName extends ServerName { @@ -37,17 +38,18 @@ public StartcodeAgnosticServerName(final String hostname, final int port, long s public static StartcodeAgnosticServerName valueOf(final ServerName serverName) { return new StartcodeAgnosticServerName(serverName.getHostname(), serverName.getPort(), - serverName.getStartcode()); + serverName.getStartcode()); } public static StartcodeAgnosticServerName valueOf(final String hostnameAndPort, long startcode) { return new StartcodeAgnosticServerName(Addressing.parseHostname(hostnameAndPort), - Addressing.parsePort(hostnameAndPort), startcode); + Addressing.parsePort(hostnameAndPort), startcode); } - public static StartcodeAgnosticServerName valueOf(final HostAndPort hostnameAndPort, long startcode) { - return new StartcodeAgnosticServerName(hostnameAndPort.getHost(), - hostnameAndPort.getPort(), startcode); + public static StartcodeAgnosticServerName valueOf(final HostAndPort hostnameAndPort, + long startcode) { + return new StartcodeAgnosticServerName(hostnameAndPort.getHost(), hostnameAndPort.getPort(), + startcode); } @Override diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java index 3a528f42c43e..2c49e26e9cf9 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -34,15 +32,14 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** - * Helper class that is used by RegionPlacementMaintainer to print - * information for favored nodes - * + * Helper class that is used by RegionPlacementMaintainer to print information for favored nodes */ @InterfaceAudience.Private public class AssignmentVerificationReport { - private static final Logger LOG = LoggerFactory.getLogger( - AssignmentVerificationReport.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(AssignmentVerificationReport.class.getName()); private TableName tableName = null; private boolean enforceLocality = false; @@ -63,7 +60,7 @@ public class AssignmentVerificationReport { private int totalFavoredAssignments = 0; private int[] favoredNodes = new int[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; private float[] favoredNodesLocalitySummary = - new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; + new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; private float actualLocalitySummary = 0; // For region balancing information @@ -86,21 +83,19 @@ public class AssignmentVerificationReport { private Set minDispersionNumServerSet = new HashSet<>(); public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, - Map> regionLocalityMap) { + Map> regionLocalityMap) { // Set the table name this.tableName = tableName; // Get all the regions for this table - List regionInfoList = - snapshot.getTableToRegionMap().get(tableName); + List regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); // Get the existing assignment plan FavoredNodesPlan favoredNodesAssignment = snapshot.getExistingAssignmentPlan(); // Get the region to region server mapping - Map currentAssignment = - snapshot.getRegionToRegionServerMap(); + Map currentAssignment = snapshot.getRegionToRegionServerMap(); // Initialize the server to its hosing region counter map Map serverToHostingRegionCounterMap = new HashMap<>(); @@ -128,18 +123,17 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // Get the favored nodes from the assignment plan and verify it. 
List favoredNodes = favoredNodesAssignment.getFavoredNodes(region); - if (favoredNodes == null || - favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + if ( + favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + ) { regionsWithoutValidFavoredNodes.add(region); continue; } // Get the primary, secondary and tertiary region server - ServerName primaryRS = - favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); - ServerName secondaryRS = - favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); - ServerName tertiaryRS = - favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); @@ -187,43 +181,37 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { ServerName favoredNode = favoredNodes.get(p.ordinal()); // Get the locality for the current favored nodes - Float locality = - regionDegreeLocalityMap.get(favoredNode.getHostname()); + Float locality = regionDegreeLocalityMap.get(favoredNode.getHostname()); if (locality != null) { this.favoredNodesLocalitySummary[p.ordinal()] += locality; } } // Get the locality summary for the current region server - Float actualLocality = - regionDegreeLocalityMap.get(currentRS.getHostname()); + Float actualLocality = regionDegreeLocalityMap.get(currentRS.getHostname()); if (actualLocality != null) { this.actualLocalitySummary += actualLocality; } } } catch (Exception e) { - LOG.error("Cannot verify the region assignment for region " + - ((region == null) ? " null " : region.getRegionNameAsString()) + - "because of " + e); + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? 
" null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server - for (Map.Entry entry : - primaryRSToRegionCounterMap.entrySet()) { + for (Map.Entry entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; - if (primaryToSecTerRSMap.get(primaryRS) != null - && regionsOnPrimary.intValue() != 0) { + if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); - dispersionScore = dispersionNum / - ((float) regionsOnPrimary.intValue() * 2); + dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion score if (dispersionScore > this.maxDispersionScore) { @@ -267,15 +255,14 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { - this.avgDispersionScore = dispersionScoreSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); - this.avgDispersionNum = dispersionNumSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionScore = + dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = + dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } // Fill up the most loaded and least loaded region server information - for (Map.Entry entry : - serverToHostingRegionCounterMap.entrySet()) { + for (Map.Entry entry : serverToHostingRegionCounterMap.entrySet()) { ServerName currentRS = entry.getKey(); int hostRegionCounter = entry.getValue().intValue(); @@ -300,25 +287,21 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // and total region servers this.totalRegionServers = serverToHostingRegionCounterMap.keySet().size(); - this.avgRegionsOnRS = (totalRegionServers == 0) ? 0 : - (totalRegions / (float) totalRegionServers); + this.avgRegionsOnRS = + (totalRegionServers == 0) ? 0 : (totalRegions / (float) totalRegionServers); // Set the isFilledUp as true isFilledUp = true; } /** - * Use this to project the dispersion scores - * @param tableName - * @param snapshot - * @param newPlan + * Use this to project the dispersion scores nnn */ - public void fillUpDispersion(TableName tableName, - SnapshotOfRegionAssignmentFromMeta snapshot, FavoredNodesPlan newPlan) { + public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, + FavoredNodesPlan newPlan) { // Set the table name this.tableName = tableName; // Get all the regions for this table - List regionInfoList = snapshot.getTableToRegionMap().get( - tableName); + List regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); FavoredNodesPlan plan = null; @@ -337,18 +320,17 @@ public void fillUpDispersion(TableName tableName, try { // Get the favored nodes from the assignment plan and verify it. 
List favoredNodes = plan.getFavoredNodes(region); - if (favoredNodes == null - || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + if ( + favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + ) { regionsWithoutValidFavoredNodes.add(region); continue; } // Get the primary, secondary and tertiary region server - ServerName primaryRS = favoredNodes - .get(FavoredNodesPlan.Position.PRIMARY.ordinal()); - ServerName secondaryRS = favoredNodes - .get(FavoredNodesPlan.Position.SECONDARY.ordinal()); - ServerName tertiaryRS = favoredNodes - .get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); @@ -368,26 +350,22 @@ public void fillUpDispersion(TableName tableName, primaryToSecTerRSMap.put(primaryRS, secAndTerSet); } catch (Exception e) { LOG.error("Cannot verify the region assignment for region " - + ((region == null) ? " null " : region.getRegionNameAsString()) - + "because of " + e); + + ((region == null) ? " null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server - for (Map.Entry entry : - primaryRSToRegionCounterMap.entrySet()) { + for (Map.Entry entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; - if (primaryToSecTerRSMap.get(primaryRS) != null - && regionsOnPrimary.intValue() != 0) { + if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); - dispersionScore = dispersionNum / - ((float) regionsOnPrimary.intValue() * 2); + dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion num @@ -423,18 +401,16 @@ public void fillUpDispersion(TableName tableName, // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { - this.avgDispersionScore = dispersionScoreSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); - this.avgDispersionNum = dispersionNumSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionScore = + dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = + dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } } /** - * @return list which contains just 3 elements: average dispersion score, max - * dispersion score and min dispersion score as first, second and third element - * respectively. - * + * @return list which contains just 3 elements: average dispersion score, max dispersion score and + * min dispersion score as first, second and third element respectively. 
*/ public List getDispersionInformation() { List dispersion = new ArrayList<>(); @@ -446,41 +422,38 @@ public List getDispersionInformation() { public void print(boolean isDetailMode) { if (!isFilledUp) { - System.err.println("[Error] Region assignment verification report" + - "hasn't been filled up"); + System.err.println("[Error] Region assignment verification report" + "hasn't been filled up"); } - DecimalFormat df = new java.text.DecimalFormat( "#.##"); + DecimalFormat df = new java.text.DecimalFormat("#.##"); // Print some basic information - System.out.println("Region Assignment Verification for Table: " + tableName + - "\n\tTotal regions : " + totalRegions); + System.out.println("Region Assignment Verification for Table: " + tableName + + "\n\tTotal regions : " + totalRegions); // Print the number of regions on each kinds of the favored nodes - System.out.println("\tTotal regions on favored nodes " + - totalFavoredAssignments); + System.out.println("\tTotal regions on favored nodes " + totalFavoredAssignments); for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { - System.out.println("\t\tTotal regions on "+ p.toString() + - " region servers: " + favoredNodes[p.ordinal()]); + System.out.println( + "\t\tTotal regions on " + p.toString() + " region servers: " + favoredNodes[p.ordinal()]); } // Print the number of regions in each kinds of invalid assignment - System.out.println("\tTotal unassigned regions: " + - unAssignedRegionsList.size()); + System.out.println("\tTotal unassigned regions: " + unAssignedRegionsList.size()); if (isDetailMode) { for (RegionInfo region : unAssignedRegionsList) { System.out.println("\t\t" + region.getRegionNameAsString()); } } - System.out.println("\tTotal regions NOT on favored nodes: " + - nonFavoredAssignedRegionList.size()); + System.out + .println("\tTotal regions NOT on favored nodes: " + nonFavoredAssignedRegionList.size()); if (isDetailMode) { for (RegionInfo region : nonFavoredAssignedRegionList) { System.out.println("\t\t" + region.getRegionNameAsString()); } } - System.out.println("\tTotal regions without favored nodes: " + - regionsWithoutValidFavoredNodes.size()); + System.out + .println("\tTotal regions without favored nodes: " + regionsWithoutValidFavoredNodes.size()); if (isDetailMode) { for (RegionInfo region : regionsWithoutValidFavoredNodes) { System.out.println("\t\t" + region.getRegionNameAsString()); @@ -490,77 +463,68 @@ public void print(boolean isDetailMode) { // Print the locality information if enabled if (this.enforceLocality && totalRegions != 0) { // Print the actual locality for this table - float actualLocality = 100 * - this.actualLocalitySummary / (float) totalRegions; - System.out.println("\n\tThe actual avg locality is " + - df.format(actualLocality) + " %"); + float actualLocality = 100 * this.actualLocalitySummary / (float) totalRegions; + System.out.println("\n\tThe actual avg locality is " + df.format(actualLocality) + " %"); // Print the expected locality if regions are placed on the each kinds of // favored nodes for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { - float avgLocality = 100 * - (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); - System.out.println("\t\tThe expected avg locality if all regions" + - " on the " + p.toString() + " region servers: " - + df.format(avgLocality) + " %"); + float avgLocality = 100 * (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); + System.out.println("\t\tThe expected avg locality if all regions" 
+ " on the " + + p.toString() + " region servers: " + df.format(avgLocality) + " %"); } } // Print the region balancing information - System.out.println("\n\tTotal hosting region servers: " + - totalRegionServers); + System.out.println("\n\tTotal hosting region servers: " + totalRegionServers); // Print the region balance information if (totalRegionServers != 0) { - System.out.println( - "\tAvg dispersion num: " +df.format(avgDispersionNum) + - " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + - " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + - " hosts;"); + System.out.println("\tAvg dispersion num: " + df.format(avgDispersionNum) + + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + " hosts;"); - System.out.println("\t\tThe number of the region servers with the max" + - " dispersion num: " + this.maxDispersionNumServerSet.size()); + System.out.println("\t\tThe number of the region servers with the max" + " dispersion num: " + + this.maxDispersionNumServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionNumServerSet); } - System.out.println("\t\tThe number of the region servers with the min" + - " dispersion num: " + this.minDispersionNumServerSet.size()); + System.out.println("\t\tThe number of the region servers with the min" + " dispersion num: " + + this.minDispersionNumServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionNumServerSet); } - System.out.println( - "\tAvg dispersion score: " + df.format(avgDispersionScore) + - ";\tMax dispersion score: " + df.format(maxDispersionScore) + - ";\tMin dispersion score: " + df.format(minDispersionScore) + ";"); + System.out.println("\tAvg dispersion score: " + df.format(avgDispersionScore) + + ";\tMax dispersion score: " + df.format(maxDispersionScore) + ";\tMin dispersion score: " + + df.format(minDispersionScore) + ";"); - System.out.println("\t\tThe number of the region servers with the max" + - " dispersion score: " + this.maxDispersionScoreServerSet.size()); + System.out.println("\t\tThe number of the region servers with the max" + " dispersion score: " + + this.maxDispersionScoreServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionScoreServerSet); } - System.out.println("\t\tThe number of the region servers with the min" + - " dispersion score: " + this.minDispersionScoreServerSet.size()); + System.out.println("\t\tThe number of the region servers with the min" + " dispersion score: " + + this.minDispersionScoreServerSet.size()); if (isDetailMode) { printHServerAddressSet(minDispersionScoreServerSet); } - System.out.println( - "\tAvg regions/region server: " + df.format(avgRegionsOnRS) + - ";\tMax regions/region server: " + maxRegionsOnRS + - ";\tMin regions/region server: " + minRegionsOnRS + ";"); + System.out.println("\tAvg regions/region server: " + df.format(avgRegionsOnRS) + + ";\tMax regions/region server: " + maxRegionsOnRS + ";\tMin regions/region server: " + + minRegionsOnRS + ";"); // Print the details about the most loaded region servers - System.out.println("\t\tThe number of the most loaded region servers: " - + mostLoadedRSSet.size()); + System.out + .println("\t\tThe number of the most loaded region servers: " + mostLoadedRSSet.size()); if (isDetailMode) { printHServerAddressSet(mostLoadedRSSet); } // Print the details about the least loaded region servers - System.out.println("\t\tThe number of the least loaded region servers: " - + 
leastLoadedRSSet.size()); + System.out + .println("\t\tThe number of the least loaded region servers: " + leastLoadedRSSet.size()); if (isDetailMode) { printHServerAddressSet(leastLoadedRSSet); } @@ -601,10 +565,8 @@ int getTotalFavoredAssignments() { } /** - * Return the number of regions based on the position (primary/secondary/ - * tertiary) assigned to their favored nodes - * @param position - * @return the number of regions + * Return the number of regions based on the position (primary/secondary/ tertiary) assigned to + * their favored nodes n * @return the number of regions */ int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) { return favoredNodes[position.ordinal()]; @@ -612,10 +574,10 @@ int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) { private void printHServerAddressSet(Set serverSet) { if (serverSet == null) { - return ; + return; } int i = 0; - for (ServerName addr : serverSet){ + for (ServerName addr : serverSet) { if ((i++) % 3 == 0) { System.out.print("\n\t\t\t"); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 004e3ce680a1..5c95b07f5c30 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,7 +81,6 @@ public interface LoadBalancer extends Stoppable, ConfigurationObserver { */ void updateClusterMetrics(ClusterMetrics metrics); - /** * Set the cluster info provider. Usually it is just a wrapper of master. */ @@ -103,7 +101,7 @@ List balanceCluster(Map> */ @NonNull Map> roundRobinAssignment(List regions, - List servers) throws IOException; + List servers) throws IOException; /** * Assign regions to the previously hosting region server @@ -111,7 +109,7 @@ Map> roundRobinAssignment(List regions, */ @NonNull Map> retainAssignment(Map regions, - List servers) throws IOException; + List servers) throws IOException; /** * Get a random region server from the list @@ -145,15 +143,15 @@ Map> retainAssignment(Map r */ void postMasterStartupInitialize(); - /*Updates balancer status tag reported to JMX*/ + /* Updates balancer status tag reported to JMX */ void updateBalancerStatus(boolean status); /** * In some scenarios, Balancer needs to update internal status or information according to the * current tables load - * * @param loadOfAllTable region load of servers for all table */ - default void updateBalancerLoadInfo(Map>> - loadOfAllTable){} + default void + updateBalancerLoadInfo(Map>> loadOfAllTable) { + } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java index 54ccac0cb629..db0b7b0bff33 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,17 +20,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.ScriptBasedMapping; +import org.apache.yetus.audience.InterfaceAudience; + /** - * Wrapper over the rack resolution utility in Hadoop. The rack resolution - * utility in Hadoop does resolution from hosts to the racks they belong to. - * + * Wrapper over the rack resolution utility in Hadoop. The rack resolution utility in Hadoop does + * resolution from hosts to the racks they belong to. */ @InterfaceAudience.Private public class RackManager { @@ -43,14 +42,13 @@ public RackManager() { public RackManager(Configuration conf) { switchMapping = ReflectionUtils.instantiateWithCustomCtor( - conf.getClass("hbase.util.ip.to.rack.determiner", ScriptBasedMapping.class, - DNSToSwitchMapping.class).getName(), new Class[]{Configuration.class}, - new Object[]{conf}); + conf.getClass("hbase.util.ip.to.rack.determiner", ScriptBasedMapping.class, + DNSToSwitchMapping.class).getName(), + new Class[] { Configuration.class }, new Object[] { conf }); } /** - * Get the name of the rack containing a server, according to the DNS to - * switch mapping. + * Get the name of the rack containing a server, according to the DNS to switch mapping. * @param server the server for which to get the rack name * @return the rack name of the server */ diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java index f4d6e63771aa..d6909dc2802e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java @@ -19,21 +19,16 @@ import java.io.Serializable; import java.util.Comparator; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Stores the plan for the move of an individual region. - * - * Contains info for the region being moved, info for the server the region - * should be moved from, and info for the server the region should be moved - * to. - * - * The comparable implementation of this class compares only the region - * information and not the source/dest server info. + * Stores the plan for the move of an individual region. Contains info for the region being moved, + * info for the server the region should be moved from, and info for the server the region should be + * moved to. The comparable implementation of this class compares only the region information and + * not the source/dest server info. */ @InterfaceAudience.LimitedPrivate("Coprocessors") @InterfaceStability.Evolving @@ -52,15 +47,12 @@ public int compare(RegionPlan l, RegionPlan r) { } /** - * Instantiate a plan for a region move, moving the specified region from - * the specified source server to the specified destination server. - * - * Destination server can be instantiated as null and later set - * with {@link #setDestination(ServerName)}. 
- * - * @param hri region to be moved + * Instantiate a plan for a region move, moving the specified region from the specified source + * server to the specified destination server. Destination server can be instantiated as null and + * later set with {@link #setDestination(ServerName)}. + * @param hri region to be moved * @param source regionserver region should be moved from - * @param dest regionserver region should be moved to + * @param dest regionserver region should be moved to */ public RegionPlan(final RegionInfo hri, ServerName source, ServerName dest) { this.hri = hri; @@ -134,7 +126,7 @@ private static int compareTo(RegionPlan left, RegionPlan right) { private static int compareServerName(ServerName left, ServerName right) { if (left == null) { - return right == null? 0: -1; + return right == null ? 0 : -1; } else if (right == null) { return +1; } @@ -189,8 +181,8 @@ public boolean equals(Object obj) { @Override public String toString() { - return "hri=" + this.hri.getEncodedName() + ", source=" + - (this.source == null? "": this.source.toString()) + - ", destination=" + (this.dest == null? "": this.dest.toString()); + return "hri=" + this.hri.getEncodedName() + ", source=" + + (this.source == null ? "" : this.source.toString()) + ", destination=" + + (this.dest == null ? "" : this.dest.toString()); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 9aaf111d9800..02c18c73bfb5 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,22 +48,21 @@ import org.slf4j.LoggerFactory; /** - * Used internally for reading meta and constructing datastructures that are - * then queried, for things like regions to regionservers, table to regions, etc. - * It also records the favored nodes mapping for regions. - * + * Used internally for reading meta and constructing datastructures that are then queried, for + * things like regions to regionservers, table to regions, etc. It also records the favored nodes + * mapping for regions. 
*/ @InterfaceAudience.Private public class SnapshotOfRegionAssignmentFromMeta { - private static final Logger LOG = LoggerFactory.getLogger(SnapshotOfRegionAssignmentFromMeta.class - .getName()); + private static final Logger LOG = + LoggerFactory.getLogger(SnapshotOfRegionAssignmentFromMeta.class.getName()); private final Connection connection; /** the table name to region map */ private final Map> tableToRegionMap; /** the region to region server map */ - //private final Map regionToRegionServerMap; + // private final Map regionToRegionServerMap; private Map regionToRegionServerMap; /** the region name to region info map */ private final Map regionNameToRegionInfoMap; @@ -84,7 +82,7 @@ public SnapshotOfRegionAssignmentFromMeta(Connection connection) { } public SnapshotOfRegionAssignmentFromMeta(Connection connection, Set disabledTables, - boolean excludeOfflinedSplitParents) { + boolean excludeOfflinedSplitParents) { this.connection = connection; tableToRegionMap = new HashMap<>(); regionToRegionServerMap = new HashMap<>(); @@ -152,8 +150,8 @@ private void processMetaRecord(Result result) throws IOException { * less than FAVORED_NODES_NUM, lets use as much as we can but log a warning. */ if (favoredServerList.length != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { - LOG.warn("Insufficient favored nodes for region " + hri + " fn: " + - Arrays.toString(favoredServerList)); + LOG.warn("Insufficient favored nodes for region " + hri + " fn: " + + Arrays.toString(favoredServerList)); } for (int i = 0; i < favoredServerList.length; i++) { if (i == PRIMARY.ordinal()) { @@ -167,6 +165,7 @@ private void processMetaRecord(Result result) throws IOException { } } } + /** * Initialize the region assignment snapshot by scanning the hbase:meta table */ diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java index c86a60ea4451..7f58d70c1c4e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,14 @@ public enum ServerState { CRASHED, /** - * Only server which carries meta can have this state. We will split wal for meta and then - * assign meta first before splitting other wals. + * Only server which carries meta can have this state. We will split wal for meta and then assign + * meta first before splitting other wals. */ SPLITTING_META, /** - * Indicate that the meta splitting is done. We need this state so that the UnassignProcedure - * for meta can safely quit. See the comments in UnassignProcedure.remoteCallFailed for more - * details. + * Indicate that the meta splitting is done. We need this state so that the UnassignProcedure for + * meta can safely quit. See the comments in UnassignProcedure.remoteCallFailed for more details. */ SPLITTING_META_DONE, @@ -57,4 +56,4 @@ public enum ServerState { * quit. See the comments in UnassignProcedure.remoteCallFailed for more details. 
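Read as a sketch of the documented intent (not actual procedure code), the ordering these states encode is: split the meta WAL, assign meta, then split the remaining WALs. A check like the following assumes only what the javadoc above says:

import org.apache.hadoop.hbase.master.assignment.ServerState;

final class ServerStateSketch { // illustrative only
  /** Meta can be reassigned once its WAL split has finished (or all splitting is done). */
  static boolean metaWalSplitFinished(ServerState state) {
    return state == ServerState.SPLITTING_META_DONE || state == ServerState.OFFLINE;
  }
}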
*/ OFFLINE -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/AssignRegionAction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/AssignRegionAction.java index 76d850fd99a5..c99ae092d775 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/AssignRegionAction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/AssignRegionAction.java @@ -50,4 +50,4 @@ public BalanceAction undoAction() { public String toString() { return getType() + ": " + region + ":" + server; } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalanceAction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalanceAction.java index 9158e353bb79..56b473ae710c 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalanceAction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalanceAction.java @@ -25,7 +25,10 @@ @InterfaceAudience.Private abstract class BalanceAction { enum Type { - ASSIGN_REGION, MOVE_REGION, SWAP_REGIONS, NULL, + ASSIGN_REGION, + MOVE_REGION, + SWAP_REGIONS, + NULL, } static final BalanceAction NULL_ACTION = new BalanceAction(Type.NULL) { @@ -52,4 +55,4 @@ Type getType() { public String toString() { return type + ":"; } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java index f15ca92321a0..a54a410fcdf7 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java @@ -73,11 +73,11 @@ class BalancerClusterState { int[][] regionsPerHost; // hostIndex -> list of regions int[][] regionsPerRack; // rackIndex -> region list Int2IntCounterMap[] colocatedReplicaCountsPerServer; // serverIndex -> counts of colocated - // replicas by primary region index + // replicas by primary region index Int2IntCounterMap[] colocatedReplicaCountsPerHost; // hostIndex -> counts of colocated replicas by - // primary region index + // primary region index Int2IntCounterMap[] colocatedReplicaCountsPerRack; // rackIndex -> counts of colocated replicas by - // primary region index + // primary region index int[][] serversPerHost; // hostIndex -> list of server indexes int[][] serversPerRack; // rackIndex -> list of server indexes @@ -156,8 +156,8 @@ public String getRack(ServerName server) { // a matching hostname and port to have the same index. for (ServerName sn : clusterState.keySet()) { if (sn == null) { - LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " + - "skipping; unassigned regions?"); + LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " + + "skipping; unassigned regions?"); if (LOG.isTraceEnabled()) { LOG.trace("EMPTY SERVERNAME " + clusterState.toString()); } @@ -229,8 +229,10 @@ public String getRack(ServerName server) { // keep the servername if this is the first server name for this hostname // or this servername has the newest startcode. 
- if (servers[serverIndex] == null || - servers[serverIndex].getStartcode() < entry.getKey().getStartcode()) { + if ( + servers[serverIndex] == null + || servers[serverIndex].getStartcode() < entry.getKey().getStartcode() + ) { servers[serverIndex] = entry.getKey(); } @@ -242,8 +244,8 @@ public String getRack(ServerName server) { } else { regionsPerServer[serverIndex] = new int[entry.getValue().size()]; } - colocatedReplicaCountsPerServer[serverIndex] = new Int2IntCounterMap( - regionsPerServer[serverIndex].length, Hashing.DEFAULT_LOAD_FACTOR, 0); + colocatedReplicaCountsPerServer[serverIndex] = + new Int2IntCounterMap(regionsPerServer[serverIndex].length, Hashing.DEFAULT_LOAD_FACTOR, 0); serverIndicesSortedByRegionCount[serverIndex] = serverIndex; serverIndicesSortedByLocality[serverIndex] = serverIndex; } @@ -289,7 +291,7 @@ public String getRack(ServerName server) { serversPerHost[i] = new int[serversPerHostList.get(i).size()]; for (int j = 0; j < serversPerHost[i].length; j++) { serversPerHost[i][j] = serversPerHostList.get(i).get(j); - LOG.debug("server {} is on host {}",serversPerHostList.get(i).get(j), i); + LOG.debug("server {} is on host {}", serversPerHostList.get(i).get(j), i); } if (serversPerHost[i].length > 1) { multiServersPerHost = true; @@ -300,13 +302,13 @@ public String getRack(ServerName server) { serversPerRack[i] = new int[serversPerRackList.get(i).size()]; for (int j = 0; j < serversPerRack[i].length; j++) { serversPerRack[i][j] = serversPerRackList.get(i).get(j); - LOG.info("server {} is on rack {}",serversPerRackList.get(i).get(j), i); + LOG.info("server {} is on rack {}", serversPerRackList.get(i).get(j), i); } } numTables = tables.size(); - LOG.debug("Number of tables={}, number of hosts={}, number of racks={}", numTables, - numHosts, numRacks); + LOG.debug("Number of tables={}, number of hosts={}, number of racks={}", numTables, numHosts, + numRacks); numRegionsPerServerPerTable = new int[numTables][numServers]; numRegionsPerTable = new int[numTables]; @@ -342,8 +344,8 @@ public String getRack(ServerName server) { } for (int i = 0; i < regionsPerServer.length; i++) { - colocatedReplicaCountsPerServer[i] = new Int2IntCounterMap( - regionsPerServer[i].length, Hashing.DEFAULT_LOAD_FACTOR, 0); + colocatedReplicaCountsPerServer[i] = + new Int2IntCounterMap(regionsPerServer[i].length, Hashing.DEFAULT_LOAD_FACTOR, 0); for (int j = 0; j < regionsPerServer[i].length; j++) { int primaryIndex = regionIndexToPrimaryIndex[regionsPerServer[i][j]]; colocatedReplicaCountsPerServer[i].getAndIncrement(primaryIndex); @@ -363,16 +365,15 @@ public String getRack(ServerName server) { } private void populateRegionPerLocationFromServer(int[][] regionsPerLocation, - Int2IntCounterMap[] colocatedReplicaCountsPerLocation, - int[][] serversPerLocation) { + Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int[][] serversPerLocation) { for (int i = 0; i < serversPerLocation.length; i++) { int numRegionsPerLocation = 0; for (int j = 0; j < serversPerLocation[i].length; j++) { numRegionsPerLocation += regionsPerServer[serversPerLocation[i][j]].length; } regionsPerLocation[i] = new int[numRegionsPerLocation]; - colocatedReplicaCountsPerLocation[i] = new Int2IntCounterMap(numRegionsPerLocation, - Hashing.DEFAULT_LOAD_FACTOR, 0); + colocatedReplicaCountsPerLocation[i] = + new Int2IntCounterMap(numRegionsPerLocation, Hashing.DEFAULT_LOAD_FACTOR, 0); } for (int i = 0; i < serversPerLocation.length; i++) { @@ -422,9 +423,11 @@ private void registerRegion(RegionInfo region, int regionIndex, int 
serverIndex, List loc = regionFinder.getTopBlockLocations(region); regionLocations[regionIndex] = new int[loc.size()]; for (int i = 0; i < loc.size(); i++) { - regionLocations[regionIndex][i] = loc.get(i) == null ? -1 : - (serversToIndex.get(loc.get(i).getAddress()) == null ? -1 : - serversToIndex.get(loc.get(i).getAddress())); + regionLocations[regionIndex][i] = loc.get(i) == null + ? -1 + : (serversToIndex.get(loc.get(i).getAddress()) == null + ? -1 + : serversToIndex.get(loc.get(i).getAddress())); } } } @@ -546,7 +549,8 @@ public int getRackForRegion(int region) { } enum LocalityType { - SERVER, RACK + SERVER, + RACK } public void doAction(BalanceAction action) { @@ -698,17 +702,17 @@ void regionMoved(int region, int oldServer, int newServer) { oldServer, newServer, primary, region); } } + /** * Common method for per host and per Location region index updates when a region is moved. - * @param serverIndexToLocation serverIndexToHostIndex or serverIndexToLocationIndex - * @param regionsPerLocation regionsPerHost or regionsPerLocation + * @param serverIndexToLocation serverIndexToHostIndex or serverIndexToLocationIndex + * @param regionsPerLocation regionsPerHost or regionsPerLocation * @param colocatedReplicaCountsPerLocation colocatedReplicaCountsPerHost or * colocatedReplicaCountsPerRack */ - private void updateForLocation(int[] serverIndexToLocation, - int[][] regionsPerLocation, - Int2IntCounterMap[] colocatedReplicaCountsPerLocation, - int oldServer, int newServer, int primary, int region) { + private void updateForLocation(int[] serverIndexToLocation, int[][] regionsPerLocation, + Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int oldServer, int newServer, + int primary, int region) { int oldLocation = oldServer >= 0 ? serverIndexToLocation[oldServer] : -1; int newLocation = serverIndexToLocation[newServer]; if (newLocation != oldLocation) { @@ -721,6 +725,7 @@ private void updateForLocation(int[] serverIndexToLocation, } } + int[] removeRegion(int[] regions, int regionIndex) { // TODO: this maybe costly. Consider using linked lists int[] newRegions = new int[regions.length - 1]; @@ -812,11 +817,11 @@ int getLowestLocalityRegionOnServer(int serverIndex) { return -1; } if (LOG.isTraceEnabled()) { - LOG.trace("Lowest locality region is " + - regions[regionsPerServer[serverIndex][lowestLocalityRegionIndex]] - .getRegionNameAsString() + - " with locality " + lowestLocality + " and its region server contains " + - regionsPerServer[serverIndex].length + " regions"); + LOG.trace("Lowest locality region is " + + regions[regionsPerServer[serverIndex][lowestLocalityRegionIndex]] + .getRegionNameAsString() + + " with locality " + lowestLocality + " and its region server contains " + + regionsPerServer[serverIndex].length + " regions"); } return regionsPerServer[serverIndex][lowestLocalityRegionIndex]; } else { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java index 7cc33751a0f0..ffb36cb8ca1a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.RegionMetrics; @@ -24,8 +23,8 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Wrapper class for the few fields required by the {@link StochasticLoadBalancer} - * from the full {@link RegionMetrics}. + * Wrapper class for the few fields required by the {@link StochasticLoadBalancer} from the full + * {@link RegionMetrics}. */ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index ac5ef44bed0a..576e09f1e59c 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -93,8 +93,8 @@ protected BaseLoadBalancer() { } /** - * This Constructor accepts an instance of MetricsBalancer, - * which will be used instead of creating a new one + * This Constructor accepts an instance of MetricsBalancer, which will be used instead of creating + * a new one */ protected BaseLoadBalancer(MetricsBalancer metricsBalancer) { this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer(); @@ -112,7 +112,6 @@ public void updateClusterMetrics(ClusterMetrics st) { } } - @Override public void setClusterInfoProvider(ClusterInfoProvider provider) { this.provider = provider; @@ -125,10 +124,10 @@ public void postMasterStartupInitialize() { } } - protected final boolean idleRegionServerExist(BalancerClusterState c){ + protected final boolean idleRegionServerExist(BalancerClusterState c) { boolean isServerExistsWithMoreRegions = false; boolean isServerExistsWithZeroRegions = false; - for (int[] serverList: c.regionsPerServer){ + for (int[] serverList : c.regionsPerServer) { if (serverList.length > 1) { isServerExistsWithMoreRegions = true; } @@ -140,21 +139,19 @@ protected final boolean idleRegionServerExist(BalancerClusterState c){ } /** - * Generates a bulk assignment plan to be used on cluster startup using a - * simple round-robin assignment. + * Generates a bulk assignment plan to be used on cluster startup using a simple round-robin + * assignment. *
- * Takes a list of all the regions and all the servers in the cluster and - * returns a map of each server to the regions that it should be assigned. + * Takes a list of all the regions and all the servers in the cluster and returns a map of each + * server to the regions that it should be assigned. *
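A minimal sketch of the bulk round-robin idea this javadoc describes, using generic types and a made-up class name rather than the real BaseLoadBalancer signatures:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class RoundRobinSketch { // illustrative only
  static <R, S> Map<S, List<R>> assign(List<R> regions, List<S> servers) {
    Map<S, List<R>> plan = new HashMap<>();
    if (servers.isEmpty()) {
      return plan; // mirrors the "emptyMap if no servers" contract
    }
    for (int i = 0; i < regions.size(); i++) {
      S server = servers.get(i % servers.size());
      plan.computeIfAbsent(server, k -> new ArrayList<>()).add(regions.get(i));
    }
    // Every server ends up holding floor(avg) or ceiling(avg) regions.
    return plan;
  }
}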
- * Currently implemented as a round-robin assignment. Same invariant as load - * balancing, all servers holding floor(avg) or ceiling(avg). - * - * TODO: Use block locations from HDFS to place regions with their blocks - * + * Currently implemented as a round-robin assignment. Same invariant as load balancing, all + * servers holding floor(avg) or ceiling(avg). TODO: Use block locations from HDFS to place + * regions with their blocks * @param regions all regions * @param servers all servers - * @return map of server to the regions it should take, or emptyMap if no - * assignment is possible (ie. no servers) + * @return map of server to the regions it should take, or emptyMap if no assignment is possible + * (ie. no servers) */ @Override @NonNull @@ -184,7 +181,7 @@ public Map> roundRobinAssignment(List r private BalancerClusterState createCluster(List servers, Collection regions) throws HBaseIOException { - boolean hasRegionReplica= false; + boolean hasRegionReplica = false; try { if (provider != null) { hasRegionReplica = provider.hasRegionReplica(regions); @@ -210,8 +207,7 @@ private BalancerClusterState createCluster(List servers, clusterState.put(server, Collections.emptyList()); } } - return new BalancerClusterState(regions, clusterState, null, this.regionFinder, - rackManager); + return new BalancerClusterState(regions, clusterState, null, this.regionFinder, rackManager); } private List findIdleServers(List servers) { @@ -224,7 +220,7 @@ private List findIdleServers(List servers) { */ @Override public ServerName randomAssignment(RegionInfo regionInfo, List servers) - throws HBaseIOException { + throws HBaseIOException { metricsBalancer.incrMiscInvocations(); int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { @@ -238,35 +234,32 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve if (idleServers.size() == 1) { return idleServers.get(0); } - final List finalServers = idleServers.isEmpty() ? - servers : idleServers; + final List finalServers = idleServers.isEmpty() ? servers : idleServers; List regions = Lists.newArrayList(regionInfo); BalancerClusterState cluster = createCluster(finalServers, regions); return randomAssignment(cluster, regionInfo, finalServers); } /** - * Generates a bulk assignment startup plan, attempting to reuse the existing - * assignment information from META, but adjusting for the specified list of - * available/online servers available for assignment. + * Generates a bulk assignment startup plan, attempting to reuse the existing assignment + * information from META, but adjusting for the specified list of available/online servers + * available for assignment. *
- * Takes a map of all regions to their existing assignment from META. Also - * takes a list of online servers for regions to be assigned to. Attempts to - * retain all assignment, so in some instances initial assignment will not be - * completely balanced. + * Takes a map of all regions to their existing assignment from META. Also takes a list of online + * servers for regions to be assigned to. Attempts to retain all assignment, so in some instances + * initial assignment will not be completely balanced. *
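Stripped of host matching and metrics, the retain logic can be sketched as: keep a region on its old server when that server is still online, otherwise fall back to a random online server, matching the leftover handling the javadoc describes next (generic types, made-up class name):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

final class RetainAssignmentSketch { // illustrative only
  static <R, S> Map<S, List<R>> retain(Map<R, S> oldAssignments, List<S> onlineServers) {
    Map<S, List<R>> plan = new HashMap<>();
    if (onlineServers.isEmpty()) {
      return plan;
    }
    Set<S> online = new HashSet<>(onlineServers);
    for (Map.Entry<R, S> e : oldAssignments.entrySet()) {
      S target = online.contains(e.getValue())
        ? e.getValue() // old server still present, keep the region where it was
        : onlineServers.get(ThreadLocalRandom.current().nextInt(onlineServers.size()));
      plan.computeIfAbsent(target, k -> new ArrayList<>()).add(e.getKey());
    }
    return plan;
  }
}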
- * Any leftover regions without an existing server to be assigned to will be - * assigned randomly to available servers. - * + * Any leftover regions without an existing server to be assigned to will be assigned randomly to + * available servers. * @param regions regions and existing assignment from meta * @param servers available servers - * @return map of servers and regions to be assigned to them, or emptyMap if no - * assignment is possible (ie. no servers) + * @return map of servers and regions to be assigned to them, or emptyMap if no assignment is + * possible (ie. no servers) */ @Override @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { // Update metrics metricsBalancer.incrMiscInvocations(); int numServers = servers == null ? 0 : servers.size(); @@ -360,15 +353,14 @@ public Map> retainAssignment(Map 0) { - randomAssignMsg = - numRandomAssignments + " regions were assigned " - + "to random hosts, since the old hosts for these regions are no " - + "longer present in the cluster. These hosts were:\n " - + Joiner.on("\n ").join(oldHostsNoLongerPresent); + randomAssignMsg = numRandomAssignments + " regions were assigned " + + "to random hosts, since the old hosts for these regions are no " + + "longer present in the cluster. These hosts were:\n " + + Joiner.on("\n ").join(oldHostsNoLongerPresent); } LOG.info("Reassigned " + regions.size() + " regions. " + numRetainedAssigments - + " retained the pre-restart assignment. " + randomAssignMsg); + + " retained the pre-restart assignment. " + randomAssignMsg); return Collections.unmodifiableMap(assignments); } @@ -432,8 +424,8 @@ public void stop(String why) { } /** - * Updates the balancer status tag reported to JMX - */ + * Updates the balancer status tag reported to JMX + */ @Override public void updateBalancerStatus(boolean status) { metricsBalancer.balancerStatus(status); @@ -443,7 +435,7 @@ public void updateBalancerStatus(boolean status) { * Used to assign a single region to a random server. */ private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo regionInfo, - List servers) { + List servers) { int numServers = servers.size(); // servers is not null, numServers > 1 ServerName sn = null; final int maxIterations = numServers * 4; @@ -456,8 +448,7 @@ private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo reg if (!usedSNs.contains(sn)) { usedSNs.add(sn); } - } while (cluster.wouldLowerAvailability(regionInfo, sn) - && iterations++ < maxIterations); + } while (cluster.wouldLowerAvailability(regionInfo, sn) && iterations++ < maxIterations); if (iterations >= maxIterations) { // We have reached the max. Means the servers that we collected is still lowering the // availability @@ -507,7 +498,6 @@ private void roundRobinAssignment(BalancerClusterState cluster, List regionIdx++; } - List lastFewRegions = new ArrayList<>(); // assign the remaining by going through the list and try to assign to servers one-by-one serverIdx = rand.nextInt(numServers); @@ -542,17 +532,18 @@ private void roundRobinAssignment(BalancerClusterState cluster, List // return a modifiable map, as we may add more entries into the returned map. private Map> getRegionAssignmentsByServer(Collection regions) { - return provider != null ? new HashMap<>(provider.getSnapShotOfAssignment(regions)) : - new HashMap<>(); + return provider != null + ? 
new HashMap<>(provider.getSnapShotOfAssignment(regions)) + : new HashMap<>(); } - protected final Map> toEnsumbleTableLoad( - Map>> LoadOfAllTable) { + protected final Map> + toEnsumbleTableLoad(Map>> LoadOfAllTable) { Map> returnMap = new TreeMap<>(); for (Map> serverNameListMap : LoadOfAllTable.values()) { serverNameListMap.forEach((serverName, regionInfoList) -> { List regionInfos = - returnMap.computeIfAbsent(serverName, k -> new ArrayList<>()); + returnMap.computeIfAbsent(serverName, k -> new ArrayList<>()); regionInfos.addAll(regionInfoList); }); } @@ -567,7 +558,7 @@ protected final Map> toEnsumbleTableLoad( * multiple times, one table a time, where we will only pass in the regions for a single table * each time. If not, we will pass in all the regions at once, and the {@code tableName} will be * {@link HConstants#ENSEMBLE_TABLE_NAME}. - * @param tableName the table to be balanced + * @param tableName the table to be balanced * @param loadOfOneTable region load of servers for the specific one table * @return List of plans */ diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java index 9a6a43494c6f..24cba6d7442d 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java @@ -21,14 +21,14 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Compute the cost of total number of coprocessor requests The more unbalanced the higher the - * computed cost will be. This uses a rolling average of regionload. + * Compute the cost of total number of coprocessor requests The more unbalanced the higher the + * computed cost will be. This uses a rolling average of regionload. */ @InterfaceAudience.Private class CPRequestCostFunction extends CostFromRegionLoadAsRateFunction { private static final String CP_REQUEST_COST_KEY = - "hbase.master.balancer.stochastic.cpRequestCost"; + "hbase.master.balancer.stochastic.cpRequestCost"; private static final float DEFAULT_CP_REQUEST_COST = 5; CPRequestCostFunction(Configuration conf) { @@ -39,4 +39,4 @@ class CPRequestCostFunction extends CostFromRegionLoadAsRateFunction { protected double getCostFromRl(BalancerRegionLoad rl) { return rl.getCpRequestsCount(); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java index faaaff9733c8..d9245495e204 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.Map; @@ -35,15 +34,17 @@ abstract class CandidateGenerator { * From a list of regions pick a random one. Null can be returned which * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move * rather than swap. 
- * @param cluster The state of the cluster - * @param server index of the server + * @param cluster The state of the cluster + * @param server index of the server * @param chanceOfNoSwap Chance that this will decide to try a move rather than a swap. * @return a random {@link RegionInfo} or null if an asymmetrical move is suggested. */ int pickRandomRegion(BalancerClusterState cluster, int server, double chanceOfNoSwap) { // Check to see if this is just a move. - if (cluster.regionsPerServer[server].length == 0 - || ThreadLocalRandom.current().nextFloat() < chanceOfNoSwap) { + if ( + cluster.regionsPerServer[server].length == 0 + || ThreadLocalRandom.current().nextFloat() < chanceOfNoSwap + ) { // signal a move only. return -1; } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java index cfd50fc11a21..1660cc44757e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java index 5d1e1ccac2db..2c93e7852136 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java index 977c6b14ec0a..5cc98478f9b9 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java @@ -95,14 +95,16 @@ public void updateWeight(double[] weights) { /** * Scale the value between 0 and 1. - * @param min Min value - * @param max The Max value + * @param min Min value + * @param max The Max value * @param value The value to be scaled. * @return The scaled value. 
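As a rough illustration of the normalization described here, min-max scaling with a small epsilon guard can be sketched as follows; this is a sketch built from the guards visible in this patch, not the exact HBase implementation:

final class ScaleSketch { // illustrative only
  private static final double COST_EPSILON = 1e-9; // assumed tolerance for this sketch

  static double scale(double min, double max, double value) {
    if (max <= min || value <= min
        || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON) {
      return 0; // degenerate or at-the-floor inputs cost nothing
    }
    // Clamp the linear interpolation into [0, 1].
    return Math.max(0d, Math.min(1d, (value - min) / (max - min)));
  }
}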
*/ protected static double scale(double min, double max, double value) { - if (max <= min || value <= min - || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON) { + if ( + max <= min || value <= min || Math.abs(max - min) <= COST_EPSILON + || Math.abs(value - min) <= COST_EPSILON + ) { return 0; } if (max <= min || Math.abs(max - min) <= COST_EPSILON) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java index 29afd59084f7..65e102f2fb79 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,8 +81,7 @@ private static double computeCost(double[] stats) { } // No need to compute standard deviation with division by cluster size when scaling. totalCost = Math.sqrt(totalCost); - return CostFunction.scale(getMinSkew(total, count), - getMaxSkew(total, count), totalCost); + return CostFunction.scale(getMinSkew(total, count), getMaxSkew(total, count), totalCost); } private static double getSum(double[] stats) { @@ -105,21 +104,20 @@ public static double getMinSkew(double total, double numServers) { // It's possible that there aren't enough regions to go around double min; if (numServers > total) { - min = ((numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total) ; + min = ((numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total); } else { // Some will have 1 more than everything else. int numHigh = (int) (total - (Math.floor(mean) * numServers)); int numLow = (int) (numServers - numHigh); - min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean) + - numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean)); + min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean) + + numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean)); } return Math.sqrt(min); } /** - * Return the max deviation of distribution - * Compute max as if all region servers had 0 and one had the sum of all costs. This must be - * a zero sum cost for this to make sense. + * Return the max deviation of distribution Compute max as if all region servers had 0 and one had + * the sum of all costs. This must be a zero sum cost for this to make sense. */ public static double getMaxSkew(double total, double numServers) { if (numServers == 0) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java index acccc321ae3c..3b91bec03ecd 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java @@ -55,23 +55,20 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that - * assigns favored nodes for each region. There is a Primary RegionServer that hosts - * the region, and then there is Secondary and Tertiary RegionServers. 
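Recombining the DoubleArrayCost pieces shown just above, the cost of a per-server load array is essentially the root of the squared deviations from the mean, rescaled between the smallest and largest achievable skew; a self-contained sketch under that reading:

final class SkewCostSketch { // illustrative only
  static double cost(double[] perServerLoad, double minSkew, double maxSkew) {
    if (perServerLoad.length == 0 || maxSkew <= minSkew) {
      return 0;
    }
    double total = 0;
    for (double v : perServerLoad) {
      total += v;
    }
    double mean = total / perServerLoad.length;
    double sumOfSquares = 0;
    for (double v : perServerLoad) {
      sumOfSquares += (v - mean) * (v - mean);
    }
    double skew = Math.sqrt(sumOfSquares);
    // 0 means perfectly balanced, 1 means as skewed as the totals allow.
    return Math.min(1d, Math.max(0d, (skew - minSkew) / (maxSkew - minSkew)));
  }
}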
Currently, the - * favored nodes information is used in creating HDFS files - the Primary RegionServer - * passes the primary, secondary, tertiary node addresses as hints to the - * DistributedFileSystem API for creating files on the filesystem. These nodes are - * treated as hints by the HDFS to place the blocks of the file. This alleviates the - * problem to do with reading from remote nodes (since we can make the Secondary - * RegionServer as the new Primary RegionServer) after a region is recovered. This - * should help provide consistent read latencies for the regions even when their - * primary region servers die. This provides two + * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that assigns favored + * nodes for each region. There is a Primary RegionServer that hosts the region, and then there is + * Secondary and Tertiary RegionServers. Currently, the favored nodes information is used in + * creating HDFS files - the Primary RegionServer passes the primary, secondary, tertiary node + * addresses as hints to the DistributedFileSystem API for creating files on the filesystem. These + * nodes are treated as hints by the HDFS to place the blocks of the file. This alleviates the + * problem to do with reading from remote nodes (since we can make the Secondary RegionServer as the + * new Primary RegionServer) after a region is recovered. This should help provide consistent read + * latencies for the regions even when their primary region servers die. This provides two * {@link CandidateGenerator} - * */ @InterfaceAudience.Private -public class FavoredStochasticBalancer extends StochasticLoadBalancer implements - FavoredNodesPromoter { +public class FavoredStochasticBalancer extends StochasticLoadBalancer + implements FavoredNodesPromoter { private static final Logger LOG = LoggerFactory.getLogger(FavoredStochasticBalancer.class); @@ -99,23 +96,20 @@ protected CandidateGenerator getRandomGenerator() { } /** - * Round robin assignment: Segregate the regions into two types: - * - * 1. The regions that have favored node assignment where at least one of the favored node - * is still alive. In this case, try to adhere to the current favored nodes assignment as - * much as possible - i.e., if the current primary is gone, then make the secondary or - * tertiary as the new host for the region (based on their current load). Note that we don't - * change the favored node assignments here (even though one or more favored node is - * currently down). That will be done by the admin operations. - * - * 2. The regions that currently don't have favored node assignments. Generate favored nodes - * for them and then assign. Generate the primary fn in round robin fashion and generate - * secondary and tertiary as per favored nodes constraints. + * Round robin assignment: Segregate the regions into two types: 1. The regions that have favored + * node assignment where at least one of the favored node is still alive. In this case, try to + * adhere to the current favored nodes assignment as much as possible - i.e., if the current + * primary is gone, then make the secondary or tertiary as the new host for the region (based on + * their current load). Note that we don't change the favored node assignments here (even though + * one or more favored node is currently down). That will be done by the admin operations. 2. The + * regions that currently don't have favored node assignments. Generate favored nodes for them and + * then assign. 
Generate the primary fn in round robin fashion and generate secondary and tertiary + * as per favored nodes constraints. */ @Override @NonNull public Map> roundRobinAssignment(List regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { metricsBalancer.incrMiscInvocations(); Map> assignmentMap = new HashMap<>(); if (regions.isEmpty()) { @@ -134,7 +128,7 @@ public Map> roundRobinAssignment(List r super.roundRobinAssignment(Lists.newArrayList(systemRegions), servers); // Segregate favored and non-favored nodes regions and assign accordingly. - Pair>, List> segregatedRegions = + Pair>, List> segregatedRegions = segregateRegionsAndAssignRegionsWithFavoredNodes(regionSet, servers); Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); Map> regionsWithoutFN = @@ -146,14 +140,14 @@ public Map> roundRobinAssignment(List r mergeAssignmentMaps(assignmentMap, regionsWithoutFN); } catch (Exception ex) { - throw new HBaseIOException("Encountered exception while doing favored-nodes assignment " - + ex + " Falling back to regular assignment", ex); + throw new HBaseIOException("Encountered exception while doing favored-nodes assignment " + ex + + " Falling back to regular assignment", ex); } return assignmentMap; } private void mergeAssignmentMaps(Map> assignmentMap, - Map> otherAssignments) { + Map> otherAssignments) { if (otherAssignments == null || otherAssignments.isEmpty()) { return; @@ -171,7 +165,7 @@ private void mergeAssignmentMaps(Map> assignmentMap } private Map> generateFNForRegionsWithoutFN( - FavoredNodeAssignmentHelper helper, List regions) throws IOException { + FavoredNodeAssignmentHelper helper, List regions) throws IOException { Map> assignmentMap = Maps.newHashMap(); Map> regionsNoFNMap; @@ -193,7 +187,7 @@ private Map> generateFNForRegionsWithoutFN( // Since we expect FN to be present most of the time, lets create map with same size Map> assignmentMapForFavoredNodes = - new HashMap<>(onlineServers.size()); + new HashMap<>(onlineServers.size()); List regionsWithNoFavoredNodes = new ArrayList<>(); for (RegionInfo region : regions) { @@ -207,7 +201,7 @@ private Map> generateFNForRegionsWithoutFN( ServerName serverWithLegitStartCode = getServerFromFavoredNode(onlineServers, s); if (serverWithLegitStartCode != null) { FavoredNodesPlan.Position position = - FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); if (Position.PRIMARY.equals(position)) { primaryHost = serverWithLegitStartCode; } else if (Position.SECONDARY.equals(position)) { @@ -218,7 +212,7 @@ private Map> generateFNForRegionsWithoutFN( } } assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost, - secondaryHost, tertiaryHost); + secondaryHost, tertiaryHost); } else { regionsWithNoFavoredNodes.add(region); } @@ -255,8 +249,8 @@ private ServerName getServerFromFavoredNode(List servers, ServerName * available (in that order). 
*/ private void assignRegionToAvailableFavoredNode( - Map> assignmentMapForFavoredNodes, RegionInfo region, - ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) { + Map> assignmentMapForFavoredNodes, RegionInfo region, + ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) { if (primaryHost != null) { addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost); } else if (secondaryHost != null && tertiaryHost != null) { @@ -286,13 +280,13 @@ private void assignRegionToAvailableFavoredNode( } /** - * If we have favored nodes for a region, we will return one of the FN as destination. If - * favored nodes are not present for a region, we will generate and return one of the FN as - * destination. If we can't generate anything, lets fallback. + * If we have favored nodes for a region, we will return one of the FN as destination. If favored + * nodes are not present for a region, we will generate and return one of the FN as destination. + * If we can't generate anything, lets fallback. */ @Override public ServerName randomAssignment(RegionInfo regionInfo, List servers) - throws HBaseIOException { + throws HBaseIOException { ServerName destination = null; if (!FavoredNodesManager.isFavoredNodeApplicable(regionInfo)) { return super.randomAssignment(regionInfo, servers); @@ -330,7 +324,7 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve } private void updateFavoredNodesForRegion(RegionInfo regionInfo, List newFavoredNodes) - throws IOException { + throws IOException { Map> regionFNMap = Maps.newHashMap(); regionFNMap.put(regionInfo, newFavoredNodes); fnm.updateFavoredNodes(regionFNMap); @@ -342,7 +336,7 @@ private void updateFavoredNodesForRegion(RegionInfo regionInfo, List @Override @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { Map> assignmentMap = Maps.newHashMap(); Map> result = super.retainAssignment(regions, servers); if (result.isEmpty()) { @@ -374,16 +368,16 @@ public Map> retainAssignment(Map newFavoredNodes = Lists.newArrayList(); newFavoredNodes.add(primary); newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), - secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); + secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), - secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); + secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); regionFNMap.put(hri, newFavoredNodes); addRegionToMap(assignmentMap, hri, sn); } else { - throw new HBaseIOException("Cannot generate secondary/tertiary FN for " + hri - + " generated " - + (secondaryAndTertiaryNodes != null ? secondaryAndTertiaryNodes : " nothing")); + throw new HBaseIOException( + "Cannot generate secondary/tertiary FN for " + hri + " generated " + + (secondaryAndTertiaryNodes != null ? 
secondaryAndTertiaryNodes : " nothing")); } } else { List onlineFN = getOnlineFavoredNodes(servers, favoredNodes); @@ -425,7 +419,7 @@ public Map> retainAssignment(Map getOnlineFavoredNodes(List onlineServers, - List serversWithoutStartCodes) { + List serversWithoutStartCodes) { if (serversWithoutStartCodes == null) { return null; } else { @@ -458,7 +452,7 @@ public List getFavoredNodes(RegionInfo regionInfo) { */ @Override public void generateFavoredNodesForDaughter(List servers, RegionInfo parent, - RegionInfo regionA, RegionInfo regionB) throws IOException { + RegionInfo regionA, RegionInfo regionB) throws IOException { Map> result = new HashMap<>(); FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); helper.initialize(); @@ -466,7 +460,7 @@ public void generateFavoredNodesForDaughter(List servers, RegionInfo List parentFavoredNodes = fnm.getFavoredNodes(parent); if (parentFavoredNodes == null) { LOG.debug("Unable to find favored nodes for parent, " + parent - + " generating new favored nodes for daughter"); + + " generating new favored nodes for daughter"); result.put(regionA, helper.generateFavoredNodes(regionA)); result.put(regionB, helper.generateFavoredNodes(regionB)); @@ -474,12 +468,12 @@ public void generateFavoredNodesForDaughter(List servers, RegionInfo // Lets get the primary and secondary from parent for regionA Set regionAFN = - getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY); + getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY); result.put(regionA, Lists.newArrayList(regionAFN)); // Lets get the primary and tertiary from parent for regionB Set regionBFN = - getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY); + getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY); result.put(regionB, Lists.newArrayList(regionBFN)); } @@ -487,8 +481,7 @@ public void generateFavoredNodesForDaughter(List servers, RegionInfo } private Set getInheritedFNForDaughter(FavoredNodeAssignmentHelper helper, - List parentFavoredNodes, Position primary, Position secondary) - throws IOException { + List parentFavoredNodes, Position primary, Position secondary) throws IOException { Set daughterFN = Sets.newLinkedHashSet(); if (parentFavoredNodes.size() >= primary.ordinal()) { @@ -507,12 +500,12 @@ private Set getInheritedFNForDaughter(FavoredNodeAssignmentHelper he } /** - * Generate favored nodes for a region during merge. Choose the FN from one of the sources to - * keep it simple. + * Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep + * it simple. */ @Override - public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) - throws IOException { + public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) + throws IOException { updateFavoredNodesForRegion(merged, fnm.getFavoredNodes(mergeParents[0])); } @@ -560,7 +553,7 @@ protected BalanceAction generate(BalancerClusterState cluster) { } private int getDifferentFavoredNode(BalancerClusterState cluster, List favoredNodes, - int currentServer) { + int currentServer) { List fnIndex = new ArrayList<>(); for (ServerName sn : favoredNodes) { if (cluster.serversToIndex.containsKey(sn.getAddress())) { @@ -587,8 +580,8 @@ private int pickLowestLocalRegionOnServer(BalancerClusterState cluster, int serv } /* - * This is like LoadCandidateGenerator, but we choose appropriate FN for the region on the - * most loaded server. 
+ * This is like LoadCandidateGenerator, but we choose appropriate FN for the region on the most + * loaded server. */ class FavoredNodeLoadPicker extends CandidateGenerator { @@ -615,7 +608,7 @@ BalanceAction generate(BalancerClusterState cluster) { private int pickLeastLoadedServer(final BalancerClusterState cluster, int thisServer) { Integer[] servers = cluster.serverIndicesSortedByRegionCount; int index; - for (index = 0; index < servers.length ; index++) { + for (index = 0; index < servers.length; index++) { if ((servers[index] != null) && servers[index] != thisServer) { break; } @@ -648,7 +641,7 @@ private int pickLeastLoadedFNServer(final BalancerClusterState cluster, private int pickMostLoadedServer(final BalancerClusterState cluster) { Integer[] servers = cluster.serverIndicesSortedByRegionCount; int index; - for (index = servers.length - 1; index > 0 ; index--) { + for (index = servers.length - 1; index > 0; index--) { if (servers[index] != null) { break; } @@ -675,13 +668,15 @@ protected List balanceTable(TableName tableName, for (RegionInfo hri : entry.getValue()) { List favoredNodes = fnm.getFavoredNodes(hri); - if (FavoredNodesPlan.getFavoredServerPosition(favoredNodes, current) != null || - !FavoredNodesManager.isFavoredNodeApplicable(hri)) { + if ( + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, current) != null + || !FavoredNodesManager.isFavoredNodeApplicable(hri) + ) { regions.add(hri); } else { // No favored nodes, lets unassign. - LOG.warn("Region not on favored nodes, unassign. Region: " + hri + " current: " + - current + " favored nodes: " + favoredNodes); + LOG.warn("Region not on favored nodes, unassign. Region: " + hri + " current: " + current + + " favored nodes: " + favoredNodes); try { provider.unassign(hri); } catch (IOException e) { @@ -702,4 +697,3 @@ protected List balanceTable(TableName tableName, return regionPlans; } } - diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java index 4cda751e3b92..8a963b7017a9 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java @@ -1,16 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.master.balancer; @@ -63,19 +66,19 @@ public class HeterogeneousRegionCountCostFunction extends CostFunction { * configuration used for the path where the rule file is stored. */ static final String HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE = - "hbase.master.balancer.heterogeneousRegionCountRulesFile"; + "hbase.master.balancer.heterogeneousRegionCountRulesFile"; private static final Logger LOG = - LoggerFactory.getLogger(HeterogeneousRegionCountCostFunction.class); + LoggerFactory.getLogger(HeterogeneousRegionCountCostFunction.class); /** * Default rule to apply when the rule file is not found. Default to 200. */ private static final String HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT = - "hbase.master.balancer.heterogeneousRegionCountDefault"; + "hbase.master.balancer.heterogeneousRegionCountDefault"; /** * Cost for the function. Default to 500, can be changed. */ private static final String REGION_COUNT_SKEW_COST_KEY = - "hbase.master.balancer.stochastic.heterogeneousRegionCountCost"; + "hbase.master.balancer.stochastic.heterogeneousRegionCountCost"; private static final float DEFAULT_REGION_COUNT_SKEW_COST = 500; private final String rulesPath; @@ -104,17 +107,19 @@ public class HeterogeneousRegionCountCostFunction extends CostFunction { this.setMultiplier(conf.getFloat(REGION_COUNT_SKEW_COST_KEY, DEFAULT_REGION_COUNT_SKEW_COST)); this.rulesPath = conf.get(HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE); this.defaultNumberOfRegions = - conf.getInt(HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT, 200); + conf.getInt(HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT, 200); if (this.defaultNumberOfRegions < 0) { LOG.warn("invalid configuration '" + HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT - + "'. Setting default to 200"); + + "'. 
Setting default to 200"); this.defaultNumberOfRegions = 200; } - if (conf.getFloat(RegionCountSkewCostFunction.REGION_COUNT_SKEW_COST_KEY, - RegionCountSkewCostFunction.DEFAULT_REGION_COUNT_SKEW_COST) > 0) { + if ( + conf.getFloat(RegionCountSkewCostFunction.REGION_COUNT_SKEW_COST_KEY, + RegionCountSkewCostFunction.DEFAULT_REGION_COUNT_SKEW_COST) > 0 + ) { LOG.warn("regionCountCost is not set to 0, " - + " this will interfere with the HeterogeneousRegionCountCostFunction!"); + + " this will interfere with the HeterogeneousRegionCountCostFunction!"); } } @@ -153,12 +158,12 @@ protected double cost() { * used to load the rule files. */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|HeterogeneousRegionCountCostFunction).java") + allowedOnPath = ".*(/src/test/.*|HeterogeneousRegionCountCostFunction).java") void loadRules() { final List lines = readFile(this.rulesPath); if (null == lines) { LOG.warn("cannot load rules file, keeping latest rules file which has " - + this.limitPerRule.size() + " rules"); + + this.limitPerRule.size() + " rules"); return; } @@ -175,7 +180,7 @@ void loadRules() { final List splits = Splitter.on(' ').splitToList(line); if (splits.size() != 2) { throw new IOException( - "line '" + line + "' is malformated, " + "expected [regexp] [limit]. Skipping line"); + "line '" + line + "' is malformated, " + "expected [regexp] [limit]. Skipping line"); } final Pattern pattern = Pattern.compile(splits.get(0)); @@ -243,7 +248,7 @@ private void rebuildCache() { } overallUsage = (double) this.cluster.numRegions / (double) this.totalCapacity; LOG.info("Cluster can hold " + this.cluster.numRegions + "/" + this.totalCapacity + " regions (" - + Math.round(overallUsage * 100) + "%)"); + + Math.round(overallUsage * 100) + "%)"); if (overallUsage >= 1) { LOG.warn("Cluster is overused, {}", overallUsage); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java index a43fdc88f148..697f24f82c93 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java index 8604f4a47f7f..83162a624d92 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import java.util.concurrent.ThreadLocalRandom; @@ -41,13 +40,15 @@ private int pickLeastLoadedServer(final BalancerClusterState cluster, int thisSe if (servers[i] == null || servers[i] == thisServer) { continue; } - if (selectedIndex != -1 - && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0) { + if ( + selectedIndex != -1 + && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0 + ) { // Exhausted servers of the same region count break; } // we don't know how many servers have the same region count, we will randomly select one - // using a simplified inline reservoir sampling by assignmening a random number to stream + // using a simplified inline reservoir sampling by assignmening a random number to stream // data and choose the greatest one. (http://gregable.com/2007/10/reservoir-sampling.html) double currentRandom = ThreadLocalRandom.current().nextDouble(); if (currentRandom > currentLargestRandom) { @@ -67,13 +68,15 @@ private int pickMostLoadedServer(final BalancerClusterState cluster, int thisSer if (servers[i] == null || servers[i] == thisServer) { continue; } - if (selectedIndex != -1 && cluster.getNumRegionsComparator().compare(servers[i], - servers[selectedIndex]) != 0) { + if ( + selectedIndex != -1 + && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0 + ) { // Exhausted servers of the same region count break; } // we don't know how many servers have the same region count, we will randomly select one - // using a simplified inline reservoir sampling by assignmening a random number to stream + // using a simplified inline reservoir sampling by assignmening a random number to stream // data and choose the greatest one. (http://gregable.com/2007/10/reservoir-sampling.html) double currentRandom = ThreadLocalRandom.current().nextDouble(); if (currentRandom > currentLargestRandom) { @@ -81,7 +84,7 @@ private int pickMostLoadedServer(final BalancerClusterState cluster, int thisSer currentLargestRandom = currentRandom; } } - return selectedIndex == -1? -1 : servers[selectedIndex]; + return selectedIndex == -1 ? -1 : servers[selectedIndex]; } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java index c8e56f193bf0..77dd797c85c6 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
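The "simplified inline reservoir sampling" that the LoadCandidateGenerator comments above refer to can be sketched on its own: every tied candidate gets one uniform draw and the largest draw wins, which picks uniformly among the ties in a single pass (made-up class name):

import java.util.concurrent.ThreadLocalRandom;

final class ReservoirPickSketch { // illustrative only
  /** Selects one index uniformly at random in a single streaming pass. */
  static int pickUniformIndex(int numCandidates) {
    int selected = -1;
    double largestDraw = -1d;
    for (int i = 0; i < numCandidates; i++) {
      double draw = ThreadLocalRandom.current().nextDouble(); // one draw per candidate
      if (draw > largestDraw) {
        largestDraw = draw;
        selected = i;
      }
    }
    return selected; // -1 only when there are no candidates
  }
}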
*/ - package org.apache.hadoop.hbase.master.balancer; import java.util.Optional; @@ -34,8 +33,10 @@ BalanceAction generate(BalancerClusterState cluster) { for (int i = 0; i < cluster.numRegions; i++) { int region = (startIndex + i) % cluster.numRegions; int currentServer = cluster.regionIndexToServerIndex[region]; - if (currentServer != cluster.getOrComputeRegionsToMostLocalEntities( - BalancerClusterState.LocalityType.SERVER)[region]) { + if ( + currentServer != cluster.getOrComputeRegionsToMostLocalEntities( + BalancerClusterState.LocalityType.SERVER)[region] + ) { Optional potential = tryMoveOrSwap(cluster, currentServer, region, cluster.getOrComputeRegionsToMostLocalEntities( BalancerClusterState.LocalityType.SERVER)[region]); @@ -55,16 +56,16 @@ private Optional tryMoveOrSwap(BalancerClusterState cluster, int return Optional.of(getAction(fromServer, fromRegion, toServer, -1)); } // Compare locality gain/loss from swapping fromRegion with regions on toServer - double fromRegionLocalityDelta = getWeightedLocality(cluster, fromRegion, toServer) - - getWeightedLocality(cluster, fromRegion, fromServer); + double fromRegionLocalityDelta = getWeightedLocality(cluster, fromRegion, toServer) + - getWeightedLocality(cluster, fromRegion, fromServer); int toServertotalRegions = cluster.regionsPerServer[toServer].length; if (toServertotalRegions > 0) { int startIndex = ThreadLocalRandom.current().nextInt(toServertotalRegions); for (int i = 0; i < toServertotalRegions; i++) { int toRegionIndex = (startIndex + i) % toServertotalRegions; int toRegion = cluster.regionsPerServer[toServer][toRegionIndex]; - double toRegionLocalityDelta = getWeightedLocality(cluster, toRegion, fromServer) - - getWeightedLocality(cluster, toRegion, toServer); + double toRegionLocalityDelta = getWeightedLocality(cluster, toRegion, fromServer) + - getWeightedLocality(cluster, toRegion, toServer); // If locality would remain neutral or improve, attempt the swap if (fromRegionLocalityDelta + toRegionLocalityDelta >= 0) { return Optional.of(getAction(fromServer, fromRegion, toServer, toRegion)); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java index 80abac1f1115..b6500bf1772f 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java @@ -39,4 +39,4 @@ class MemStoreSizeCostFunction extends CostFromRegionLoadAsRateFunction { protected double getCostFromRl(BalancerRegionLoad rl) { return rl.getMemStoreSizeMB(); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java index 015e1d486c16..8a7561f7c08f 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java index 6c79f054ee4a..c9c0a1830cc1 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.yetus.audience.InterfaceAudience; /** - * This metrics balancer uses extended source for stochastic load balancer - * to report its related metrics to JMX. For details, refer to HBASE-13965 + * This metrics balancer uses extended source for stochastic load balancer to report its related + * metrics to JMX. For details, refer to HBASE-13965 */ @InterfaceAudience.Private public class MetricsStochasticBalancer extends MetricsBalancer { @@ -43,7 +42,7 @@ public MetricsStochasticBalancer() { @Override protected void initSource() { stochasticSource = - CompatibilitySingletonFactory.getInstance(MetricsStochasticBalancerSource.class); + CompatibilitySingletonFactory.getInstance(MetricsStochasticBalancerSource.class); } @Override @@ -75,7 +74,7 @@ public void updateMetricsSize(int size) { * Reports stochastic load balancer costs to JMX */ public void updateStochasticCost(String tableName, String costFunctionName, - String costFunctionDesc, Double value) { + String costFunctionDesc, Double value) { stochasticSource.updateStochasticCost(tableName, costFunctionName, costFunctionDesc, value); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java index f73fada18759..547c9c5b28e9 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import org.apache.yetus.audience.InterfaceAudience; @@ -54,4 +53,4 @@ public BalanceAction undoAction() { public String toString() { return getType() + ": " + region + ":" + fromServer + " -> " + toServer; } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java index 0e4735479a13..93d0b57e094a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java @@ -36,4 +36,4 @@ public RackLocalityCostFunction(Configuration conf) { int regionIndexToEntityIndex(int region) { return cluster.getRackForRegion(region); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RandomCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RandomCandidateGenerator.java index dad201152300..274545a684a9 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RandomCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RandomCandidateGenerator.java @@ -32,4 +32,4 @@ BalanceAction generate(BalancerClusterState cluster) { return pickRandomRegions(cluster, thisServer, otherServer); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java index 402d144777d7..df108167cafa 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java @@ -39,4 +39,4 @@ class ReadRequestCostFunction extends CostFromRegionLoadAsRateFunction { protected double getCostFromRl(BalancerRegionLoad rl) { return rl.getReadRequestsCount(); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java index 9634dd1eb309..1d0f21a50884 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; @@ -129,8 +130,8 @@ void setClusterMetrics(ClusterMetrics status) { */ private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) { if (oldStatus == null || newStatus == null) { - LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", - oldStatus, newStatus); + LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, + newStatus); return; } @@ -244,7 +245,7 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { * @return ServerName list */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java") + allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java") List mapHostNameToServerName(List hosts) { if (hosts == null || status == null) { if (hosts == null) { @@ -331,7 +332,7 @@ void refreshAndWait(Collection hris) { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") LoadingCache getCache() { return cache; } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java index 911b70bd6fec..68f442d63a51 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,14 +18,13 @@ package org.apache.hadoop.hbase.master.balancer; import java.util.Comparator; - import org.apache.hadoop.hbase.client.RegionInfo; /** - * The following comparator assumes that RegionId from HRegionInfo can represent - * the age of the region - larger RegionId means the region is younger. This - * comparator is used in balanceCluster() to account for the out-of-band regions - * which were assigned to the server after some other region server crashed. + * The following comparator assumes that RegionId from HRegionInfo can represent the age of the + * region - larger RegionId means the region is younger. This comparator is used in balanceCluster() + * to account for the out-of-band regions which were assigned to the server after some other region + * server crashed. 
*/ class RegionInfoComparator implements Comparator { @Override diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java index e0fd6966c42f..61d03a27d187 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.concurrent.ThreadLocalRandom; - import org.agrona.collections.Int2IntCounterMap; import org.agrona.collections.IntArrayList; import org.apache.yetus.audience.InterfaceAudience; @@ -37,9 +35,11 @@ class RegionReplicaCandidateGenerator extends CandidateGenerator { * Randomly select one regionIndex out of all region replicas co-hosted in the same group (a group * is a server, host or rack) * @param colocatedReplicaCountsPerGroup either Cluster.colocatedReplicaCountsPerServer, - * colocatedReplicaCountsPerHost or colocatedReplicaCountsPerRack - * @param regionsPerGroup either Cluster.regionsPerServer, regionsPerHost or regionsPerRack - * @param regionIndexToPrimaryIndex Cluster.regionsIndexToPrimaryIndex + * colocatedReplicaCountsPerHost or + * colocatedReplicaCountsPerRack + * @param regionsPerGroup either Cluster.regionsPerServer, regionsPerHost or + * regionsPerRack + * @param regionIndexToPrimaryIndex Cluster.regionsIndexToPrimaryIndex * @return a regionIndex for the selected primary or -1 if there is no co-locating */ int selectCoHostedRegionPerGroup(Int2IntCounterMap colocatedReplicaCountsPerGroup, @@ -75,9 +75,9 @@ BalanceAction generate(BalancerClusterState cluster) { return BalanceAction.NULL_ACTION; } - int regionIndex = selectCoHostedRegionPerGroup( - cluster.colocatedReplicaCountsPerServer[serverIndex], - cluster.regionsPerServer[serverIndex], cluster.regionIndexToPrimaryIndex); + int regionIndex = + selectCoHostedRegionPerGroup(cluster.colocatedReplicaCountsPerServer[serverIndex], + cluster.regionsPerServer[serverIndex], cluster.regionIndexToPrimaryIndex); // if there are no pairs of region replicas co-hosted, default to random generator if (regionIndex == -1) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java index cd4012a0e8ef..f482ada22b29 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,8 @@ package org.apache.hadoop.hbase.master.balancer; import java.util.concurrent.atomic.AtomicLong; - import org.agrona.collections.Hashing; import org.agrona.collections.Int2IntCounterMap; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -47,8 +45,8 @@ final void prepare(BalancerClusterState cluster) { protected final long getMaxCost(BalancerClusterState cluster) { // max cost is the case where every region replica is hosted together regardless of host - Int2IntCounterMap colocatedReplicaCounts = new Int2IntCounterMap(cluster.numRegions, - Hashing.DEFAULT_LOAD_FACTOR, 0); + Int2IntCounterMap colocatedReplicaCounts = + new Int2IntCounterMap(cluster.numRegions, Hashing.DEFAULT_LOAD_FACTOR, 0); for (int i = 0; i < cluster.regionIndexToPrimaryIndex.length; i++) { colocatedReplicaCounts.getAndIncrement(cluster.regionIndexToPrimaryIndex[i]); } @@ -91,7 +89,7 @@ protected final long costPerGroup(Int2IntCounterMap colocatedReplicaCounts) { final AtomicLong cost = new AtomicLong(0); // colocatedReplicaCounts is a sorted array of primary ids of regions. Replicas of regions // sharing the same primary will have consecutive numbers in the array. - colocatedReplicaCounts.forEach((primary,count) -> { + colocatedReplicaCounts.forEach((primary, count) -> { if (count > 1) { // means consecutive primaries, indicating co-location cost.getAndAdd((count - 1) * (count - 1)); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java index 658b5c862a18..df5bca0da72e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java @@ -47,7 +47,8 @@ protected void loadCosts() { costsPerGroup = new long[cluster.numHosts]; // either server based or host based colocatedReplicaCountsPerGroup = cluster.multiServersPerHost - ? cluster.colocatedReplicaCountsPerHost : cluster.colocatedReplicaCountsPerServer; + ? cluster.colocatedReplicaCountsPerHost + : cluster.colocatedReplicaCountsPerServer; for (int i = 0; i < colocatedReplicaCountsPerGroup.length; i++) { costsPerGroup[i] = costPerGroup(colocatedReplicaCountsPerGroup[i]); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java index 0698d128eb30..b06e838724ab 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,8 @@ package org.apache.hadoop.hbase.master.balancer; import java.io.Serializable; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; +import org.apache.yetus.audience.InterfaceAudience; /** * Data structure that holds servername and 'load'. 
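The RegionReplicaGroupingCostFunction hunk above keeps the quadratic co-location penalty: for every primary region whose replicas share a group (a server, host or rack), costPerGroup() adds (count - 1)^2 to that group's cost, so spreading replicas apart is always cheaper than stacking them. A minimal standalone sketch of that rule follows; the class name and the use of a plain java.util.HashMap are illustrative only, since the patched code walks an Agrona Int2IntCounterMap instead.

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the quadratic co-location penalty described in
// RegionReplicaGroupingCostFunction#costPerGroup above. Illustrative only:
// the real code iterates an Agrona Int2IntCounterMap, not a HashMap.
public final class ColocationCostSketch {

  // 'colocatedReplicaCounts' maps a primary region index to the number of its
  // replicas hosted in one group (server, host or rack). A group with no
  // co-location costs 0; every extra co-hosted replica costs (count - 1)^2.
  static long costPerGroup(Map<Integer, Integer> colocatedReplicaCounts) {
    long cost = 0;
    for (int count : colocatedReplicaCounts.values()) {
      if (count > 1) {
        cost += (long) (count - 1) * (count - 1);
      }
    }
    return cost;
  }

  public static void main(String[] args) {
    Map<Integer, Integer> counts = new HashMap<>();
    counts.put(0, 3); // primary 0: three replicas in the same group -> penalty 4
    counts.put(1, 1); // primary 1: replicas spread out -> penalty 0
    System.out.println(costPerGroup(counts)); // prints 4
  }
}

The quadratic shape is the design point worth noting: one pair of co-hosted replicas costs 1, but a third replica in the same group raises the penalty to 4, which steers the balancer toward breaking up the largest stacks first.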
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerLocalityCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerLocalityCostFunction.java index bb5dc8182b33..c051bf96c4bd 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerLocalityCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerLocalityCostFunction.java @@ -35,4 +35,4 @@ class ServerLocalityCostFunction extends LocalityBasedCostFunction { int regionIndexToEntityIndex(int region) { return cluster.regionIndexToServerIndex[region]; } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index 84418b36f611..a9e1dfcaaad4 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,13 +62,14 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { private List serverLoadList = new ArrayList<>(); // overallSlop to control simpleLoadBalancer's cluster level threshold private float overallSlop; + /** - * Stores additional per-server information about the regions added/removed - * during the run of the balancing algorithm. + * Stores additional per-server information about the regions added/removed during the run of the + * balancing algorithm. *

- * For servers that shed regions, we need to track which regions we have already - * shed. nextRegionForUnload contains the index in the list of regions on - * the server that is the next to be shed. + * For servers that shed regions, we need to track which regions we have already shed. + * nextRegionForUnload contains the index in the list of regions on the server that is the + * next to be shed. */ private static final class BalanceInfo { @@ -108,7 +109,7 @@ void setNextRegionForUnload(int nextRegionForUnload) { * Pass RegionStates and allow balancer to set the current cluster load. */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|SimpleLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|SimpleLoadBalancer).java") void setClusterLoad(Map>> clusterLoad) { serverLoadList.clear(); Map server2LoadMap = new HashMap<>(); @@ -145,8 +146,9 @@ public void onConfigurationChange(Configuration conf) { float originSlop = slop; float originOverallSlop = overallSlop; loadConf(conf); - LOG.info("Update configuration of SimpleLoadBalancer, previous slop is {}," - + " current slop is {}, previous overallSlop is {}, current overallSlop is {}", + LOG.info( + "Update configuration of SimpleLoadBalancer, previous slop is {}," + + " current slop is {}, previous overallSlop is {}, current overallSlop is {}", originSlop, slop, originOverallSlop, overallSlop); } @@ -165,7 +167,7 @@ private boolean overallNeedsBalance() { int floor = (int) Math.floor(avgLoadOverall * (1 - overallSlop)); int ceiling = (int) Math.ceil(avgLoadOverall * (1 + overallSlop)); int max = 0, min = Integer.MAX_VALUE; - for(ServerAndLoad server : serverLoadList){ + for (ServerAndLoad server : serverLoadList) { max = Math.max(server.getLoad(), max); min = Math.min(server.getLoad(), min); } @@ -201,10 +203,10 @@ private boolean needsBalance(BalancerClusterState c) { NavigableMap> serversByLoad = cs.getServersByLoad(); if (LOG.isTraceEnabled()) { // If nothing to balance, then don't say anything unless trace-level logging. - LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" + - cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average + - " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" + - serversByLoad.firstKey().getLoad()); + LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" + + cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average + + " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" + + serversByLoad.firstKey().getLoad()); } return false; } @@ -212,93 +214,62 @@ private boolean needsBalance(BalancerClusterState c) { } /** - * Generate a global load balancing plan according to the specified map of - * server information to the most loaded regions of each server. - * - * The load balancing invariant is that all servers are within 1 region of the - * average number of regions per server. If the average is an integer number, - * all servers will be balanced to the average. Otherwise, all servers will - * have either floor(average) or ceiling(average) regions. - * - * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that - * we can fetch from both ends of the queue. - * At the beginning, we check whether there was empty region server - * just discovered by Master. If so, we alternately choose new / old - * regions from head / tail of regionsToMove, respectively. 
This alternation - * avoids clustering young regions on the newly discovered region server. - * Otherwise, we choose new regions from head of regionsToMove. - * - * Another improvement from HBASE-3609 is that we assign regions from - * regionsToMove to underloaded servers in round-robin fashion. - * Previously one underloaded server would be filled before we move onto - * the next underloaded server, leading to clustering of young regions. - * - * Finally, we randomly shuffle underloaded servers so that they receive - * offloaded regions relatively evenly across calls to balanceCluster(). - * - * The algorithm is currently implemented as such: - * + * Generate a global load balancing plan according to the specified map of server information to + * the most loaded regions of each server. The load balancing invariant is that all servers are + * within 1 region of the average number of regions per server. If the average is an integer + * number, all servers will be balanced to the average. Otherwise, all servers will have either + * floor(average) or ceiling(average) regions. HBASE-3609 Modeled regionsToMove using Guava's + * MinMaxPriorityQueue so that we can fetch from both ends of the queue. At the beginning, we + * check whether there was empty region server just discovered by Master. If so, we alternately + * choose new / old regions from head / tail of regionsToMove, respectively. This alternation + * avoids clustering young regions on the newly discovered region server. Otherwise, we choose new + * regions from head of regionsToMove. Another improvement from HBASE-3609 is that we assign + * regions from regionsToMove to underloaded servers in round-robin fashion. Previously one + * underloaded server would be filled before we move onto the next underloaded server, leading to + * clustering of young regions. Finally, we randomly shuffle underloaded servers so that they + * receive offloaded regions relatively evenly across calls to balanceCluster(). The algorithm is + * currently implemented as such: *
    *
  1. Determine the two valid numbers of regions each server should have, - * MIN=floor(average) and MAX=ceiling(average). - * - *
  2. Iterate down the most loaded servers, shedding regions from each so - * each server hosts exactly MAX regions. Stop once you reach a - * server that already has <= MAX regions. - *

    - * Order the regions to move from most recent to least. - * - *

  3. Iterate down the least loaded servers, assigning regions so each server - * has exactly MIN regions. Stop once you reach a server that - * already has >= MIN regions. - * - * Regions being assigned to underloaded servers are those that were shed - * in the previous step. It is possible that there were not enough - * regions shed to fill each underloaded server to MIN. If so we - * end up with a number of regions required to do so, neededRegions. - * - * It is also possible that we were able to fill each underloaded but ended - * up with regions that were unassigned from overloaded servers but that - * still do not have assignment. - * - * If neither of these conditions hold (no regions needed to fill the - * underloaded servers, no regions leftover from overloaded servers), - * we are done and return. Otherwise we handle these cases below. - * - *
  4. If neededRegions is non-zero (still have underloaded servers), - * we iterate the most loaded servers again, shedding a single server from - * each (this brings them from having MAX regions to having - * MIN regions). - * - *
  5. We now definitely have more regions that need assignment, either from - * the previous step or from the original shedding from overloaded servers. - * Iterate the least loaded servers filling each to MIN. - * - *
  6. If we still have more regions that need assignment, again iterate the - * least loaded servers, this time giving each one (filling them to - * MAX) until we run out. - * - *
  7. All servers will now either host MIN or MAX regions. - * - * In addition, any server hosting >= MAX regions is guaranteed - * to end up with MAX regions at the end of the balancing. This - * ensures the minimal number of regions possible are moved. + * MIN=floor(average) and MAX=ceiling(average). + *
  8. Iterate down the most loaded servers, shedding regions from each so each server hosts + * exactly MAX regions. Stop once you reach a server that already has <= MAX + * regions. + *

    + * Order the regions to move from most recent to least. + *

  9. Iterate down the least loaded servers, assigning regions so each server has exactly + * MIN regions. Stop once you reach a server that already has >= MIN regions. + * Regions being assigned to underloaded servers are those that were shed in the previous step. It + * is possible that there were not enough regions shed to fill each underloaded server to + * MIN. If so we end up with a number of regions required to do so, neededRegions. + * It is also possible that we were able to fill each underloaded but ended up with regions that + * were unassigned from overloaded servers but that still do not have assignment. If neither of + * these conditions hold (no regions needed to fill the underloaded servers, no regions leftover + * from overloaded servers), we are done and return. Otherwise we handle these cases below. + *
  10. If neededRegions is non-zero (still have underloaded servers), we iterate the most + * loaded servers again, shedding a single server from each (this brings them from having + * MAX regions to having MIN regions). + *
  11. We now definitely have more regions that need assignment, either from the previous step or + * from the original shedding from overloaded servers. Iterate the least loaded servers filling + * each to MIN. + *
  12. If we still have more regions that need assignment, again iterate the least loaded servers, + * this time giving each one (filling them to MAX) until we run out. + *
  13. All servers will now either host MIN or MAX regions. In addition, any server + * hosting >= MAX regions is guaranteed to end up with MAX regions at the end of + * the balancing. This ensures the minimal number of regions possible are moved. *
- * - * TODO: We can at-most reassign the number of regions away from a particular - * server to be how many they report as most loaded. - * Should we just keep all assignment in memory? Any objections? - * Does this mean we need HeapSize on HMaster? Or just careful monitor? - * (current thinking is we will hold all assignments in memory) - * - * @param loadOfOneTable Map of regionservers and their load/region information to - * a list of their most loaded regions - * @return a list of regions to be moved, including source and destination, - * or null if cluster is already balanced + * TODO: We can at-most reassign the number of regions away from a particular server to be how + * many they report as most loaded. Should we just keep all assignment in memory? Any objections? + * Does this mean we need HeapSize on HMaster? Or just careful monitor? (current thinking is we + * will hold all assignments in memory) + * @param loadOfOneTable Map of regionservers and their load/region information to a list of their + * most loaded regions + * @return a list of regions to be moved, including source and destination, or null if cluster is + * already balanced */ @Override protected List balanceTable(TableName tableName, - Map> loadOfOneTable) { + Map> loadOfOneTable) { long startTime = EnvironmentEdgeManager.currentTime(); // construct a Cluster object with clusterMap and rest of the @@ -313,14 +284,14 @@ protected List balanceTable(TableName tableName, NavigableMap> serversByLoad = cs.getServersByLoad(); int numRegions = cs.getNumRegions(); float average = cs.getLoadAverage(); - int max = (int)Math.ceil(average); - int min = (int)average; + int max = (int) Math.ceil(average); + int min = (int) average; // Using to check balance result. StringBuilder strBalanceParam = new StringBuilder(); strBalanceParam.append("Balance parameter: numRegions=").append(numRegions) - .append(", numServers=").append(numServers).append(", max=").append(max) - .append(", min=").append(min); + .append(", numServers=").append(numServers).append(", max=").append(max).append(", min=") + .append(min); LOG.debug(strBalanceParam.toString()); // Balance the cluster @@ -334,8 +305,8 @@ protected List balanceTable(TableName tableName, // flag used to fetch regions from head and tail of list, alternately boolean fetchFromTail = false; Map serverBalanceInfo = new TreeMap<>(); - for (Map.Entry> server: - serversByLoad.descendingMap().entrySet()) { + for (Map.Entry> server : serversByLoad.descendingMap() + .entrySet()) { ServerAndLoad sal = server.getKey(); int load = sal.getLoad(); if (load <= max) { @@ -349,7 +320,7 @@ protected List balanceTable(TableName tableName, // after some other region server crashed Collections.sort(regions, riComparator); int numTaken = 0; - for (int i = 0; i <= numToOffload; ) { + for (int i = 0; i <= numToOffload;) { RegionInfo hri = regions.get(i); // fetch from head if (fetchFromTail) { hri = regions.get(regions.size() - 1 - i); @@ -372,8 +343,7 @@ protected List balanceTable(TableName tableName, Map underloadedServers = new HashMap<>(); int maxToTake = numRegions - min; - for (Map.Entry> server: - serversByLoad.entrySet()) { + for (Map.Entry> server : serversByLoad.entrySet()) { if (maxToTake == 0) { break; // no more to take } @@ -393,7 +363,7 @@ protected List balanceTable(TableName tableName, Collections.shuffle(sns); while (regionsToMove.size() > 0) { int cnt = 0; - int i = incr > 0 ? 0 : underloadedServers.size()-1; + int i = incr > 0 ? 
0 : underloadedServers.size() - 1; for (; i >= 0 && i < underloadedServers.size(); i += incr) { if (regionsToMove.isEmpty()) { break; @@ -406,10 +376,10 @@ protected List balanceTable(TableName tableName, addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn); - underloadedServers.put(si, numToTake-1); + underloadedServers.put(si, numToTake - 1); cnt++; BalanceInfo bi = serverBalanceInfo.get(si); - bi.setNumRegionsAdded(bi.getNumRegionsAdded()+1); + bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1); } if (cnt == 0) { break; @@ -428,12 +398,10 @@ protected List balanceTable(TableName tableName, // If we need more to fill min, grab one from each most loaded until enough if (neededRegions != 0) { // Walk down most loaded, grabbing one from each until we get enough - for (Map.Entry> server : - serversByLoad.descendingMap().entrySet()) { - BalanceInfo balanceInfo = - serverBalanceInfo.get(server.getKey().getServerName()); - int idx = - balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload(); + for (Map.Entry> server : serversByLoad.descendingMap() + .entrySet()) { + BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName()); + int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload(); if (idx >= server.getValue().size()) { break; } @@ -456,24 +424,23 @@ protected List balanceTable(TableName tableName, // Assign each underloaded up to the min, then if leftovers, assign to max // Walk down least loaded, assigning to each to fill up to min - for (Map.Entry> server : - serversByLoad.entrySet()) { + for (Map.Entry> server : serversByLoad.entrySet()) { int regionCount = server.getKey().getLoad(); if (regionCount >= min) { break; } BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName()); - if(balanceInfo != null) { + if (balanceInfo != null) { regionCount += balanceInfo.getNumRegionsAdded(); } - if(regionCount >= min) { + if (regionCount >= min) { continue; } int numToTake = min - regionCount; int numTaken = 0; - while(numTaken < numToTake && 0 < regionsToMove.size()) { - addRegionPlan(regionsToMove, fetchFromTail, - server.getKey().getServerName(), regionsToReturn); + while (numTaken < numToTake && 0 < regionsToMove.size()) { + addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), + regionsToReturn); numTaken++; balanceInfo.setNumRegionsAdded(balanceInfo.getNumRegionsAdded() + 1); } @@ -487,11 +454,11 @@ protected List balanceTable(TableName tableName, if (!regionsToMove.isEmpty() || neededRegions != 0) { // Emit data so can diagnose how balancer went astray. - LOG.warn("regionsToMove=" + totalNumMoved + - ", numServers=" + numServers + ", serversOverloaded=" + serversOverloaded + - ", serversUnderloaded=" + serversUnderloaded); + LOG.warn( + "regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded=" + + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded); StringBuilder sb = new StringBuilder(); - for (Map.Entry> e: loadOfOneTable.entrySet()) { + for (Map.Entry> e : loadOfOneTable.entrySet()) { if (sb.length() > 0) { sb.append(", "); } @@ -503,10 +470,9 @@ protected List balanceTable(TableName tableName, } // All done! - LOG.info("Done. Calculated a load balance in " + (endTime-startTime) + "ms. " + - "Moving " + totalNumMoved + " regions off of " + - serversOverloaded + " overloaded servers onto " + - serversUnderloaded + " less loaded servers"); + LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. 
" + "Moving " + + totalNumMoved + " regions off of " + serversOverloaded + " overloaded servers onto " + + serversUnderloaded + " less loaded servers"); return regionsToReturn; } @@ -547,7 +513,7 @@ private void balanceOverall(List regionsToReturn, RegionInfo hriToPlan; if (balanceInfo.getHriList().isEmpty()) { LOG.debug("During balanceOverall, we found " + serverload.getServerName() - + " has no RegionInfo, no operation needed"); + + " has no RegionInfo, no operation needed"); continue; } else if (balanceInfo.getNextRegionForUnload() >= balanceInfo.getHriList().size()) { continue; @@ -557,14 +523,16 @@ private void balanceOverall(List regionsToReturn, RegionPlan maxPlan = new RegionPlan(hriToPlan, serverload.getServerName(), null); regionsToMove.add(maxPlan); setLoad(serverLoadList, i, -1); - } else if (balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() > max || - balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() < min) { + } else if ( + balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() > max + || balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() < min + ) { LOG.warn( - "Encounter incorrect region numbers after calculating move plan during balanceOverall, " + - "for this table, " + serverload.getServerName() + " originally has " + - balanceInfo.getHriList().size() + " regions and " + balanceInfo.getNumRegionsAdded() + - " regions have been added. Yet, max =" + max + ", min =" + min + - ". Thus stop balance for this table"); // should not happen + "Encounter incorrect region numbers after calculating move plan during balanceOverall, " + + "for this table, " + serverload.getServerName() + " originally has " + + balanceInfo.getHriList().size() + " regions and " + balanceInfo.getNumRegionsAdded() + + " regions have been added. Yet, max =" + max + ", min =" + min + + ". Thus stop balance for this table"); // should not happen return; } } @@ -588,12 +556,12 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { // We need to remove the plan that has the source RS equals to destination RS, // since the source RS belongs to the least n loaded RS. int assignLength = regionsToMove.size(); - // A structure help to map ServerName to it's load and index in ServerLoadList - Map> SnLoadMap = new HashMap<>(); + // A structure help to map ServerName to it's load and index in ServerLoadList + Map> SnLoadMap = new HashMap<>(); for (int i = 0; i < serverLoadList.size(); i++) { SnLoadMap.put(serverLoadList.get(i).getServerName(), new Pair<>(serverLoadList.get(i), i)); } - Pair shredLoad; + Pair shredLoad; // A List to help mark the plan in regionsToMove that should be removed List planToRemoveList = new ArrayList<>(); // A structure to record how many times a server becomes the source of a plan, from @@ -601,7 +569,7 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { Map sourceMap = new HashMap<>(); // We remove one of the plan which would cause source RS equals destination RS. // But we should keep in mind that the second plan from such RS should be kept. 
- for(RegionPlan plan: regionsToMove){ + for (RegionPlan plan : regionsToMove) { // the source RS's load and index in ServerLoadList shredLoad = SnLoadMap.get(plan.getSource()); if (!sourceMap.containsKey(plan.getSource())) { @@ -617,7 +585,7 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { // Remove those marked plans from regionsToMove, // we cannot direct remove them during iterating through // regionsToMove, due to the fact that regionsToMove is a MinMaxPriorityQueue. - for(RegionPlan planToRemove : planToRemoveList){ + for (RegionPlan planToRemove : planToRemoveList) { regionsToMove.remove(planToRemove); } @@ -626,22 +594,22 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { // the first n = regionsToMove.size() of them, with least load. // With this strategy adopted, we can gradually achieve the overall balance, // while keeping table level balanced. - for(int i = 0; i < assignLength; i++){ + for (int i = 0; i < assignLength; i++) { // skip the RS that is also the source, we have removed them from regionsToMove in previous // step if (sourceMap.containsKey(serverLoadList.get(i).getServerName())) { continue; } - addRegionPlan(regionsToMove, fetchFromTail, - serverLoadList.get(i).getServerName(), regionsToReturn); + addRegionPlan(regionsToMove, fetchFromTail, serverLoadList.get(i).getServerName(), + regionsToReturn); setLoad(serverLoadList, i, 1); // resolve a possible cyclic assignment pair if we just produced one: // e.g. plan1: A -> B, plan2: B -> C => resolve plan1 to A -> C and remove plan2 List pos = returnMap.get(regionsToReturn.get(regionsToReturn.size() - 1).getSource()); if (pos != null && pos.size() != 0) { - regionsToReturn.get(pos.get(pos.size() - 1)).setDestination( - regionsToReturn.get(regionsToReturn.size() - 1).getDestination()); + regionsToReturn.get(pos.get(pos.size() - 1)) + .setDestination(regionsToReturn.get(regionsToReturn.size() - 1).getDestination()); pos.remove(pos.size() - 1); regionsToReturn.remove(regionsToReturn.size() - 1); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index ff6d031cef33..866e3d0a2c6a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,9 +48,11 @@ import org.slf4j.LoggerFactory; /** - *

This is a best effort load balancer. Given a Cost function F(C) => x It will - * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the - * new cluster state becomes the plan. It includes costs functions to compute the cost of:

+ *

+ * This is a best effort load balancer. Given a Cost function F(C) => x, it will randomly try and + * mutate the cluster to Cprime. If F(Cprime) < F(C) then the new cluster state becomes the plan. + * It includes cost functions to compute the cost of: + *

*
    *
  • Region Load
  • *
  • Table Load
  • @@ -58,44 +60,46 @@ *
  • Memstore Sizes
  • *
  • Storefile Sizes
  • *
- * - * - *

Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost - * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are - * scaled by their respective multipliers:

- * + *

+ * Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost best + * solution, and 1 is the highest possible cost and the worst solution. The computed costs are + * scaled by their respective multipliers: + *

*
    - *
  • hbase.master.balancer.stochastic.regionLoadCost
  • - *
  • hbase.master.balancer.stochastic.moveCost
  • - *
  • hbase.master.balancer.stochastic.tableLoadCost
  • - *
  • hbase.master.balancer.stochastic.localityCost
  • - *
  • hbase.master.balancer.stochastic.memstoreSizeCost
  • - *
  • hbase.master.balancer.stochastic.storefileSizeCost
  • + *
  • hbase.master.balancer.stochastic.regionLoadCost
  • + *
  • hbase.master.balancer.stochastic.moveCost
  • + *
  • hbase.master.balancer.stochastic.tableLoadCost
  • + *
  • hbase.master.balancer.stochastic.localityCost
  • + *
  • hbase.master.balancer.stochastic.memstoreSizeCost
  • + *
  • hbase.master.balancer.stochastic.storefileSizeCost
  • *
- * - *

You can also add custom Cost function by setting the the following configuration value:

+ *

+ * You can also add a custom Cost function by setting the following configuration value: + *

*
    - *
  • hbase.master.balancer.stochastic.additionalCostFunctions
  • + *
  • hbase.master.balancer.stochastic.additionalCostFunctions
  • *
- * - *

All custom Cost Functions needs to extends {@link CostFunction}

- * - *

In addition to the above configurations, the balancer can be tuned by the following - * configuration values:

+ *

All custom Cost Functions need to extend {@link CostFunction} + *

+ *

+ * In addition to the above configurations, the balancer can be tuned by the following configuration + * values: + *

*
    - *
  • hbase.master.balancer.stochastic.maxMoveRegions which - * controls what the max number of regions that can be moved in a single invocation of this - * balancer.
  • - *
  • hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of - * regions is multiplied to try and get the number of times the balancer will - * mutate all servers.
  • - *
  • hbase.master.balancer.stochastic.maxSteps which controls the maximum number of times that - * the balancer will try and mutate all the servers. The balancer will use the minimum of this - * value and the above computation.
  • + *
  • hbase.master.balancer.stochastic.maxMoveRegions which controls the maximum number of regions + * that can be moved in a single invocation of this balancer.
  • + *
  • hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of + * regions is multiplied to try and get the number of times the balancer will mutate all + * servers.
  • + *
  • hbase.master.balancer.stochastic.maxSteps which controls the maximum number of times that the + * balancer will try and mutate all the servers. The balancer will use the minimum of this value and + * the above computation.
  • *
- * - *

This balancer is best used with hbase.master.loadbalance.bytable set to false - * so that the balancer gets the full picture of all loads on the cluster.

+ *

+ * This balancer is best used with hbase.master.loadbalance.bytable set to false so that the + * balancer gets the full picture of all loads on the cluster. + *

*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class StochasticLoadBalancer extends BaseLoadBalancer { @@ -103,20 +107,18 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private static final Logger LOG = LoggerFactory.getLogger(StochasticLoadBalancer.class); protected static final String STEPS_PER_REGION_KEY = - "hbase.master.balancer.stochastic.stepsPerRegion"; - protected static final String MAX_STEPS_KEY = - "hbase.master.balancer.stochastic.maxSteps"; - protected static final String RUN_MAX_STEPS_KEY = - "hbase.master.balancer.stochastic.runMaxSteps"; + "hbase.master.balancer.stochastic.stepsPerRegion"; + protected static final String MAX_STEPS_KEY = "hbase.master.balancer.stochastic.maxSteps"; + protected static final String RUN_MAX_STEPS_KEY = "hbase.master.balancer.stochastic.runMaxSteps"; protected static final String MAX_RUNNING_TIME_KEY = - "hbase.master.balancer.stochastic.maxRunningTime"; + "hbase.master.balancer.stochastic.maxRunningTime"; protected static final String KEEP_REGION_LOADS = - "hbase.master.balancer.stochastic.numRegionLoadsToRemember"; + "hbase.master.balancer.stochastic.numRegionLoadsToRemember"; private static final String TABLE_FUNCTION_SEP = "_"; protected static final String MIN_COST_NEED_BALANCE_KEY = - "hbase.master.balancer.stochastic.minCostNeedBalance"; + "hbase.master.balancer.stochastic.minCostNeedBalance"; protected static final String COST_FUNCTIONS_COST_FUNCTIONS_KEY = - "hbase.master.balancer.stochastic.additionalCostFunctions"; + "hbase.master.balancer.stochastic.additionalCostFunctions"; public static final String OVERALL_COST_FUNCTION_NAME = "Overall"; Map> loads = new HashMap<>(); @@ -149,7 +151,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { protected List candidateGenerators; public enum GeneratorType { - RANDOM, LOAD, LOCALITY, RACK + RANDOM, + LOAD, + LOCALITY, + RACK } /** @@ -161,7 +166,7 @@ public StochasticLoadBalancer() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public StochasticLoadBalancer(MetricsStochasticBalancer metricsStochasticBalancer) { super(metricsStochasticBalancer); } @@ -198,7 +203,7 @@ private void loadCustomCostFunctions(Configuration conf) { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") List getCandidateGenerators() { return this.candidateGenerators; } @@ -255,12 +260,11 @@ protected void loadConf(Configuration conf) { curFunctionCosts = new double[costFunctions.size()]; tempFunctionCosts = new double[costFunctions.size()]; - LOG.info( - "Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps + - ", stepsPerRegion=" + stepsPerRegion + - ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + - ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + - " , sum of multiplier of cost functions = " + sumMultiplier + " etc."); } + LOG.info("Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps + + ", stepsPerRegion=" + stepsPerRegion + ", maxRunningTime=" + maxRunningTime + ", isByTable=" + + isByTable + ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + + " , sum of multiplier of cost functions = " + sumMultiplier + " etc."); + } @Override public void updateClusterMetrics(ClusterMetrics st) { @@ -279,7 +283,8 @@ public void updateClusterMetrics(ClusterMetrics st) { } } - private void 
updateBalancerTableLoadInfo(TableName tableName, Map> loadOfOneTable) { + private void updateBalancerTableLoadInfo(TableName tableName, + Map> loadOfOneTable) { RegionHDFSBlockLocationFinder finder = null; if ((this.localityCost != null) || (this.rackLocalityCost != null)) { finder = this.regionFinder; @@ -294,14 +299,15 @@ private void updateBalancerTableLoadInfo(TableName tableName, Map>> loadOfAllTable) { + public void + updateBalancerLoadInfo(Map>> loadOfAllTable) { if (isByTable) { loadOfAllTable.forEach((tableName, loadOfOneTable) -> { updateBalancerTableLoadInfo(tableName, loadOfOneTable); }); } else { - updateBalancerTableLoadInfo(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable)); + updateBalancerTableLoadInfo(HConstants.ENSEMBLE_TABLE_NAME, + toEnsumbleTableLoad(loadOfAllTable)); } } @@ -309,7 +315,7 @@ public void updateBalancerLoadInfo( * Update the number of metrics that are reported to JMX */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void updateMetricsSize(int size) { if (metricsBalancer instanceof MetricsStochasticBalancer) { ((MetricsStochasticBalancer) metricsBalancer).updateMetricsSize(size); @@ -327,33 +333,33 @@ private String getBalanceReason(double total, double sumMultiplier) { } else if (sumMultiplier <= 0) { return "sumMultiplier = " + sumMultiplier + " <= 0"; } else if ((total / sumMultiplier) < minCostNeedBalance) { - return "[(cost1*multiplier1)+(cost2*multiplier2)+...+(costn*multipliern)]/sumMultiplier = " + - (total / sumMultiplier) + " <= minCostNeedBalance(" + minCostNeedBalance + ")"; + return "[(cost1*multiplier1)+(cost2*multiplier2)+...+(costn*multipliern)]/sumMultiplier = " + + (total / sumMultiplier) + " <= minCostNeedBalance(" + minCostNeedBalance + ")"; } else { return ""; } } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") boolean needsBalance(TableName tableName, BalancerClusterState cluster) { ClusterLoadState cs = new ClusterLoadState(cluster.clusterState); if (cs.getNumServers() < MIN_SERVER_BALANCE) { - LOG.info("Not running balancer because only " + cs.getNumServers() - + " active regionserver(s)"); - sendRejectionReasonToRingBuffer(() -> "The number of RegionServers " + cs.getNumServers() + - " < MIN_SERVER_BALANCE(" + MIN_SERVER_BALANCE + ")", null); + LOG.info( + "Not running balancer because only " + cs.getNumServers() + " active regionserver(s)"); + sendRejectionReasonToRingBuffer(() -> "The number of RegionServers " + cs.getNumServers() + + " < MIN_SERVER_BALANCE(" + MIN_SERVER_BALANCE + ")", null); return false; } if (areSomeRegionReplicasColocated(cluster)) { - LOG.info("Running balancer because at least one server hosts replicas of the same region." + - " function cost={}", functionCost()); + LOG.info("Running balancer because at least one server hosts replicas of the same region." + + " function cost={}", functionCost()); return true; } - if (idleRegionServerExist(cluster)){ - LOG.info("Running balancer because cluster has idle server(s)."+ - " function cost={}", functionCost()); + if (idleRegionServerExist(cluster)) { + LOG.info("Running balancer because cluster has idle server(s)." 
+ " function cost={}", + functionCost()); return true; } @@ -369,23 +375,24 @@ boolean needsBalance(TableName tableName, BalancerClusterState cluster) { if (balanced) { final double calculatedTotal = total; - sendRejectionReasonToRingBuffer(() -> - getBalanceReason(calculatedTotal, sumMultiplier), costFunctions); - LOG.info("{} - skipping load balancing because weighted average imbalance={} <= " + sendRejectionReasonToRingBuffer(() -> getBalanceReason(calculatedTotal, sumMultiplier), + costFunctions); + LOG.info( + "{} - skipping load balancing because weighted average imbalance={} <= " + "threshold({}). If you want more aggressive balancing, either lower " + "hbase.master.balancer.stochastic.minCostNeedBalance from {} or increase the relative " + "multiplier(s) of the specific cost function(s). functionCost={}", - isByTable ? "Table specific ("+tableName+")" : "Cluster wide", total / sumMultiplier, + isByTable ? "Table specific (" + tableName + ")" : "Cluster wide", total / sumMultiplier, minCostNeedBalance, minCostNeedBalance, functionCost()); } else { LOG.info("{} - Calculating plan. may take up to {}ms to complete.", - isByTable ? "Table specific ("+tableName+")" : "Cluster wide", maxRunningTime); + isByTable ? "Table specific (" + tableName + ")" : "Cluster wide", maxRunningTime); } return !balanced; } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") BalanceAction nextAction(BalancerClusterState cluster) { return getRandomGenerator().generate(cluster); } @@ -417,7 +424,7 @@ protected CandidateGenerator getRandomGenerator() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") void setRackManager(RackManager rackManager) { this.rackManager = rackManager; } @@ -427,24 +434,23 @@ private long calculateMaxSteps(BalancerClusterState cluster) { } /** - * Given the cluster state this will try and approach an optimal balance. This - * should always approach the optimal state given enough steps. + * Given the cluster state this will try and approach an optimal balance. This should always + * approach the optimal state given enough steps. */ @Override - protected List balanceTable(TableName tableName, Map> loadOfOneTable) { + protected List balanceTable(TableName tableName, + Map> loadOfOneTable) { // On clusters with lots of HFileLinks or lots of reference files, // instantiating the storefile infos can be quite expensive. // Allow turning this feature off if the locality cost is not going to // be used in any computations. RegionHDFSBlockLocationFinder finder = null; - if ((this.localityCost != null) - || (this.rackLocalityCost != null)) { + if ((this.localityCost != null) || (this.rackLocalityCost != null)) { finder = this.regionFinder; } - //The clusterState that is given to this method contains the state - //of all the regions in the table(s) (that's true today) + // The clusterState that is given to this method contains the state + // of all the regions in the table(s) (that's true today) // Keep track of servers to iterate through them. 
BalancerClusterState cluster = new BalancerClusterState(loadOfOneTable, loads, finder, rackManager); @@ -455,7 +461,7 @@ protected List balanceTable(TableName tableName, Map balanceTable(TableName tableName, Map maxSteps) { - LOG.warn("calculatedMaxSteps:{} for loadbalancer's stochastic walk is larger than " + LOG.warn( + "calculatedMaxSteps:{} for loadbalancer's stochastic walk is larger than " + "maxSteps:{}. Hence load balancing may not work well. Setting parameter " + "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can overcome this issue." - + "(This config change does not require service restart)", calculatedMaxSteps, - maxSteps); + + "(This config change does not require service restart)", + calculatedMaxSteps, maxSteps); } } - LOG.info("Start StochasticLoadBalancer.balancer, initial weighted average imbalance={}, " - + "functionCost={} computedMaxSteps={}", + LOG.info( + "Start StochasticLoadBalancer.balancer, initial weighted average imbalance={}, " + + "functionCost={} computedMaxSteps={}", currentCost / sumMultiplier, functionCost(), computedMaxSteps); final String initFunctionTotalCosts = totalCostsPerFunc(); @@ -525,8 +533,7 @@ protected List balanceTable(TableName tableName, Map - maxRunningTime) { + if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) { break; } } @@ -537,18 +544,20 @@ protected List balanceTable(TableName tableName, Map currentCost) { updateStochasticCosts(tableName, curOverallCost, curFunctionCosts); List plans = createRegionPlans(cluster); - LOG.info("Finished computing new moving plan. Computation took {} ms" + - " to try {} different iterations. Found a solution that moves " + - "{} regions; Going from a computed imbalance of {}" + - " to a new imbalance of {}. funtionCost={}", - endTime - startTime, step, plans.size(), - initCost / sumMultiplier, currentCost / sumMultiplier, functionCost()); + LOG.info( + "Finished computing new moving plan. Computation took {} ms" + + " to try {} different iterations. Found a solution that moves " + + "{} regions; Going from a computed imbalance of {}" + + " to a new imbalance of {}. funtionCost={}", + endTime - startTime, step, plans.size(), initCost / sumMultiplier, + currentCost / sumMultiplier, functionCost()); sendRegionPlansToRingBuffer(plans, currentCost, initCost, initFunctionTotalCosts, step); return plans; } - LOG.info("Could not find a better moving plan. Tried {} different configurations in " + - "{} ms, and did not find anything with an imbalance score less than {}", step, - endTime - startTime, initCost / sumMultiplier); + LOG.info( + "Could not find a better moving plan. 
Tried {} different configurations in " + + "{} ms, and did not find anything with an imbalance score less than {}", + step, endTime - startTime, initCost / sumMultiplier); return null; } @@ -574,8 +583,8 @@ private void sendRegionPlansToRingBuffer(List plans, double currentC List regionPlans = new ArrayList<>(); for (RegionPlan plan : plans) { regionPlans - .add("table: " + plan.getRegionInfo().getTable() + " , region: " + plan.getRegionName() + - " , source: " + plan.getSource() + " , destination: " + plan.getDestination()); + .add("table: " + plan.getRegionInfo().getTable() + " , region: " + plan.getRegionName() + + " , source: " + plan.getSource() + " , destination: " + plan.getDestination()); } return new BalancerDecision.Builder().setInitTotalCost(initCost) .setInitialFunctionCosts(initFunctionTotalCosts).setComputedTotalCost(currentCost) @@ -596,8 +605,8 @@ private void updateStochasticCosts(TableName tableName, double overall, double[] if (metricsBalancer instanceof MetricsStochasticBalancer) { MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer; // overall cost - balancer.updateStochasticCost(tableName.getNameAsString(), - OVERALL_COST_FUNCTION_NAME, "Overall cost", overall); + balancer.updateStochasticCost(tableName.getNameAsString(), OVERALL_COST_FUNCTION_NAME, + "Overall cost", overall); // each cost function for (int i = 0; i < costFunctions.size(); i++) { @@ -663,14 +672,13 @@ private String totalCostsPerFunc() { /** * Create all of the RegionPlan's needed to move from the initial cluster state to the desired * state. - * * @param cluster The state of the cluster * @return List of RegionPlan's that represent the moves needed to get to desired final state. */ private List createRegionPlans(BalancerClusterState cluster) { List plans = new ArrayList<>(); - for (int regionIndex = 0; - regionIndex < cluster.regionIndexToServerIndex.length; regionIndex++) { + for (int regionIndex = 0; regionIndex + < cluster.regionIndexToServerIndex.length; regionIndex++) { int initialServerIndex = cluster.initialRegionIndexToServerIndex[regionIndex]; int newServerIndex = cluster.regionIndexToServerIndex[regionIndex]; @@ -681,7 +689,7 @@ private List createRegionPlans(BalancerClusterState cluster) { if (LOG.isTraceEnabled()) { LOG.trace("Moving Region " + region.getEncodedName() + " from server " - + initialServer.getHostname() + " to " + newServer.getHostname()); + + initialServer.getHostname() + " to " + newServer.getHostname()); } RegionPlan rp = new RegionPlan(region, initialServer, newServer); plans.add(rp); @@ -715,7 +723,7 @@ private void updateRegionLoad() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void initCosts(BalancerClusterState cluster) { // Initialize the weights of generator every time weightsOfGenerators = new double[this.candidateGenerators.size()]; @@ -729,7 +737,7 @@ void initCosts(BalancerClusterState cluster) { * Update both the costs of costfunctions and the weights of candidate generators */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction action) { // Reset all the weights to 0 for (int i = 0; i < weightsOfGenerators.length; i++) { @@ -747,7 +755,7 
@@ void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction * Get the names of the cost functions */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") String[] getCostFunctionNames() { String[] ret = new String[costFunctions.size()]; for (int i = 0; i < costFunctions.size(); i++) { @@ -759,16 +767,15 @@ String[] getCostFunctionNames() { } /** - * This is the main cost function. It will compute a cost associated with a proposed cluster - * state. All different costs will be combined with their multipliers to produce a double cost. - * - * @param cluster The state of the cluster + * This is the main cost function. It will compute a cost associated with a proposed cluster + * state. All different costs will be combined with their multipliers to produce a double cost. + * @param cluster The state of the cluster * @param previousCost the previous cost. This is used as an early out. - * @return a double of a cost associated with the proposed cluster state. This cost is an + * @return a double of a cost associated with the proposed cluster state. This cost is an * aggregate of all individual cost functions. */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") double computeCost(BalancerClusterState cluster, double previousCost) { double total = 0; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java index 31ad2b3940ff..d67e8d1d9cc4 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java @@ -39,4 +39,4 @@ class StoreFileCostFunction extends CostFromRegionLoadFunction { protected double getCostFromRl(BalancerRegionLoad rl) { return rl.getStorefileSizeMB(); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SwapRegionsAction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SwapRegionsAction.java index 9963fe0f6c0d..6f83d2bc930b 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SwapRegionsAction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SwapRegionsAction.java @@ -59,4 +59,4 @@ public BalanceAction undoAction() { public String toString() { return getType() + ": " + fromRegion + ":" + fromServer + " <-> " + toRegion + ":" + toServer; } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java index 26c962f5af59..ad416df088b5 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java @@ -39,4 +39,4 @@ class WriteRequestCostFunction extends CostFromRegionLoadAsRateFunction { protected double getCostFromRl(BalancerRegionLoad rl) { return rl.getWriteRequestsCount(); } -} \ 
No newline at end of file +} diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java index f2bf954d2820..1caea76a82ff 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestFavoredNodeAssignmentHelper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFavoredNodeAssignmentHelper.class); + HBaseClassTestRule.forClass(TestFavoredNodeAssignmentHelper.class); private static List servers = new ArrayList<>(); private static Map> rackToServers = new HashMap<>(); @@ -99,8 +99,7 @@ public static void setupBeforeClass() throws Exception { } }); for (int i = 0; i < 40; i++) { - ServerName server = ServerName.valueOf("foo" + i, 1234, - EnvironmentEdgeManager.currentTime()); + ServerName server = ServerName.valueOf("foo" + i, 1234, EnvironmentEdgeManager.currentTime()); String rack = getRack(i); if (!rack.equals(RackManager.UNKNOWN_RACK)) { rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server); @@ -146,8 +145,8 @@ public void testPlacePrimaryRSAsRoundRobin() { @Test public void testRoundRobinAssignmentsWithUnevenSizedRacks() { - //In the case of uneven racks, the regions should be distributed - //proportionately to the rack sizes + // In the case of uneven racks, the regions should be distributed + // proportionately to the rack sizes primaryRSPlacement(6, null, 10, 10, 10); primaryRSPlacement(600, null, 10, 10, 5); primaryRSPlacement(600, null, 10, 5, 10); @@ -165,16 +164,17 @@ public void testRoundRobinAssignmentsWithUnevenSizedRacks() { public void testSecondaryAndTertiaryPlacementWithSingleRack() { // Test the case where there is a single rack and we need to choose // Primary/Secondary/Tertiary from a single rack. - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 10); // have lots of regions to test with - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, + List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); // although we created lots of regions we should have no overlap on the // primary/secondary/tertiary for any given region for (RegionInfo region : regions) { @@ -191,7 +191,7 @@ public void testSecondaryAndTertiaryPlacementWithSingleRack() { public void testSecondaryAndTertiaryPlacementWithSingleServer() { // Test the case where we have a single node in the cluster. 
In this case // the primary can be assigned but the secondary/tertiary would be null - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = @@ -201,7 +201,7 @@ public void testSecondaryAndTertiaryPlacementWithSingleServer() { List regions = primaryRSMapAndHelper.getThird(); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); // no secondary/tertiary placement in case of a single RegionServer assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null); } @@ -210,18 +210,19 @@ public void testSecondaryAndTertiaryPlacementWithSingleServer() { public void testSecondaryAndTertiaryPlacementWithMultipleRacks() { // Test the case where we have multiple racks and the region servers // belong to multiple racks - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 10); rackToServerCount.put("rack2", 10); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, + List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); assertTrue(primaryRSMap.size() == 60000); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertTrue(secondaryAndTertiaryMap.size() == 60000); // for every region, the primary should be on one rack and the secondary/tertiary // on another (we create a lot of regions just to increase probability of failure) @@ -241,17 +242,18 @@ public void testSecondaryAndTertiaryPlacementWithMultipleRacks() { public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() { // Test the case where we have two racks but with less than two servers in each // We will not have enough machines to select secondary/tertiary - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); rackToServerCount.put("rack2", 1); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, + List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); for (RegionInfo region : regions) { // not enough secondary/tertiary room to place the regions assertTrue(secondaryAndTertiaryMap.get(region) == null); @@ -265,17 +267,18 @@ public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack( // racks than what the primary is on. 
But if the other rack doesn't have // enough nodes to have both secondary/tertiary RSs, the tertiary is placed // on the same rack as the primary server is on - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 2); rackToServerCount.put("rack2", 1); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, + List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertTrue(secondaryAndTertiaryMap.size() == regions.size()); for (RegionInfo region : regions) { ServerName s = primaryRSMap.get(region); @@ -293,16 +296,13 @@ public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack( Map primaryRSMap = new HashMap(); List servers = getServersFromRack(rackToServerCount); FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); - Map> assignmentMap = - new HashMap>(); + Map> assignmentMap = new HashMap>(); helper.initialize(); // create regions List regions = new ArrayList<>(regionCount); for (int i = 0; i < regionCount; i++) { regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build()); + .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build()); } // place the regions helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -310,14 +310,13 @@ public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack( } private void primaryRSPlacement(int regionCount, Map primaryRSMap, - int firstRackSize, int secondRackSize, int thirdRackSize) { - Map rackToServerCount = new HashMap<>(); + int firstRackSize, int secondRackSize, int thirdRackSize) { + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", firstRackSize); rackToServerCount.put("rack2", secondRackSize); rackToServerCount.put("rack3", thirdRackSize); List servers = getServersFromRack(rackToServerCount); - FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, - rackManager); + FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); helper.initialize(); assertTrue(helper.canPlaceFavoredNodes()); @@ -330,9 +329,7 @@ private void primaryRSPlacement(int regionCount, Map pri List regions = new ArrayList<>(regionCount); for (int i = 0; i < regionCount; i++) { regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar")) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build()); + .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build()); } // place those regions in primary RSs helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -384,11 +381,10 @@ private void checkNumRegions(int firstRackSize, int secondRackSize, int thirdRac rackMap.get(thirdRackSize).intValue(), regionMap.get(regionsOnRack3).intValue()); } - private String printProportions(int firstRackSize, int secondRackSize, - int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int 
regionsOnRack3) { - return "The rack sizes " + firstRackSize + " " + secondRackSize - + " " + thirdRackSize + " " + regionsOnRack1 + " " + regionsOnRack2 + - " " + regionsOnRack3; + private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize, + int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) { + return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize + " " + + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3; } @Test @@ -404,24 +400,21 @@ public void testConstrainedPlacement() throws Exception { List regions = new ArrayList<>(20); for (int i = 0; i < 20; i++) { regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build()); + .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build()); } - Map> assignmentMap = - new HashMap>(); + Map> assignmentMap = new HashMap>(); Map primaryRSMap = new HashMap(); helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); assertTrue(primaryRSMap.size() == regions.size()); Map secondaryAndTertiary = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertEquals(regions.size(), secondaryAndTertiary.size()); } @Test public void testGetOneRandomRack() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2", "rack3"); for (String rack : rackList) { rackToServerCount.put(rack, 2); @@ -433,20 +426,20 @@ public void testGetOneRandomRack() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); // Check we don't get a bad rack on any number of attempts - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet()))); } // Check skipRack multiple times when an invalid rack is specified Set skipRacks = Sets.newHashSet("rack"); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks))); } // Check skipRack multiple times when an valid rack is specified skipRacks = Sets.newHashSet("rack1"); Set validRacks = Sets.newHashSet("rack2", "rack3"); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks))); } } @@ -454,7 +447,7 @@ public void testGetOneRandomRack() throws IOException { @Test public void testGetRandomServerSingleRack() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); final String rack = "rack1"; rackToServerCount.put(rack, 4); List servers = getServersFromRack(rackToServerCount); @@ -464,15 +457,15 @@ public void testGetRandomServerSingleRack() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); // Check we don't get a bad node on any number of attempts - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet()); assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn)); } // Check skipServers multiple times when an invalid server is specified Set skipServers 
= - Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE)); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE)); + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { ServerName sn = helper.getOneRandomServer(rack, skipServers); assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn)); } @@ -480,17 +473,16 @@ public void testGetRandomServerSingleRack() throws IOException { // Check skipRack multiple times when an valid servers are specified ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE); skipServers = Sets.newHashSet(skipSN); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { ServerName sn = helper.getOneRandomServer(rack, skipServers); - assertNotEquals("Skip server should not be selected ", - skipSN.getAddress(), sn.getAddress()); + assertNotEquals("Skip server should not be selected ", skipSN.getAddress(), sn.getAddress()); assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn)); } } @Test public void testGetRandomServerMultiRack() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2", "rack3"); for (String rack : rackList) { rackToServerCount.put(rack, 4); @@ -502,22 +494,22 @@ public void testGetRandomServerMultiRack() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); // Check we don't get a bad node on any number of attempts - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { for (String rack : rackList) { ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet()); assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack), - rackToServers.get(rack).contains(sn)); + rackToServers.get(rack).contains(sn)); } } // Check skipServers multiple times when an invalid server is specified Set skipServers = - Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE)); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE)); + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { for (String rack : rackList) { ServerName sn = helper.getOneRandomServer(rack, skipServers); assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack), - rackToServers.get(rack).contains(sn)); + rackToServers.get(rack).contains(sn)); } } @@ -526,19 +518,19 @@ public void testGetRandomServerMultiRack() throws IOException { ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE); ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE); skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { for (String rack : rackList) { ServerName sn = helper.getOneRandomServer(rack, skipServers); assertFalse("Skip server should not be selected ", skipServers.contains(sn)); assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack), - rackToServers.get(rack).contains(sn)); + rackToServers.get(rack).contains(sn)); } } } @Test public void 
testGetFavoredNodes() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2", "rack3"); for (String rack : rackList) { rackToServerCount.put(rack, 4); @@ -550,9 +542,7 @@ public void testGetFavoredNodes() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(HConstants.EMPTY_START_ROW) - .setEndKey(HConstants.EMPTY_END_ROW) - .build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build(); for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) { List fn = helper.generateFavoredNodes(region); @@ -572,7 +562,6 @@ public void testGenMissingFavoredNodeOneRack() throws IOException { helper.initialize(); assertTrue(helper.canPlaceFavoredNodes()); - ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE); ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE); ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE); @@ -599,7 +588,7 @@ public void testGenMissingFavoredNodeMultiRack() throws IOException { ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE); ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE); - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2"); for (String rack : rackList) { rackToServerCount.put(rack, 4); @@ -639,13 +628,13 @@ private void checkDuplicateFN(List fnList, ServerName genFN) { assertNotNull("Generated FN can't be null", genFN); favoredNodes.add(genFN); assertEquals("Did not find expected number of favored nodes", - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); + FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); } private void checkDuplicateFN(List fnList) { Set favoredNodes = Sets.newHashSet(fnList); assertEquals("Did not find expected number of favored nodes", - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); + FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); } private void checkFNRacks(List fnList, ServerName genFN) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java index 05e1e0163fd5..095495729f26 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,18 +28,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStartcodeAgnosticServerName { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStartcodeAgnosticServerName.class); + HBaseClassTestRule.forClass(TestStartcodeAgnosticServerName.class); @Test public void testStartCodeServerName() { ServerName sn = ServerName.valueOf("www.example.org", 1234, 5678); StartcodeAgnosticServerName snStartCode = - new StartcodeAgnosticServerName("www.example.org", 1234, 5678); + new StartcodeAgnosticServerName("www.example.org", 1234, 5678); assertTrue(ServerName.isSameAddress(sn, snStartCode)); assertTrue(snStartCode.equals(sn)); @@ -47,7 +47,7 @@ public void testStartCodeServerName() { assertEquals(0, snStartCode.compareTo(sn)); StartcodeAgnosticServerName snStartCodeFNPort = - new StartcodeAgnosticServerName("www.example.org", 1234, ServerName.NON_STARTCODE); + new StartcodeAgnosticServerName("www.example.org", 1234, ServerName.NON_STARTCODE); assertTrue(ServerName.isSameAddress(snStartCodeFNPort, snStartCode)); assertTrue(snStartCode.equals(snStartCodeFNPort)); assertTrue(snStartCodeFNPort.equals(snStartCode)); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java index 13154cf9567e..c65198b825bc 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,12 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionPlan { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionPlan.class); + HBaseClassTestRule.forClass(TestRegionPlan.class); private final ServerName SRC = ServerName.valueOf("source", 1234, 2345); private final ServerName DEST = ServerName.valueOf("dest", 1234, 2345); @@ -98,7 +98,7 @@ public void testEquals() { // HRI is used for equality RegionInfo other = - RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName() + "other")).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName() + "other")).build(); assertNotEquals(plan.hashCode(), new RegionPlan(other, SRC, DEST).hashCode()); assertNotEquals(plan, new RegionPlan(other, SRC, DEST)); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index 59335079bcda..a8aca0306c8a 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -49,95 +49,61 @@ import org.slf4j.LoggerFactory; /** - * Class used to be the base of unit tests on load balancers. 
It gives helper - * methods to create maps of {@link ServerName} to lists of {@link RegionInfo} - * and to check list of region plans. - * + * Class used to be the base of unit tests on load balancers. It gives helper methods to create maps + * of {@link ServerName} to lists of {@link RegionInfo} and to check list of region plans. */ public class BalancerTestBase { private static final Logger LOG = LoggerFactory.getLogger(BalancerTestBase.class); static int regionId = 0; protected static Configuration conf; - protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }; + protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }; // int[testnum][servernumber] -> numregions - protected int[][] clusterStateMocks = new int[][]{ - // 1 node - new int[]{0}, - new int[]{1}, - new int[]{10}, - // 2 node - new int[]{0, 0}, - new int[]{2, 0}, - new int[]{2, 1}, - new int[]{2, 2}, - new int[]{2, 3}, - new int[]{2, 4}, - new int[]{1, 1}, - new int[]{0, 1}, - new int[]{10, 1}, - new int[]{514, 1432}, - new int[]{48, 53}, - // 3 node - new int[]{0, 1, 2}, - new int[]{1, 2, 3}, - new int[]{0, 2, 2}, - new int[]{0, 3, 0}, - new int[]{0, 4, 0}, - new int[]{20, 20, 0}, - // 
4 node - new int[]{0, 1, 2, 3}, - new int[]{4, 0, 0, 0}, - new int[]{5, 0, 0, 0}, - new int[]{6, 6, 0, 0}, - new int[]{6, 2, 0, 0}, - new int[]{6, 1, 0, 0}, - new int[]{6, 0, 0, 0}, - new int[]{4, 4, 4, 7}, - new int[]{4, 4, 4, 8}, - new int[]{0, 0, 0, 7}, - // 5 node - new int[]{1, 1, 1, 1, 4}, - // 6 nodes - new int[]{1500, 500, 500, 500, 10, 0}, - new int[]{1500, 500, 500, 500, 500, 0}, - // more nodes - new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 10}, - new int[]{6, 6, 5, 6, 6, 6, 6, 6, 6, 1}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 54}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 55}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 56}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 16}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 8}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 9}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 10}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 123}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 155}, - new int[]{10, 7, 12, 8, 11, 10, 9, 14}, - new int[]{13, 14, 6, 10, 10, 10, 8, 10}, - new int[]{130, 14, 60, 10, 100, 10, 80, 10}, - new int[]{130, 140, 60, 100, 100, 100, 80, 100}, - new int[]{0, 5 , 5, 5, 5}, - largeCluster, + protected int[][] clusterStateMocks = new int[][] { + // 1 node + new int[] { 0 }, new int[] { 1 }, new int[] { 10 }, + // 2 node + new int[] { 0, 0 }, new int[] { 2, 0 }, new int[] { 2, 1 }, new int[] { 2, 2 }, + new int[] { 2, 3 }, new int[] { 2, 4 }, new int[] { 1, 1 }, new int[] { 0, 1 }, + new int[] { 10, 1 }, new int[] { 514, 1432 }, new int[] { 48, 53 }, + // 3 node + new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 0, 2, 2 }, new int[] { 0, 3, 0 }, + new int[] { 0, 4, 0 }, new int[] { 20, 20, 0 }, + // 4 node + new int[] { 0, 1, 2, 3 }, new int[] { 4, 0, 0, 0 }, new int[] { 5, 0, 0, 0 }, + new int[] { 6, 6, 0, 0 }, new int[] { 6, 2, 0, 0 }, new int[] { 6, 1, 0, 0 }, + new int[] { 6, 0, 0, 0 }, new int[] { 4, 4, 4, 7 }, new int[] { 4, 4, 4, 8 }, + new int[] { 0, 0, 0, 7 }, + // 5 node + new int[] { 1, 1, 1, 1, 4 }, + // 6 nodes + new int[] { 1500, 500, 500, 500, 10, 0 }, new int[] { 1500, 500, 500, 500, 500, 0 }, + // more nodes + new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, new int[] { 6, 6, 5, 6, 6, 6, 6, 6, 6, 1 }, + new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 55 }, + new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 }, + new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 8 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 9 }, + new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 10 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 123 }, + new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 155 }, new int[] { 10, 7, 12, 8, 11, 10, 9, 14 }, + new int[] { 13, 14, 6, 10, 10, 10, 8, 10 }, new int[] { 130, 14, 60, 10, 100, 10, 80, 10 }, + new int[] { 130, 140, 60, 100, 100, 100, 80, 100 }, new int[] { 0, 5, 5, 5, 5 }, largeCluster, }; - // This class is introduced because IP to rack resolution can be lengthy. public static class MockMapping implements DNSToSwitchMapping { public MockMapping(Configuration conf) { @@ -158,8 +124,7 @@ public void reloadCachedMappings(List arg0) { } /** - * Invariant is that all servers have between floor(avg) and ceiling(avg) - * number of regions. + * Invariant is that all servers have between floor(avg) and ceiling(avg) number of regions. 
*/ public void assertClusterAsBalanced(List servers) { int numServers = servers.size(); @@ -186,15 +151,14 @@ public void assertClusterAsBalanced(List servers) { for (ServerAndLoad server : servers) { assertTrue("All servers should have a positive load. " + server, server.getLoad() >= 0); assertTrue("All servers should have load no more than " + max + ". " + server, - server.getLoad() <= max); + server.getLoad() <= max); assertTrue("All servers should have load no less than " + min + ". " + server, - server.getLoad() >= min); + server.getLoad() >= min); } } /** - * Invariant is that all servers have between acceptable range - * number of regions. + * Invariant is that all servers have between acceptable range number of regions. */ public boolean assertClusterOverallAsBalanced(List servers, int tablenum) { int numServers = servers.size(); @@ -220,10 +184,12 @@ public boolean assertClusterOverallAsBalanced(List servers, int t for (ServerAndLoad server : servers) { // The '5' in below is arbitrary. - if (server.getLoad() < 0 || server.getLoad() > max + (tablenum/2 + 5) || - server.getLoad() < (min - tablenum/2 - 5)) { - LOG.warn("server={}, load={}, max={}, tablenum={}, min={}", - server.getServerName(), server.getLoad(), max, tablenum, min); + if ( + server.getLoad() < 0 || server.getLoad() > max + (tablenum / 2 + 5) + || server.getLoad() < (min - tablenum / 2 - 5) + ) { + LOG.warn("server={}, load={}, max={}, tablenum={}, min={}", server.getServerName(), + server.getLoad(), max, tablenum, min); return false; } } @@ -233,7 +199,8 @@ public boolean assertClusterOverallAsBalanced(List servers, int t /** * Checks whether region replicas are not hosted on the same host. */ - public void assertRegionReplicaPlacement(Map> serverMap, RackManager rackManager) { + public void assertRegionReplicaPlacement(Map> serverMap, + RackManager rackManager) { TreeMap> regionsPerHost = new TreeMap<>(); TreeMap> regionsPerRack = new TreeMap<>(); @@ -284,7 +251,7 @@ protected String printStats(List servers) { int max = (int) Math.ceil(average); int min = (int) Math.floor(average); return "[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max - + " min=" + min + "]"; + + " min=" + min + "]"; } protected List convertToList(final Map> servers) { @@ -313,8 +280,8 @@ protected String printMock(List balancedCluster) { } /** - * This assumes the RegionPlan HSI instances are the same ones in the map, so - * actually no need to even pass in the map, but I think it's clearer. + * This assumes the RegionPlan HSI instances are the same ones in the map, so actually no need to + * even pass in the map, but I think it's clearer. * @return a list of all added {@link ServerAndLoad} values. 
*/ protected List reconcile(List list, List plans, @@ -342,9 +309,8 @@ protected List reconcile(List list, List map, - final ServerName sn, - final int diff) { + protected void updateLoad(final Map map, final ServerName sn, + final int diff) { ServerAndLoad sal = map.get(sn); if (sal == null) sal = new ServerAndLoad(sn, 0); sal = new ServerAndLoad(sn, sal.getLoad() + diff); @@ -356,11 +322,11 @@ protected TreeMap> mockClusterServers(int[] mockClu } protected BalancerClusterState mockCluster(int[] mockCluster) { - return new BalancerClusterState( - mockClusterServers(mockCluster, -1), null, null, null); + return new BalancerClusterState(mockClusterServers(mockCluster, -1), null, null, null); } - protected TreeMap> mockClusterServers(int[] mockCluster, int numTables) { + protected TreeMap> mockClusterServers(int[] mockCluster, + int numTables) { int numServers = mockCluster.length; TreeMap> servers = new TreeMap<>(); for (int i = 0; i < numServers; i++) { @@ -384,12 +350,13 @@ protected TreeMap> mockUniformClusterServers(int[] return servers; } - protected HashMap>> mockClusterServersWithTables(Map> clusterServers) { + protected HashMap>> + mockClusterServersWithTables(Map> clusterServers) { HashMap>> result = new HashMap<>(); for (Map.Entry> entry : clusterServers.entrySet()) { ServerName sal = entry.getKey(); List regions = entry.getValue(); - for (RegionInfo hri : regions){ + for (RegionInfo hri : regions) { TreeMap> servers = result.get(hri.getTable()); if (servers == null) { servers = new TreeMap<>(); @@ -403,8 +370,8 @@ protected HashMap>> mockClusterS hrilist.add(hri); } } - for(Map.Entry>> entry : result.entrySet()){ - for(ServerName srn : clusterServers.keySet()){ + for (Map.Entry>> entry : result.entrySet()) { + for (ServerName srn : clusterServers.keySet()) { if (!entry.getValue().containsKey(srn)) entry.getValue().put(srn, new ArrayList<>()); } } @@ -426,11 +393,8 @@ protected List createRegions(int numRegions, TableName tableName) { for (int i = 0; i < numRegions; i++) { Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).build(); regions.add(hri); } return regions; @@ -449,14 +413,10 @@ protected List randomRegions(int numRegions, int numTables) { } Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); - TableName tableName = TableName.valueOf("table" + - (numTables > 0 ? ThreadLocalRandom.current().nextInt(numTables) : i)); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(regionId++) - .build(); + TableName tableName = TableName + .valueOf("table" + (numTables > 0 ? 
ThreadLocalRandom.current().nextInt(numTables) : i)); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(regionId++).build(); regions.add(hri); } return regions; @@ -472,11 +432,8 @@ protected List uniformRegions(int numRegions) { Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); TableName tableName = TableName.valueOf("table" + i); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).build(); regions.add(hri); } return regions; diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java index a62e3378c9b7..438971f0b25a 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java index 680d292d8a66..83b22da4bec2 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java index fcb8f64b0ec3..6291bc6bfea1 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,13 @@ import java.util.Map; public class DummyMetricsStochasticBalancer extends MetricsStochasticBalancer { - //We use a map to record those metrics that were updated to MetricsStochasticBalancer when running + // We use a map to record those metrics that were updated to MetricsStochasticBalancer when + // running // unit tests. 
private Map costsMap; public DummyMetricsStochasticBalancer() { - //noop + // noop } @Override @@ -36,22 +37,22 @@ protected void initSource() { @Override public void balanceCluster(long time) { - //noop + // noop } @Override public void incrMiscInvocations() { - //noop + // noop } @Override public void balancerStatus(boolean status) { - //noop + // noop } @Override public void updateMetricsSize(int size) { - //noop + // noop } @Override @@ -61,14 +62,14 @@ public void updateStochasticCost(String tableName, String costFunctionName, costsMap.put(key, value); } - public Map getDummyCostsMap(){ + public Map getDummyCostsMap() { return this.costsMap; } /** * Clear all metrics in the cache map then prepare to run the next test - * */ - public void clearDummyMetrics(){ + */ + public void clearDummyMetrics() { this.costsMap.clear(); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousCostRulesTestHelper.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousCostRulesTestHelper.java index 46300c6e3dfa..7158d3365723 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousCostRulesTestHelper.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousCostRulesTestHelper.java @@ -64,4 +64,4 @@ static void cleanup(String file) throws IOException { LOG.warn("FileNotFoundException for {}", file, nsfe); } } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java index 59433137f654..3a435e140989 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.io.IOException; @@ -38,6 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @@ -45,31 +45,29 @@ /** * Tool to test performance of different {@link org.apache.hadoop.hbase.master.LoadBalancer} - * implementations. - * Example command: - * $ bin/hbase org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation - * -regions 1000 -servers 100 - * -load_balancer org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer + * implementations. 
Example command: $ bin/hbase + * org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation -regions 1000 -servers + * 100 -load_balancer org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadBalancerPerformanceEvaluation extends AbstractHBaseTool { private static final Logger LOG = - LoggerFactory.getLogger(LoadBalancerPerformanceEvaluation.class.getName()); + LoggerFactory.getLogger(LoadBalancerPerformanceEvaluation.class.getName()); protected static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil(); private static final int DEFAULT_NUM_REGIONS = 1000000; private static Option NUM_REGIONS_OPT = new Option("regions", true, - "Number of regions to consider by load balancer. Default: " + DEFAULT_NUM_REGIONS); + "Number of regions to consider by load balancer. Default: " + DEFAULT_NUM_REGIONS); private static final int DEFAULT_NUM_SERVERS = 1000; private static Option NUM_SERVERS_OPT = new Option("servers", true, - "Number of servers to consider by load balancer. Default: " + DEFAULT_NUM_SERVERS); + "Number of servers to consider by load balancer. Default: " + DEFAULT_NUM_SERVERS); private static final String DEFAULT_LOAD_BALANCER = - "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer"; + "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer"; private static Option LOAD_BALANCER_OPT = new Option("load_balancer", true, - "Type of Load Balancer to use. Default: " + DEFAULT_LOAD_BALANCER); + "Type of Load Balancer to use. Default: " + DEFAULT_LOAD_BALANCER); private int numRegions; private int numServers; @@ -102,12 +100,8 @@ private void generateRegionsAndServers() { Bytes.putInt(start, 0, i); Bytes.putInt(end, 0, i + 1); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(i) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(i).build(); regions.add(hri); regionServerMap.put(hri, null); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java index 8a077b793ccb..c875031493b0 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,8 @@ public class StochasticBalancerTestBase extends BalancerTestBase { protected static StochasticLoadBalancer loadBalancer; - protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer = new - DummyMetricsStochasticBalancer(); + protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer = + new DummyMetricsStochasticBalancer(); @BeforeClass public static void beforeAllTests() throws Exception { @@ -59,8 +59,7 @@ protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerSe boolean assertFullyBalancedForReplicas) { Map> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); - testWithCluster(serverMap, null, assertFullyBalanced, - assertFullyBalancedForReplicas); + testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas); } protected void testWithClusterWithIteration(int numNodes, int numRegions, int numRegionsPerServer, @@ -96,8 +95,8 @@ protected void testWithCluster(Map> serverMap, assertClusterAsBalanced(balancedCluster); LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap); List secondPlans = loadBalancer.balanceCluster(LoadOfAllTable); - assertNull("Given a requirement to be fully balanced, second attempt at plans should " + - "produce none.", secondPlans); + assertNull("Given a requirement to be fully balanced, second attempt at plans should " + + "produce none.", secondPlans); } if (assertFullyBalancedForReplicas) { @@ -135,8 +134,8 @@ protected void testWithClusterWithIteration(Map> se LOG.info("Mock Final balance: " + printMock(balancedCluster)); if (assertFullyBalanced) { - assertNull("Given a requirement to be fully balanced, second attempt at plans should " + - "produce none.", plans); + assertNull("Given a requirement to be fully balanced, second attempt at plans should " + + "produce none.", plans); } if (assertFullyBalancedForReplicas) { assertRegionReplicaPlacement(serverMap, rackManager); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java index 41dbb552db6a..9bab75b51257 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index 5dc3fa81e1b3..d9df27994ac9 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,12 +62,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestBaseLoadBalancer extends BalancerTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBaseLoadBalancer.class); + HBaseClassTestRule.forClass(TestBaseLoadBalancer.class); private static LoadBalancer loadBalancer; private static final Logger LOG = LoggerFactory.getLogger(TestBaseLoadBalancer.class); @@ -77,11 +77,11 @@ public class TestBaseLoadBalancer extends BalancerTestBase { private static ServerName[] servers = new ServerName[NUM_SERVERS]; int[][] regionsAndServersMocks = new int[][] { - // { num regions, num servers } - new int[] { 0, 0 }, new int[] { 0, 1 }, new int[] { 1, 1 }, new int[] { 2, 1 }, - new int[] { 10, 1 }, new int[] { 1, 2 }, new int[] { 2, 2 }, new int[] { 3, 2 }, - new int[] { 1, 3 }, new int[] { 2, 3 }, new int[] { 3, 3 }, new int[] { 25, 3 }, - new int[] { 2, 10 }, new int[] { 2, 100 }, new int[] { 12, 10 }, new int[] { 12, 100 }, }; + // { num regions, num servers } + new int[] { 0, 0 }, new int[] { 0, 1 }, new int[] { 1, 1 }, new int[] { 2, 1 }, + new int[] { 10, 1 }, new int[] { 1, 2 }, new int[] { 2, 2 }, new int[] { 3, 2 }, + new int[] { 1, 3 }, new int[] { 2, 3 }, new int[] { 3, 3 }, new int[] { 25, 3 }, + new int[] { 2, 10 }, new int[] { 2, 100 }, new int[] { 12, 10 }, new int[] { 12, 100 }, }; @Rule public TestName name = new TestName(); @@ -96,7 +96,7 @@ public static void beforeAllTests() throws Exception { // Set up the rack topologies (5 machines per rack) rackManager = mock(RackManager.class); for (int i = 0; i < NUM_SERVERS; i++) { - servers[i] = ServerName.valueOf("foo"+i+":1234",-1); + servers[i] = ServerName.valueOf("foo" + i + ":1234", -1); if (i < 5) { when(rackManager.getRack(servers[i])).thenReturn("rack1"); } @@ -113,16 +113,15 @@ public static class MockBalancer extends BaseLoadBalancer { @Override protected List balanceTable(TableName tableName, - Map> loadOfOneTable) { + Map> loadOfOneTable) { return null; } } /** - * Tests the bulk assignment used during cluster startup. - * - * Round-robin. Should yield a balanced cluster so same invariant as the load - * balancer holds, all servers holding either floor(avg) or ceiling(avg). + * Tests the bulk assignment used during cluster startup. Round-robin. Should yield a balanced + * cluster so same invariant as the load balancer holds, all servers holding either floor(avg) or + * ceiling(avg). 
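   * <p>
   * As a concrete illustration of that invariant (plain arithmetic, not code from this patch):
   * with 25 regions assigned round-robin to 3 servers the average is 25/3 = 8.33, so each server
   * should end up holding either floor(8.33) = 8 or ceil(8.33) = 9 regions; that is exactly the
   * min/max window the assertions below derive with Math.floor and Math.ceil.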
*/ @Test public void testBulkAssignment() throws Exception { @@ -132,7 +131,7 @@ public void testBulkAssignment() throws Exception { tmp.add(master); Map> plans = loadBalancer.roundRobinAssignment(hris, tmp); int totalRegion = 0; - for (List regions: plans.values()) { + for (List regions : plans.values()) { totalRegion += regions.size(); } assertEquals(hris.size(), totalRegion); @@ -142,7 +141,7 @@ public void testBulkAssignment() throws Exception { List servers = randomServers(mock[1], 0); List list = getListOfServerNames(servers); Map> assignments = - loadBalancer.roundRobinAssignment(regions, list); + loadBalancer.roundRobinAssignment(regions, list); float average = (float) regions.size() / servers.size(); int min = (int) Math.floor(average); int max = (int) Math.ceil(average); @@ -157,8 +156,7 @@ public void testBulkAssignment() throws Exception { } /** - * Test the cluster startup bulk assignment which attempts to retain - * assignment info. + * Test the cluster startup bulk assignment which attempts to retain assignment info. */ @Test public void testRetainAssignment() throws Exception { @@ -171,12 +169,12 @@ public void testRetainAssignment() throws Exception { // The old server would have had same host and port, but different // start code! ServerName snWithOldStartCode = - ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10); + ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10); existing.put(regions.get(i), snWithOldStartCode); } List listOfServerNames = getListOfServerNames(servers); Map> assignment = - loadBalancer.retainAssignment(existing, listOfServerNames); + loadBalancer.retainAssignment(existing, listOfServerNames); assertRetainedAssignment(existing, listOfServerNames, assignment); // Include two new servers that were not there before @@ -225,11 +223,8 @@ public List getOnlineServersListWithPredicator(List serv } }); RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key1")) - .setEndKey(Bytes.toBytes("key2")) - .setSplit(false) - .setRegionId(100) - .build(); + .setStartKey(Bytes.toBytes("key1")).setEndKey(Bytes.toBytes("key2")).setSplit(false) + .setRegionId(100).build(); assertNull(balancer.randomAssignment(hri1, Collections.emptyList())); assertNull(balancer.randomAssignment(hri1, null)); for (int i = 0; i != 3; ++i) { @@ -250,27 +245,21 @@ public void testRegionAvailability() throws Exception { List list2 = new ArrayList<>(); // create a region (region1) RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key1")) - .setEndKey(Bytes.toBytes("key2")) - .setSplit(false) - .setRegionId(100) - .build(); + .setStartKey(Bytes.toBytes("key1")).setEndKey(Bytes.toBytes("key2")).setSplit(false) + .setRegionId(100).build(); // create a replica of the region (replica_of_region1) RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); // create a second region (region2) RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key2")) - .setEndKey(Bytes.toBytes("key3")) - .setSplit(false) - .setRegionId(101) - .build(); - list0.add(hri1); //only region1 - list1.add(hri2); //only replica_of_region1 - list2.add(hri3); //only region2 + .setStartKey(Bytes.toBytes("key2")).setEndKey(Bytes.toBytes("key3")).setSplit(false) + .setRegionId(101).build(); + list0.add(hri1); // only region1 + list1.add(hri2); // only replica_of_region1 + 
list2.add(hri3); // only region2 Map> clusterState = new LinkedHashMap<>(); - clusterState.put(servers[0], list0); //servers[0] hosts region1 - clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 - clusterState.put(servers[2], list2); //servers[2] hosts region2 + clusterState.put(servers[0], list0); // servers[0] hosts region1 + clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1 + clusterState.put(servers[2], list2); // servers[2] hosts region2 // create a cluster with the above clusterState. The way in which the // cluster is created (constructor code) would make sure the indices of // the servers are in the order in which it is inserted in the clusterState @@ -299,10 +288,11 @@ public void testRegionAvailability() throws Exception { // start over again clusterState.clear(); - clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 - clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 and replica_of_region2 - clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[10], new ArrayList<>()); //servers[10], rack3 hosts no region + clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1 + clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1 and + // replica_of_region2 + clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2 + clusterState.put(servers[10], new ArrayList<>()); // servers[10], rack3 hosts no region // create a cluster with the above clusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager); // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would @@ -324,27 +314,21 @@ public void testRegionAvailabilityWithRegionMoves() throws Exception { List list2 = new ArrayList<>(); // create a region (region1) RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key1")) - .setEndKey(Bytes.toBytes("key2")) - .setSplit(false) - .setRegionId(100) - .build(); + .setStartKey(Bytes.toBytes("key1")).setEndKey(Bytes.toBytes("key2")).setSplit(false) + .setRegionId(100).build(); // create a replica of the region (replica_of_region1) RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); // create a second region (region2) RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key2")) - .setEndKey(Bytes.toBytes("key3")) - .setSplit(false) - .setRegionId(101) - .build(); - list0.add(hri1); //only region1 - list1.add(hri2); //only replica_of_region1 - list2.add(hri3); //only region2 + .setStartKey(Bytes.toBytes("key2")).setEndKey(Bytes.toBytes("key3")).setSplit(false) + .setRegionId(101).build(); + list0.add(hri1); // only region1 + list1.add(hri2); // only replica_of_region1 + list2.add(hri3); // only region2 Map> clusterState = new LinkedHashMap<>(); - clusterState.put(servers[0], list0); //servers[0] hosts region1 - clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 - clusterState.put(servers[2], list2); //servers[2] hosts region2 + clusterState.put(servers[0], list0); // servers[0] hosts region1 + clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1 + clusterState.put(servers[2], list2); // servers[2] hosts region2 // create a cluster with the above clusterState. 
The way in which the // cluster is created (constructor code) would make sure the indices of // the servers are in the order in which it is inserted in the clusterState @@ -364,10 +348,10 @@ public void testRegionAvailabilityWithRegionMoves() throws Exception { List list3 = new ArrayList<>(); RegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1); list3.add(hri4); - clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 - clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 - clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[12], list3); //servers[12], rack3 hosts replica_of_region2 + clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1 + clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1 + clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2 + clusterState.put(servers[12], list3); // servers[12], rack3 hosts replica_of_region2 // create a cluster with the above clusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager); // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would @@ -390,12 +374,12 @@ private List getListOfServerNames(final List sals) { * Must meet the following conditions: *
   * <ul>
   * <li>Every input region has an assignment, and to an online server
-  * <li>If a region had an existing assignment to a server with the same
-  * address a a currently online server, it will be assigned to it
+  * <li>If a region had an existing assignment to a server with the same address as a currently
+  * online server, it will be assigned to it
   * </ul>
*/ private void assertRetainedAssignment(Map existing, - List servers, Map> assignment) { + List servers, Map> assignment) { // Verify condition 1, every region assigned, and to online server Set onlineServerSet = new TreeSet<>(servers); Set assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR); @@ -461,7 +445,7 @@ public void testClusterServersWithSameHostPort() { } private void assignRegions(List regions, List servers, - Map> clusterState) { + Map> clusterState) { for (int i = 0; i < regions.size(); i++) { ServerName sn = servers.get(i % servers.size()); List regionsOfServer = clusterState.get(sn); @@ -485,15 +469,15 @@ public void testClusterRegionLocations() { // mock block locality for some regions RegionHDFSBlockLocationFinder locationFinder = mock(RegionHDFSBlockLocationFinder.class); - // block locality: region:0 => {server:0} - // region:1 => {server:0, server:1} - // region:42 => {server:4, server:9, server:5} - when(locationFinder.getTopBlockLocations(regions.get(0))).thenReturn( - Lists.newArrayList(servers.get(0))); - when(locationFinder.getTopBlockLocations(regions.get(1))).thenReturn( - Lists.newArrayList(servers.get(0), servers.get(1))); - when(locationFinder.getTopBlockLocations(regions.get(42))).thenReturn( - Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5))); + // block locality: region:0 => {server:0} + // region:1 => {server:0, server:1} + // region:42 => {server:4, server:9, server:5} + when(locationFinder.getTopBlockLocations(regions.get(0))) + .thenReturn(Lists.newArrayList(servers.get(0))); + when(locationFinder.getTopBlockLocations(regions.get(1))) + .thenReturn(Lists.newArrayList(servers.get(0), servers.get(1))); + when(locationFinder.getTopBlockLocations(regions.get(42))) + .thenReturn(Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5))); // this server does not exists in clusterStatus when(locationFinder.getTopBlockLocations(regions.get(43))) .thenReturn(Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java index 38834a8c9fa4..e557b22d8312 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java index 11ef8695785f..1f5cdd484b88 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -220,8 +220,8 @@ public void testRefreshRegionsWithChangedLocality() throws InterruptedException cache.put(region, hbd); } - finder.setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), - 0.123f)); + finder + .setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.123f)); // everything should be cached, because metrics were null before for (RegionInfo region : REGIONS) { @@ -229,8 +229,8 @@ public void testRefreshRegionsWithChangedLocality() throws InterruptedException assertSame(cache.get(region), hbd); } - finder.setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), - 0.345f)); + finder + .setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.345f)); // cache refresh happens in a background thread, so we need to wait for the value to // update before running assertions. diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java index 982321f4c598..9568edad74cd 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,12 +28,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestServerAndLoad { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServerAndLoad.class); + HBaseClassTestRule.forClass(TestServerAndLoad.class); @Test public void test() { @@ -50,6 +50,6 @@ public void test() { ServerName other = ServerName.valueOf("other", 12345, 112244); assertNotEquals(sal.hashCode(), new ServerAndLoad(other, startcode).hashCode()); assertNotEquals(sal, new ServerAndLoad(other, startcode)); - } + } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java index 1fb02629255e..992e57275cd2 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,12 +47,12 @@ /** * Test the load balancer that is created by default. 
*/ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestSimpleLoadBalancer extends BalancerTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSimpleLoadBalancer.class); + HBaseClassTestRule.forClass(TestSimpleLoadBalancer.class); private static final Logger LOG = LoggerFactory.getLogger(TestSimpleLoadBalancer.class); @@ -74,10 +74,8 @@ public static void beforeAllTests() throws Exception { public TestName name = new TestName(); /** - * Test the load balancing algorithm. - * - * Invariant is that all servers should be hosting either floor(average) or - * ceiling(average) at both table level and cluster level + * Test the load balancing algorithm. Invariant is that all servers should be hosting either + * floor(average) or ceiling(average) at both table level and cluster level */ @Test public void testBalanceClusterOverall() throws Exception { @@ -87,17 +85,17 @@ public void testBalanceClusterOverall() throws Exception { List clusterList = convertToList(clusterServers); clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers); HashMap>> result = - mockClusterServersWithTables(clusterServers); + mockClusterServersWithTables(clusterServers); loadBalancer.setClusterLoad(clusterLoad); List clusterplans = new ArrayList<>(); for (Map.Entry>> mapEntry : result - .entrySet()) { + .entrySet()) { TableName tableName = mapEntry.getKey(); TreeMap> servers = mapEntry.getValue(); List list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); List partialplans = loadBalancer.balanceTable(tableName, servers); - if(partialplans != null) clusterplans.addAll(partialplans); + if (partialplans != null) clusterplans.addAll(partialplans); List balancedClusterPerTable = reconcile(list, partialplans, servers); LOG.info("Mock Balance : " + printMock(balancedClusterPerTable)); assertClusterAsBalanced(balancedClusterPerTable); @@ -112,12 +110,10 @@ public void testBalanceClusterOverall() throws Exception { } /** - * Test the load balancing algorithm. - * - * Invariant is that all servers should be hosting either floor(average) or - * ceiling(average) at both table level and cluster level - * Deliberately generate a special case to show the overall strategy can achieve cluster - * level balance while the bytable strategy cannot + * Test the load balancing algorithm. 
Invariant is that all servers should be hosting either + * floor(average) or ceiling(average) at both table level and cluster level Deliberately generate + * a special case to show the overall strategy can achieve cluster level balance while the bytable + * strategy cannot */ @Test public void testImpactOfBalanceClusterOverall() throws Exception { @@ -132,12 +128,12 @@ public void testImpactOfBalanceClusterOverallWithLoadOfAllTable() throws Excepti private void testImpactOfBalanceClusterOverall(boolean useLoadOfAllTable) throws Exception { Map>> clusterLoad = new TreeMap<>(); Map> clusterServers = - mockUniformClusterServers(mockUniformCluster); + mockUniformClusterServers(mockUniformCluster); List clusterList = convertToList(clusterServers); clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers); // use overall can achieve both table and cluster level balance HashMap>> LoadOfAllTable = - mockClusterServersWithTables(clusterServers); + mockClusterServersWithTables(clusterServers); if (useLoadOfAllTable) { loadBalancer.setClusterLoad((Map) LoadOfAllTable); } else { @@ -145,7 +141,7 @@ private void testImpactOfBalanceClusterOverall(boolean useLoadOfAllTable) throws } List clusterplans1 = new ArrayList(); for (Map.Entry>> mapEntry : LoadOfAllTable - .entrySet()) { + .entrySet()) { TableName tableName = mapEntry.getKey(); TreeMap> servers = mapEntry.getValue(); List list = convertToList(servers); @@ -173,22 +169,22 @@ public void testBalanceClusterOverallStrictly() throws Exception { for (int i = 0; i < regionNumOfTable1PerServer.length; i++) { ServerName serverName = ServerName.valueOf("server" + i, 1000, -1); List regions1 = - createRegions(regionNumOfTable1PerServer[i], TableName.valueOf("table1")); + createRegions(regionNumOfTable1PerServer[i], TableName.valueOf("table1")); List regions2 = - createRegions(regionNumOfTable2PerServer[i], TableName.valueOf("table2")); + createRegions(regionNumOfTable2PerServer[i], TableName.valueOf("table2")); regions1.addAll(regions2); serverRegionInfo.put(serverName, regions1); ServerAndLoad serverAndLoad = new ServerAndLoad(serverName, - regionNumOfTable1PerServer[i] + regionNumOfTable2PerServer[i]); + regionNumOfTable1PerServer[i] + regionNumOfTable2PerServer[i]); serverAndLoads.add(serverAndLoad); } HashMap>> LoadOfAllTable = - mockClusterServersWithTables(serverRegionInfo); + mockClusterServersWithTables(serverRegionInfo); loadBalancer.setClusterLoad((Map) LoadOfAllTable); List partialplans = loadBalancer.balanceTable(TableName.valueOf("table1"), LoadOfAllTable.get(TableName.valueOf("table1"))); List balancedServerLoads = - reconcile(serverAndLoads, partialplans, serverRegionInfo); + reconcile(serverAndLoads, partialplans, serverRegionInfo); for (ServerAndLoad serverAndLoad : balancedServerLoads) { assertEquals(6, serverAndLoad.getLoad()); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index 2f862cda65f1..149dc9b6bfb8 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -50,6 +51,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; @Category({ MasterTests.class, MediumTests.class }) @@ -57,66 +59,59 @@ public class TestStochasticLoadBalancer extends StochasticBalancerTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancer.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancer.class); private static final String REGION_KEY = "testRegion"; // Mapping of locality test -> expected locality - private float[] expectedLocalities = {1.0f, 0.0f, 0.50f, 0.25f, 1.0f}; + private float[] expectedLocalities = { 1.0f, 0.0f, 0.50f, 0.25f, 1.0f }; /** - * Data set for testLocalityCost: - * [test][0][0] = mapping of server to number of regions it hosts - * [test][region + 1][0] = server that region is hosted on - * [test][region + 1][server + 1] = locality for region on server + * Data set for testLocalityCost: [test][0][0] = mapping of server to number of regions it hosts + * [test][region + 1][0] = server that region is hosted on [test][region + 1][server + 1] = + * locality for region on server */ - private int[][][] clusterRegionLocationMocks = new int[][][]{ + private int[][][] clusterRegionLocationMocks = new int[][][] { // Test 1: each region is entirely on server that hosts it - new int[][]{ - new int[]{2, 1, 1}, - new int[]{2, 0, 0, 100}, // region 0 is hosted and entirely local on server 2 - new int[]{0, 100, 0, 0}, // region 1 is hosted and entirely on server 0 - new int[]{0, 100, 0, 0}, // region 2 is hosted and entirely on server 0 - new int[]{1, 0, 100, 0}, // region 3 is hosted and entirely on server 1 + new int[][] { new int[] { 2, 1, 1 }, new int[] { 2, 0, 0, 100 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 0, 100, 0, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 0, 100, 0, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 1, 0, 100, 0 }, // region 3 is hosted and entirely on server 1 }, // Test 2: each region is 0% local on the server that hosts it - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 0, 0, 100}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 100, 0, 0}, // region 1 is hosted and entirely on server 0 - new int[]{1, 100, 0, 0}, // region 2 is hosted and entirely on server 0 - new int[]{2, 0, 100, 0}, // region 3 is hosted and entirely on server 1 + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 0, 0, 100 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 1, 100, 0, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 100, 0, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 0, 100, 0 }, // region 3 is hosted and entirely on server 1 }, // Test 3: each region is 25% local on the server that hosts it (and 50% locality is possible) - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 25, 0, 50}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 50, 25, 0}, // region 1 is hosted and entirely on server 0 - new int[]{1, 50, 25, 0}, // region 2 is hosted and entirely 
on server 0 - new int[]{2, 0, 50, 25}, // region 3 is hosted and entirely on server 1 + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 25, 0, 50 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 1, 50, 25, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 50, 25, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 0, 50, 25 }, // region 3 is hosted and entirely on server 1 }, // Test 4: each region is 25% local on the server that hosts it (and 100% locality is possible) - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 25, 0, 100}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 100, 25, 0}, // region 1 is hosted and entirely on server 0 - new int[]{1, 100, 25, 0}, // region 2 is hosted and entirely on server 0 - new int[]{2, 0, 100, 25}, // region 3 is hosted and entirely on server 1 + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 25, 0, 100 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 1, 100, 25, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 100, 25, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 0, 100, 25 }, // region 3 is hosted and entirely on server 1 }, // Test 5: each region is 75% local on the server that hosts it (and 75% locality is possible // everywhere) - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 75, 75, 75}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 75, 75, 75}, // region 1 is hosted and entirely on server 0 - new int[]{1, 75, 75, 75}, // region 2 is hosted and entirely on server 0 - new int[]{2, 75, 75, 75}, // region 3 is hosted and entirely on server 1 - }, - }; + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 75, 75, 75 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 1, 75, 75, 75 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 75, 75, 75 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 75, 75, 75 }, // region 3 is hosted and entirely on server 1 + }, }; private ServerMetrics mockServerMetricsWithCpRequests(List regionsOnServer, long cpRequestCount) { @@ -175,7 +170,7 @@ public void testCPRequestCost() { loadBalancer.updateClusterMetrics(clusterStatus); List plans = - loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, clusterState); + loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, clusterState); Set regionsMoveFromServerA = new HashSet<>(); Set targetServers = new HashSet<>(); for (RegionPlan plan : plans) { @@ -225,15 +220,15 @@ public void testKeepRegionLoad() throws Exception { Queue loads = loadBalancer.loads.get(regionNameAsString); int i = 0; - while(loads.size() > 0) { + while (loads.size() > 0) { BalancerRegionLoad rl = loads.remove(); assertEquals(i + (numClusterStatusToAdd - 15), rl.getStorefileSizeMB()); - i ++; + i++; } } @Test - public void testUpdateBalancerLoadInfo(){ + public void testUpdateBalancerLoadInfo() { int[] cluster = new int[] { 10, 0 }; Map> servers = mockClusterServers(cluster); BalancerClusterState clusterState = mockCluster(cluster); @@ -247,20 +242,22 @@ public void testUpdateBalancerLoadInfo(){ dummyMetricsStochasticBalancer.clearDummyMetrics(); loadBalancer.updateBalancerLoadInfo(LoadOfAllTable); assertTrue("Metrics should be recorded!", - dummyMetricsStochasticBalancer.getDummyCostsMap() != null && !dummyMetricsStochasticBalancer.getDummyCostsMap().isEmpty()); + dummyMetricsStochasticBalancer.getDummyCostsMap() != null + && 
!dummyMetricsStochasticBalancer.getDummyCostsMap().isEmpty()); String metricRecordKey; if (isByTable) { metricRecordKey = "table1#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME; } else { - metricRecordKey = HConstants.ENSEMBLE_TABLE_NAME + "#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME; + metricRecordKey = HConstants.ENSEMBLE_TABLE_NAME + "#" + + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME; } double curOverallCost = loadBalancer.computeCost(clusterState, Double.MAX_VALUE); double curOverallCostInMetrics = dummyMetricsStochasticBalancer.getDummyCostsMap().get(metricRecordKey); assertEquals(curOverallCost, curOverallCostInMetrics, 0.001); } - }finally { + } finally { conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE); loadBalancer.onConfigurationChange(conf); } @@ -277,7 +274,8 @@ public void testUpdateStochasticCosts() { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false); loadBalancer.onConfigurationChange(conf); dummyMetricsStochasticBalancer.clearDummyMetrics(); - List plans = loadBalancer.balanceCluster((Map)mockClusterServersWithTables(servers)); + List plans = + loadBalancer.balanceCluster((Map) mockClusterServersWithTables(servers)); assertTrue("Balance plan should not be empty!", plans != null && !plans.isEmpty()); assertTrue("There should be metrics record in MetricsStochasticBalancer", @@ -288,7 +286,7 @@ public void testUpdateStochasticCosts() { HConstants.ENSEMBLE_TABLE_NAME + "#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME); assertEquals(overallCostOfCluster, overallCostInMetrics, 0.001); } finally { - //reset config + // reset config conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost); conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE); loadBalancer.onConfigurationChange(conf); @@ -306,7 +304,8 @@ public void testUpdateStochasticCostsIfBalanceNotRan() { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false); loadBalancer.onConfigurationChange(conf); dummyMetricsStochasticBalancer.clearDummyMetrics(); - List plans = loadBalancer.balanceCluster( (Map) mockClusterServersWithTables(servers)); + List plans = + loadBalancer.balanceCluster((Map) mockClusterServersWithTables(servers)); assertTrue("Balance plan should be empty!", plans == null || plans.isEmpty()); assertTrue("There should be metrics record in MetricsStochasticBalancer!", @@ -317,7 +316,7 @@ public void testUpdateStochasticCostsIfBalanceNotRan() { HConstants.ENSEMBLE_TABLE_NAME + "#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME); assertEquals(overallCostOfCluster, overallCostInMetrics, 0.001); } finally { - //reset config + // reset config conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost); conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE); loadBalancer.onConfigurationChange(conf); @@ -330,14 +329,14 @@ public void testNeedBalance() { conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f); try { // Test with/without per table balancer. 
- boolean[] perTableBalancerConfigs = {true, false}; + boolean[] perTableBalancerConfigs = { true, false }; for (boolean isByTable : perTableBalancerConfigs) { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable); loadBalancer.onConfigurationChange(conf); for (int[] mockCluster : clusterStateMocks) { Map> servers = mockClusterServers(mockCluster); Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(servers); + (Map) mockClusterServersWithTables(servers); List plans = loadBalancer.balanceCluster(LoadOfAllTable); boolean emptyPlans = plans == null || plans.isEmpty(); assertTrue(emptyPlans || needsBalanceIdleRegion(mockCluster)); @@ -354,8 +353,7 @@ public void testNeedBalance() { @Test public void testLocalityCost() throws Exception { Configuration conf = HBaseConfiguration.create(); - CostFunction - costFunction = new ServerLocalityCostFunction(conf); + CostFunction costFunction = new ServerLocalityCostFunction(conf); for (int test = 0; test < clusterRegionLocationMocks.length; test++) { int[][] clusterRegionLocations = clusterRegionLocationMocks[test]; @@ -371,21 +369,18 @@ public void testLocalityCost() throws Exception { public void testMoveCostMultiplier() throws Exception { Configuration conf = HBaseConfiguration.create(); ClusterInfoProvider provider = mock(ClusterInfoProvider.class); - CostFunction costFunction = - new MoveCostFunction(conf, provider); + CostFunction costFunction = new MoveCostFunction(conf, provider); when(provider.isOffPeakHour()).thenReturn(false); BalancerClusterState cluster = mockCluster(clusterStateMocks[0]); costFunction.prepare(cluster); costFunction.cost(); - assertEquals(MoveCostFunction.DEFAULT_MOVE_COST, - costFunction.getMultiplier(), 0.01); + assertEquals(MoveCostFunction.DEFAULT_MOVE_COST, costFunction.getMultiplier(), 0.01); // In offpeak hours, the multiplier of move cost should be lower when(provider.isOffPeakHour()).thenReturn(true); costFunction.prepare(cluster); costFunction.cost(); - assertEquals(MoveCostFunction.DEFAULT_MOVE_COST_OFFPEAK, - costFunction.getMultiplier(), 0.01); + assertEquals(MoveCostFunction.DEFAULT_MOVE_COST_OFFPEAK, costFunction.getMultiplier(), 0.01); } @Test @@ -410,7 +405,6 @@ public void testMoveCost() throws Exception { cost = costFunction.cost(); assertEquals(1.0f, cost, 0.001); - // cluster region number is bigger than maxMoves=2500 cluster.setNumRegions(10000); cluster.setNumMovedRegions(250); @@ -428,8 +422,7 @@ public void testMoveCost() throws Exception { @Test public void testSkewCost() { Configuration conf = HBaseConfiguration.create(); - CostFunction - costFunction = new RegionCountSkewCostFunction(conf); + CostFunction costFunction = new RegionCountSkewCostFunction(conf); for (int[] mockCluster : clusterStateMocks) { costFunction.prepare(mockCluster(mockCluster)); double cost = costFunction.cost(); @@ -437,17 +430,17 @@ public void testSkewCost() { assertTrue(cost <= 1.01); } - costFunction.prepare(mockCluster(new int[]{0, 0, 0, 0, 1})); - assertEquals(0,costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{0, 0, 0, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 0, 0, 0, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{0, 0, 1, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 0, 0, 1, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{0, 1, 1, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 0, 1, 1, 1 })); assertEquals(0, costFunction.cost(), 
0.01); - costFunction.prepare(mockCluster(new int[]{1, 1, 1, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 1, 1, 1, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{10000, 0, 0, 0, 0})); + costFunction.prepare(mockCluster(new int[] { 1, 1, 1, 1, 1 })); + assertEquals(0, costFunction.cost(), 0.01); + costFunction.prepare(mockCluster(new int[] { 10000, 0, 0, 0, 0 })); assertEquals(1, costFunction.cost(), 0.01); } @@ -475,8 +468,7 @@ public void testCostAfterUndoAction() { @Test public void testTableSkewCost() { Configuration conf = HBaseConfiguration.create(); - CostFunction - costFunction = new TableSkewCostFunction(conf); + CostFunction costFunction = new TableSkewCostFunction(conf); for (int[] mockCluster : clusterStateMocks) { BalancerClusterState cluster = mockCluster(mockCluster); costFunction.prepare(cluster); @@ -498,20 +490,17 @@ public void testRegionLoadCost() { } Configuration conf = HBaseConfiguration.create(); - ReadRequestCostFunction readCostFunction = - new ReadRequestCostFunction(conf); + ReadRequestCostFunction readCostFunction = new ReadRequestCostFunction(conf); double rateResult = readCostFunction.getRegionLoadCost(regionLoads); // read requests are treated as a rate so the average rate here is simply 1 assertEquals(1, rateResult, 0.01); - CPRequestCostFunction cpCostFunction = - new CPRequestCostFunction(conf); + CPRequestCostFunction cpCostFunction = new CPRequestCostFunction(conf); rateResult = cpCostFunction.getRegionLoadCost(regionLoads); // coprocessor requests are treated as a rate so the average rate here is simply 1 assertEquals(1, rateResult, 0.01); - StoreFileCostFunction storeFileCostFunction = - new StoreFileCostFunction(conf); + StoreFileCostFunction storeFileCostFunction = new StoreFileCostFunction(conf); double result = storeFileCostFunction.getRegionLoadCost(regionLoads); // storefile size cost is simply an average of it's value over time assertEquals(2.5, result, 0.01); @@ -524,20 +513,18 @@ public void testRegionLoadCostWhenDecrease() { for (int i = 1; i < 5; i++) { int load = i == 3 ? 
1 : i; BalancerRegionLoad regionLoad = mock(BalancerRegionLoad.class); - when(regionLoad.getReadRequestsCount()).thenReturn((long)load); - when(regionLoad.getCpRequestsCount()).thenReturn((long)load); + when(regionLoad.getReadRequestsCount()).thenReturn((long) load); + when(regionLoad.getCpRequestsCount()).thenReturn((long) load); regionLoads.add(regionLoad); } Configuration conf = HBaseConfiguration.create(); - ReadRequestCostFunction readCostFunction = - new ReadRequestCostFunction(conf); + ReadRequestCostFunction readCostFunction = new ReadRequestCostFunction(conf); double rateResult = readCostFunction.getRegionLoadCost(regionLoads); // read requests are treated as a rate so the average rate here is simply 1 assertEquals(1.67, rateResult, 0.01); - CPRequestCostFunction cpCostFunction = - new CPRequestCostFunction(conf); + CPRequestCostFunction cpCostFunction = new CPRequestCostFunction(conf); rateResult = cpCostFunction.getRegionLoadCost(regionLoads); // coprocessor requests are treated as a rate so the average rate here is simply 1 assertEquals(1.67, rateResult, 0.01); @@ -547,15 +534,14 @@ public void testRegionLoadCostWhenDecrease() { public void testLosingRs() throws Exception { int numNodes = 3; int numRegions = 20; - int numRegionsPerServer = 3; //all servers except one + int numRegionsPerServer = 3; // all servers except one int replication = 1; int numTables = 2; Map> serverMap = - createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); List list = convertToList(serverMap); - List plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); assertNotNull(plans); @@ -581,9 +567,8 @@ public void testAdditionalCostFunction() { DummyCostFunction.class.getName()); loadBalancer.onConfigurationChange(conf); - assertTrue(Arrays. - asList(loadBalancer.getCostFunctionNames()). 
- contains(DummyCostFunction.class.getSimpleName())); + assertTrue(Arrays.asList(loadBalancer.getCostFunctionNames()) + .contains(DummyCostFunction.class.getSimpleName())); } finally { conf.unset(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY); loadBalancer.onConfigurationChange(conf); @@ -592,20 +577,15 @@ public void testAdditionalCostFunction() { @Test public void testDefaultCostFunctionList() { - List expected = Arrays.asList( - RegionCountSkewCostFunction.class.getSimpleName(), + List expected = Arrays.asList(RegionCountSkewCostFunction.class.getSimpleName(), PrimaryRegionCountSkewCostFunction.class.getSimpleName(), - MoveCostFunction.class.getSimpleName(), - RackLocalityCostFunction.class.getSimpleName(), + MoveCostFunction.class.getSimpleName(), RackLocalityCostFunction.class.getSimpleName(), TableSkewCostFunction.class.getSimpleName(), RegionReplicaHostCostFunction.class.getSimpleName(), RegionReplicaRackCostFunction.class.getSimpleName(), - ReadRequestCostFunction.class.getSimpleName(), - CPRequestCostFunction.class.getSimpleName(), + ReadRequestCostFunction.class.getSimpleName(), CPRequestCostFunction.class.getSimpleName(), WriteRequestCostFunction.class.getSimpleName(), - MemStoreSizeCostFunction.class.getSimpleName(), - StoreFileCostFunction.class.getSimpleName() - ); + MemStoreSizeCostFunction.class.getSimpleName(), StoreFileCostFunction.class.getSimpleName()); List actual = Arrays.asList(loadBalancer.getCostFunctionNames()); assertTrue("ExpectedCostFunctions: " + expected + " ActualCostFunctions: " + actual, @@ -613,14 +593,14 @@ public void testDefaultCostFunctionList() { } private boolean needsBalanceIdleRegion(int[] cluster) { - return Arrays.stream(cluster).anyMatch(x -> x > 1) && - Arrays.stream(cluster).anyMatch(x -> x < 1); + return Arrays.stream(cluster).anyMatch(x -> x > 1) + && Arrays.stream(cluster).anyMatch(x -> x < 1); } // This mock allows us to test the LocalityCostFunction private class MockCluster extends BalancerClusterState { - private int[][] localities = null; // [region][server] = percent of blocks + private int[][] localities = null; // [region][server] = percent of blocks public MockCluster(int[][] regions) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java index 5269fe71d7f0..2a7b8afccc38 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,10 +39,10 @@ public class TestStochasticLoadBalancerBalanceCluster extends StochasticBalancer @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerBalanceCluster.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerBalanceCluster.class); private static final Logger LOG = - LoggerFactory.getLogger(TestStochasticLoadBalancerBalanceCluster.class); + LoggerFactory.getLogger(TestStochasticLoadBalancerBalanceCluster.class); /** * Test the load balancing algorithm. 
@@ -57,7 +57,7 @@ public void testBalanceCluster() throws Exception { List list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(servers); + (Map) mockClusterServersWithTables(servers); List plans = loadBalancer.balanceCluster(LoadOfAllTable); List balancedCluster = reconcile(list, plans, servers); LOG.info("Mock Balance : " + printMock(balancedCluster)); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java index 5a0dc06e4707..8691cff733f5 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java @@ -214,9 +214,9 @@ protected void testWithCluster(final Map> serverMap // as the balancer is stochastic, we cannot check exactly the result of the balancing, // hence the allowedWindow parameter - assertTrue("Host " + sn.getHostname() + " should be below " + - cf.overallUsage * ALLOWED_WINDOW * 100 + "%; " + cf.overallUsage + ", " + usage + ", " + - numberRegions + ", " + limit, usage <= cf.overallUsage * ALLOWED_WINDOW); + assertTrue("Host " + sn.getHostname() + " should be below " + + cf.overallUsage * ALLOWED_WINDOW * 100 + "%; " + cf.overallUsage + ", " + usage + ", " + + numberRegions + ", " + limit, usage <= cf.overallUsage * ALLOWED_WINDOW); } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java index 11e3f650ea41..2df1aa83a0d1 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java @@ -42,7 +42,7 @@ public class TestStochasticLoadBalancerHeterogeneousCostRules extends Stochastic @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCostRules.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCostRules.class); @Rule public TestName name = new TestName(); @@ -64,10 +64,8 @@ public static void beforeClass() throws IOException { @Before public void before() throws IOException { // New rules file name per test. - this.rulesFilename = HTU - .getDataTestDir( - this.name.getMethodName() + "." + DEFAULT_RULES_FILE_NAME) - .toString(); + this.rulesFilename = + HTU.getDataTestDir(this.name.getMethodName() + "." + DEFAULT_RULES_FILE_NAME).toString(); // Set the created rules filename into the configuration. 
HTU.getConfiguration().set( HeterogeneousRegionCountCostFunction.HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE, @@ -99,8 +97,7 @@ public void testBadFormatInRules() throws IOException { this.costFunction.loadRules(); assertEquals(0, this.costFunction.getNumberOfRulesLoaded()); - createRulesFile(this.rulesFilename, Arrays.asList("srv[1-2] 10", - "bad_rules format", "a")); + createRulesFile(this.rulesFilename, Arrays.asList("srv[1-2] 10", "bad_rules format", "a")); this.costFunction = new HeterogeneousRegionCountCostFunction(HTU.getConfiguration()); this.costFunction.loadRules(); assertEquals(1, this.costFunction.getNumberOfRulesLoaded()); @@ -149,4 +146,4 @@ public void testNoOverride() throws IOException { this.costFunction.loadRules(); assertEquals(2, this.costFunction.getNumberOfRulesLoaded()); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java index ba2da0a860a0..620d610288f7 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestStochasticLoadBalancerLargeCluster extends StochasticBalancerTe @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerLargeCluster.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerLargeCluster.class); @Test public void testLargeCluster() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java index 3c0ec03b03c3..80adea40fd77 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestStochasticLoadBalancerMidCluster extends StochasticBalancerTest @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerMidCluster.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerMidCluster.class); @Test public void testMidCluster() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java index 58eed9e63796..bd437425f213 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java @@ -46,13 +46,12 @@ public class TestStochasticLoadBalancerRegionReplica extends StochasticBalancerT @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplica.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplica.class); @Test public void testReplicaCost() { Configuration conf = HBaseConfiguration.create(); - CostFunction costFunction = - new RegionReplicaHostCostFunction(conf); + CostFunction costFunction = new RegionReplicaHostCostFunction(conf); for (int[] mockCluster : clusterStateMocks) { BalancerClusterState cluster = mockCluster(mockCluster); costFunction.prepare(cluster); @@ -65,8 +64,7 @@ public void testReplicaCost() { @Test public void testReplicaCostForReplicas() { Configuration conf = HBaseConfiguration.create(); - CostFunction costFunction = - new RegionReplicaHostCostFunction(conf); + CostFunction costFunction = new RegionReplicaHostCostFunction(conf); int[] servers = new int[] { 3, 3, 3, 3, 3 }; TreeMap> clusterState = mockClusterServers(servers); @@ -80,7 +78,7 @@ public void testReplicaCostForReplicas() { // replicate the region from first server to the last server RegionInfo replica1 = - RegionReplicaUtil.getRegionInfoForReplica(clusterState.firstEntry().getValue().get(0), 1); + RegionReplicaUtil.getRegionInfoForReplica(clusterState.firstEntry().getValue().get(0), 1); clusterState.lastEntry().getValue().add(replica1); cluster = new BalancerClusterState(clusterState, null, null, null); @@ -163,7 +161,7 @@ public void testNeedsBalanceForColocatedReplicas() { // add another server so that the cluster has some host on another rack map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1)); assertFalse(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, - new BalancerClusterState(map, null, null, new ForTestRackManagerOne()))); + new BalancerClusterState(map, null, null, new ForTestRackManagerOne()))); } @Test diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java index 278e9f2e6138..f5c58cb62b20 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java index 247baefdd611..e668b20c1cb3 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java index 098b3d901935..6ee9682e8d66 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,11 +26,11 @@ @Category({ MasterTests.class, LargeTests.class }) public class TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes - extends StochasticBalancerTestBase2 { + extends StochasticBalancerTestBase2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule - .forClass(TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.class); + .forClass(TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.class); @Test public void testRegionReplicationOnMidClusterReplicationGreaterThanNumNodes() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java index 85576efc229f..c8e7a83c9bd4 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java @@ -35,7 +35,7 @@ public class TestStochasticLoadBalancerRegionReplicaSameHosts extends Stochastic @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaSameHosts.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaSameHosts.class); @Test public void testRegionReplicationOnMidClusterSameHosts() { @@ -48,7 +48,7 @@ public void testRegionReplicationOnMidClusterSameHosts() { int numRegionsPerServer = 5; int numTables = 10; Map> serverMap = - createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables); + 
createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables); int numNodesPerHost = 4; // create a new map with 4 RS per host. diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java index eade1bf0bee3..ad60aadc1c17 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestStochasticLoadBalancerRegionReplicaWithRacks extends Stochastic @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaWithRacks.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaWithRacks.class); private static class ForTestRackManager extends RackManager { @@ -70,7 +70,7 @@ public void testRegionReplicationOnMidClusterWithRacks() { int numTables = 1; int numRacks = 3; // all replicas should be on a different rack Map> serverMap = - createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); RackManager rm = new ForTestRackManager(numRacks); testWithClusterWithIteration(serverMap, rm, true, true); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java index 479e194a6d6c..831c9e932727 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestStochasticLoadBalancerSmallCluster extends StochasticBalancerTe @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerSmallCluster.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerSmallCluster.class); @Test public void testSmallCluster() { diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index ffae7a9a1d78..154ceca9ed8b 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT .. 
hbase-build-configuration - Apache HBase - Build Configuration - Configure the build-support artifacts for maven build pom + Apache HBase - Build Configuration + Configure the build-support artifacts for maven build + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.yetus + audience-annotations + + @@ -50,18 +62,6 @@ - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.apache.yetus - audience-annotations - - errorProne diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml index fbf7d97b2146..1da90e79baf5 100644 --- a/hbase-checkstyle/pom.xml +++ b/hbase-checkstyle/pom.xml @@ -1,7 +1,5 @@ - + -4.0.0 -hbase-checkstyle -3.0.0-alpha-3-SNAPSHOT -Apache HBase - Checkstyle -Module to hold Checkstyle properties for HBase. - + 4.0.0 + - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT .. + hbase-checkstyle + 3.0.0-alpha-3-SNAPSHOT + Apache HBase - Checkstyle + Module to hold Checkstyle properties for HBase. - - + + - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - maven-assembly-plugin - - true - - - - + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + maven-assembly-plugin + + true + + + + diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index 885085c5ff8c..d766eec915fe 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,28 +30,6 @@ hbase-client Apache HBase - Client Client of HBase - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - @@ -209,6 +186,28 @@ + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -228,7 +227,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -316,8 +317,7 @@ lifecycle-mapping - - + diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java index b137a7da2ceb..b9736d573454 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +22,8 @@ /** * Interface to support the aborting of a given server or client. *

- * This is used primarily for ZooKeeper usage when we could get an unexpected
- * and fatal exception, requiring an abort.
+ * This is used primarily for ZooKeeper usage when we could get an unexpected and fatal exception,
+ * requiring an abort.
 *

* Implemented by the Master, RegionServer, and TableServers (client). */ @@ -33,13 +32,12 @@ public interface Abortable { /** * Abort the server or client. * @param why Why we're aborting. - * @param e Throwable that caused abort. Can be null. + * @param e Throwable that caused abort. Can be null. */ void abort(String why, Throwable e); /** - * It just call another abort method and the Throwable - * parameter is null. + * It just call another abort method and the Throwable parameter is null. * @param why Why we're aborting. * @see Abortable#abort(String, Throwable) */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java index 91cedd60299d..615b3a467e6e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.util.Collections; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -56,9 +54,8 @@ public int getExceptionCount() { private String getFailedRegions() { return exceptions.keySet().stream() - .map(regionName -> RegionInfo.prettyPrint(RegionInfo.encodeRegionName(regionName))) - .collect(Collectors.toList()) - .toString(); + .map(regionName -> RegionInfo.prettyPrint(RegionInfo.encodeRegionName(regionName))) + .collect(Collectors.toList()).toString(); } @InterfaceAudience.Private @@ -68,11 +65,8 @@ public static CacheEvictionStatsBuilder builder() { @Override public String toString() { - return "CacheEvictionStats{" + - "evictedBlocks=" + evictedBlocks + - ", maxCacheSize=" + maxCacheSize + - ", failedRegionsSize=" + getExceptionCount() + - ", failedRegions=" + getFailedRegions() + - '}'; + return "CacheEvictionStats{" + "evictedBlocks=" + evictedBlocks + ", maxCacheSize=" + + maxCacheSize + ", failedRegionsSize=" + getExceptionCount() + ", failedRegions=" + + getFailedRegions() + '}'; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java index 85d68dcc08bc..fabe7f030278 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,4 +38,4 @@ public synchronized void append(CacheEvictionStats stats) { public synchronized CacheEvictionStats sum() { return this.builder.build(); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java index d9e1400da16b..4b31d98611bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.HashMap; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -42,7 +40,7 @@ public CacheEvictionStatsBuilder withMaxCacheSize(long maxCacheSize) { return this; } - public void addException(byte[] regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java index 3feaaaf17a81..8bfde779176e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Returned to the clients when their request was discarded due to server being overloaded. - * Clients should retry upon receiving it. + * Returned to the clients when their request was discarded due to server being overloaded. Clients + * should retry upon receiving it. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java index 6bf68bc4ad0e..ecad4d9f0bc2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Returned to clients when their request was dropped because the call queue was too big to - * accept a new call. Clients should retry upon receiving it. + * Returned to clients when their request was dropped because the call queue was too big to accept a + * new call. Clients should retry upon receiving it. 
*/ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index 3cf6cc035238..6ad1a18e83da 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -105,7 +105,7 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws /** * Returns the RegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and * qualifier of the catalog table result. - * @param r a Result object from the catalog table scan + * @param r a Result object from the catalog table scan * @param qualifier Column family qualifier * @return An RegionInfo instance or null. */ @@ -132,9 +132,9 @@ public static RegionInfo getRegionInfo(Result data) { /** * Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and * replicaId. The regionInfo can be the default region info for the replica. - * @param r the meta row result + * @param r the meta row result * @param regionInfo RegionInfo for default replica - * @param replicaId the replicaId for the HRegionLocation + * @param replicaId the replicaId for the HRegionLocation * @return HRegionLocation parsed from the given meta row Result for the given replicaId */ public static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo, @@ -232,9 +232,10 @@ public static ServerName getServerName(Result r, int replicaId) { * @return a byte[] for server column qualifier */ public static byte[] getServerColumn(int replicaId) { - return replicaId == 0 ? HConstants.SERVER_QUALIFIER : - Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? HConstants.SERVER_QUALIFIER + : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -243,9 +244,10 @@ public static byte[] getServerColumn(int replicaId) { * @return a byte[] for server start code column qualifier */ public static byte[] getStartCodeColumn(int replicaId) { - return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER : - Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? HConstants.STARTCODE_QUALIFIER + : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -268,9 +270,10 @@ private static long getSeqNumDuringOpen(final Result r, final int replicaId) { * @return a byte[] for seqNum column qualifier */ public static byte[] getSeqNumColumn(int replicaId) { - return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER : - Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? 
HConstants.SEQNUM_QUALIFIER + : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** The delimiter for meta columns for replicaIds > 0 */ @@ -308,9 +311,10 @@ public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) { * @return a byte[] for state qualifier */ public static byte[] getRegionStateColumn(int replicaId) { - return replicaId == 0 ? HConstants.STATE_QUALIFIER : - Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? HConstants.STATE_QUALIFIER + : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -319,9 +323,10 @@ public static byte[] getRegionStateColumn(int replicaId) { * @return a byte[] for sn column qualifier */ public static byte[] getServerNameColumn(int replicaId) { - return replicaId == 0 ? HConstants.SERVERNAME_QUALIFIER : - Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 + ? HConstants.SERVERNAME_QUALIFIER + : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -398,7 +403,7 @@ public static boolean hasMergeRegions(Cell[] cells) { */ public static boolean isMergeQualifierPrefix(Cell cell) { // Check to see if has family and that qualifier starts with the merge qualifier 'merge' - return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) && - PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); + return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) + && PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java index ecc65733c12b..14076f0d7b00 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,8 +58,10 @@ private ClientMetaTableAccessor() { @InterfaceAudience.Private public enum QueryType { - ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), REGION(HConstants.CATALOG_FAMILY), - TABLE(HConstants.TABLE_FAMILY), REPLICATION(HConstants.REPLICATION_BARRIER_FAMILY); + ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), + REGION(HConstants.CATALOG_FAMILY), + TABLE(HConstants.TABLE_FAMILY), + REPLICATION(HConstants.REPLICATION_BARRIER_FAMILY); private final byte[][] families; @@ -97,9 +99,8 @@ public static CompletableFuture> getTableState(AsyncTable> @@ -124,9 +125,8 @@ public static CompletableFuture> getTableState(AsyncTable> @@ -145,8 +145,10 @@ public static CompletableFuture> getTableState(AsyncTable CatalogFamilyFormat.getRegionInfo(result) != null).forEach(result -> { getRegionLocations(result).ifPresent(locations -> { for (HRegionLocation location : locations.getRegionLocations()) { - if (location != null && - encodedRegionNameStr.equals(location.getRegion().getEncodedName())) { + if ( + location != null + && encodedRegionNameStr.equals(location.getRegion().getEncodedName()) + ) { future.complete(Optional.of(location)); return; } @@ -163,9 +165,8 @@ private static Optional getTableState(Result r) throws IOException { } /** - * Used to get all region locations for the specific table. - * @param metaTable - * @param tableName table we're looking for, can be null for getting all regions + * Used to get all region locations for the specific table. n * @param tableName table we're + * looking for, can be null for getting all regions * @return the list of region locations. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -188,9 +189,8 @@ public static CompletableFuture> getTableHRegionLocations( } /** - * Used to get table regions' info and server. - * @param metaTable - * @param tableName table we're looking for, can be null for getting all regions + * Used to get table regions' info and server. n * @param tableName table we're looking for, can + * be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return the list of regioninfos and server. The return value will be wrapped by a * {@link CompletableFuture}. @@ -219,10 +219,8 @@ private static CompletableFuture>> getTableReg } /** - * Performs a scan of META table for given table. - * @param metaTable - * @param tableName table withing we scan - * @param type scanned part of meta + * Performs a scan of META table for given table. 
n * @param tableName table withing we scan + * @param type scanned part of meta * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, @@ -232,11 +230,9 @@ private static CompletableFuture scanMeta(AsyncTable scanMeta(AsyncTable metaTable, int rowUpperLimit) { Scan scan = new Scan(); int scannerCaching = metaTable.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); - if (metaTable.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)) { + if ( + metaTable.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, + HConstants.DEFAULT_USE_META_REPLICAS) + ) { scan.setConsistency(Consistency.TIMELINE); } if (rowUpperLimit <= scannerCaching) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java index a63ca6936ec1..1afcb30ece01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,10 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server clock skew is - * too high. + * This exception is thrown by the master when a region server clock skew is too high. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 1dd01faf808a..e769e80847f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -15,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; import java.util.UUID; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * The identifier for this cluster. - * It is serialized to the filesystem and up into zookeeper. This is a container for the id. - * Also knows how to serialize and deserialize the cluster id. + * The identifier for this cluster. It is serialized to the filesystem and up into zookeeper. This + * is a container for the id. Also knows how to serialize and deserialize the cluster id. */ @InterfaceAudience.Private public class ClusterId { private final String id; /** - * New ClusterID. Generates a uniqueid. + * New ClusterID. Generates a uniqueid. 
*/ public ClusterId() { this(UUID.randomUUID().toString()); @@ -50,17 +48,15 @@ public ClusterId(final String uuid) { /** * @return The clusterid serialized using pb w/ pb magic prefix */ - public byte [] toByteArray() { + public byte[] toByteArray() { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } /** * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix - * @return An instance of {@link ClusterId} made from bytes - * @throws DeserializationException - * @see #toByteArray() + * @return An instance of {@link ClusterId} made from bytes n * @see #toByteArray() */ - public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException { + public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pblen = ProtobufUtil.lengthOfPBMagic(); ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); @@ -87,8 +83,7 @@ public ClusterIdProtos.ClusterId convert() { } /** - * @param cid - * @return A {@link ClusterId} made from the passed in cid + * n * @return A {@link ClusterId} made from the passed in cid */ public static ClusterId convert(final ClusterIdProtos.ClusterId cid) { return new ClusterId(cid.getClusterId()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java index 29679e6fb6f4..769d48496afa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -39,28 +37,32 @@ *

 * <li>The average cluster load.</li>
 * <li>The number of regions deployed on the cluster.</li>
 * <li>The number of requests since last report.</li>
- * <li>Detailed region server loading and resource usage information,
- * per server and per region.</li>
+ * <li>Detailed region server loading and resource usage information, per server and per
+ * region.</li>
 * <li>Regions in transition at master</li>
 * <li>The unique cluster ID</li>
  • * - * {@link Option} provides a way to get desired ClusterStatus information. - * The following codes will get all the cluster information. + * {@link Option} provides a way to get desired ClusterStatus information. The following + * codes will get all the cluster information. + * *
    - * {@code
    - * // Original version still works
    - * Admin admin = connection.getAdmin();
    - * ClusterMetrics metrics = admin.getClusterStatus();
    - * // or below, a new version which has the same effects
    - * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
    + * {
    + *   @code
    + *   // Original version still works
    + *   Admin admin = connection.getAdmin();
    + *   ClusterMetrics metrics = admin.getClusterStatus();
    + *   // or below, a new version which has the same effects
    + *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
      * }
      * 
- * If information about live servers is the only wanted.
- * then codes in the following way:
+ *
+ * If information about live servers is the only wanted. then codes in the following way:
+ *
 *
    - * {@code
    - * Admin admin = connection.getAdmin();
    - * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
    + * {
    + *   @code
    + *   Admin admin = connection.getAdmin();
    + *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
      * }
      * 
    */ @@ -88,7 +90,7 @@ public interface ClusterMetrics { */ default int getRegionCount() { return getLiveServerMetrics().entrySet().stream() - .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); + .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); } /** @@ -96,8 +98,8 @@ default int getRegionCount() { */ default long getRequestCount() { return getLiveServerMetrics().entrySet().stream() - .flatMap(v -> v.getValue().getRegionMetrics().values().stream()) - .mapToLong(RegionMetrics::getRequestCount).sum(); + .flatMap(v -> v.getValue().getRegionMetrics().values().stream()) + .mapToLong(RegionMetrics::getRequestCount).sum(); } /** @@ -122,17 +124,15 @@ default long getRequestCount() { default long getLastMajorCompactionTimestamp(TableName table) { return getLiveServerMetrics().values().stream() - .flatMap(s -> s.getRegionMetrics().values().stream()) - .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table)) - .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0); + .flatMap(s -> s.getRegionMetrics().values().stream()) + .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table)) + .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0); } default long getLastMajorCompactionTimestamp(byte[] regionName) { return getLiveServerMetrics().values().stream() - .filter(s -> s.getRegionMetrics().containsKey(regionName)) - .findAny() - .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()) - .orElse(0L); + .filter(s -> s.getRegionMetrics().containsKey(regionName)).findAny() + .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()).orElse(0L); } @Nullable @@ -150,13 +150,12 @@ default double getAverageLoad() { if (serverSize == 0) { return 0; } - return (double)getRegionCount() / (double)serverSize; + return (double) getRegionCount() / (double) serverSize; } /** - * Provide region states count for given table. - * e.g howmany regions of give table are opened/closed/rit etc - * + * Provide region states count for given table. e.g howmany regions of give table are + * opened/closed/rit etc * @return map of table to region states count */ Map getTableRegionStatesCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 011f93f9fe90..7ef8a2086118 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -26,13 +24,13 @@ import java.util.Map; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.RegionState; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Option; @@ -43,42 +41,34 @@ public final class ClusterMetricsBuilder { public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) { - ClusterStatusProtos.ClusterStatus.Builder builder - = ClusterStatusProtos.ClusterStatus.newBuilder() - .addAllBackupMasters(metrics.getBackupMasterNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) - .addAllDeadServers(metrics.getDeadServerNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) + ClusterStatusProtos.ClusterStatus.Builder builder = + ClusterStatusProtos.ClusterStatus.newBuilder() + .addAllBackupMasters(metrics.getBackupMasterNames().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .addAllDeadServers(metrics.getDeadServerNames().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() - .map(s -> ClusterStatusProtos.LiveServerInfo - .newBuilder() - .setServer(ProtobufUtil.toServerName(s.getKey())) - .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())) - .build()) - .collect(Collectors.toList())) + .map(s -> ClusterStatusProtos.LiveServerInfo.newBuilder() + .setServer(ProtobufUtil.toServerName(s.getKey())) + .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())).build()) + .collect(Collectors.toList())) .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) - .collect(Collectors.toList())) + .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList())) .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() - .map(r -> ClusterStatusProtos.RegionInTransition - .newBuilder() - .setSpec(HBaseProtos.RegionSpecifier - .newBuilder() - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) - .build()) - .setRegionState(r.convert()) - .build()) - .collect(Collectors.toList())) + .map(r -> ClusterStatusProtos.RegionInTransition.newBuilder() + .setSpec(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())).build()) + .setRegionState(r.convert()).build()) + .collect(Collectors.toList())) .setMasterInfoPort(metrics.getMasterInfoPort()) .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() - .map(status -> - ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) - 
.setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())) - .build()) + .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build()) .collect(Collectors.toList())); if (metrics.getMasterName() != null) { builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); @@ -95,40 +85,33 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m } if (metrics.getHBaseVersion() != null) { builder.setHbaseVersion( - FSProtos.HBaseVersionFileContent.newBuilder() - .setVersion(metrics.getHBaseVersion())); + FSProtos.HBaseVersionFileContent.newBuilder().setVersion(metrics.getHBaseVersion())); } return builder.build(); } - public static ClusterMetrics toClusterMetrics( - ClusterStatusProtos.ClusterStatus proto) { + public static ClusterMetrics toClusterMetrics(ClusterStatusProtos.ClusterStatus proto) { ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder(); - builder.setLiveServerMetrics(proto.getLiveServersList().stream() + builder + .setLiveServerMetrics(proto.getLiveServersList().stream() .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), - ServerMetricsBuilder::toServerMetrics))) - .setDeadServerNames(proto.getDeadServersList().stream() - .map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .setBackerMasterNames(proto.getBackupMastersList().stream() - .map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .setRegionsInTransition(proto.getRegionsInTransitionList().stream() - .map(ClusterStatusProtos.RegionInTransition::getRegionState) - .map(RegionState::convert) - .collect(Collectors.toList())) - .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() - .map(HBaseProtos.Coprocessor::getName) - .collect(Collectors.toList())) - .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .setTableRegionStatesCount( - proto.getTableRegionStatesCountList().stream() - .collect(Collectors.toMap( - e -> ProtobufUtil.toTableName(e.getTableName()), - e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) - .setMasterTasks(proto.getMasterTasksList().stream() - .map(t -> ProtobufUtil.getServerTask(t)).collect(Collectors.toList())); + ServerMetricsBuilder::toServerMetrics))) + .setDeadServerNames(proto.getDeadServersList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setBackerMasterNames(proto.getBackupMastersList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setRegionsInTransition(proto.getRegionsInTransitionList().stream() + .map(ClusterStatusProtos.RegionInTransition::getRegionState).map(RegionState::convert) + .collect(Collectors.toList())) + .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) + .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setTableRegionStatesCount(proto.getTableRegionStatesCountList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toTableName(e.getTableName()), + e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) + .setMasterTasks(proto.getMasterTasksList().stream().map(t -> ProtobufUtil.getServerTask(t)) + .collect(Collectors.toList())); if (proto.hasClusterId()) { 
builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString()); } @@ -158,21 +141,35 @@ public static ClusterMetrics toClusterMetrics( */ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) { switch (option) { - case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterMetrics.Option.MASTER; - case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON; - case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterMetrics.Option.TASKS; + case HBASE_VERSION: + return ClusterMetrics.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterMetrics.Option.LIVE_SERVERS; + case DEAD_SERVERS: + return ClusterMetrics.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterMetrics.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterMetrics.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterMetrics.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterMetrics.Option.MASTER; + case BACKUP_MASTERS: + return ClusterMetrics.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterMetrics.Option.BALANCER_ON; + case SERVERS_NAME: + return ClusterMetrics.Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterMetrics.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterMetrics.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -183,21 +180,35 @@ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) */ public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) { switch (option) { - case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterStatusProtos.Option.MASTER; - case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; - case SERVERS_NAME: return Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterStatusProtos.Option.TASKS; + case HBASE_VERSION: + return ClusterStatusProtos.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterStatusProtos.Option.LIVE_SERVERS; + case DEAD_SERVERS: + return ClusterStatusProtos.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + 
return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterStatusProtos.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterStatusProtos.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterStatusProtos.Option.MASTER; + case BACKUP_MASTERS: + return ClusterStatusProtos.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterStatusProtos.Option.BALANCER_ON; + case SERVERS_NAME: + return Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterStatusProtos.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterStatusProtos.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -208,7 +219,7 @@ public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) */ public static EnumSet toOptions(List options) { return options.stream().map(ClusterMetricsBuilder::toOption) - .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class))); + .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class))); } /** @@ -223,6 +234,7 @@ public static List toOptions(EnumSet deadServerNames = Collections.emptyList(); @@ -244,10 +256,12 @@ public static ClusterMetricsBuilder newBuilder() { private ClusterMetricsBuilder() { } + public ClusterMetricsBuilder setHBaseVersion(String value) { this.hbaseVersion = value; return this; } + public ClusterMetricsBuilder setDeadServerNames(List value) { this.deadServerNames = value; return this; @@ -262,62 +276,59 @@ public ClusterMetricsBuilder setMasterName(ServerName value) { this.masterName = value; return this; } + public ClusterMetricsBuilder setBackerMasterNames(List value) { this.backupMasterNames = value; return this; } + public ClusterMetricsBuilder setRegionsInTransition(List value) { this.regionsInTransition = value; return this; } + public ClusterMetricsBuilder setClusterId(String value) { this.clusterId = value; return this; } + public ClusterMetricsBuilder setMasterCoprocessorNames(List value) { this.masterCoprocessorNames = value; return this; } + public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) { this.balancerOn = value; return this; } + public ClusterMetricsBuilder setMasterInfoPort(int value) { this.masterInfoPort = value; return this; } + public ClusterMetricsBuilder setServerNames(List serversName) { this.serversName = serversName; return this; } + public ClusterMetricsBuilder setMasterTasks(List masterTasks) { this.masterTasks = masterTasks; return this; } - public ClusterMetricsBuilder setTableRegionStatesCount( - Map tableRegionStatesCount) { + public ClusterMetricsBuilder + setTableRegionStatesCount(Map tableRegionStatesCount) { this.tableRegionStatesCount = tableRegionStatesCount; return this; } public ClusterMetrics build() { - return new ClusterMetricsImpl( - hbaseVersion, - deadServerNames, - liveServerMetrics, - masterName, - backupMasterNames, - regionsInTransition, - clusterId, - masterCoprocessorNames, - balancerOn, - masterInfoPort, - serversName, - tableRegionStatesCount, - masterTasks - ); + return new ClusterMetricsImpl(hbaseVersion, deadServerNames, liveServerMetrics, masterName, + backupMasterNames, regionsInTransition, clusterId, masterCoprocessorNames, balancerOn, + masterInfoPort, serversName, tableRegionStatesCount, masterTasks); } + private static class 
ClusterMetricsImpl implements ClusterMetrics { @Nullable private final String hbaseVersion; @@ -338,17 +349,11 @@ private static class ClusterMetricsImpl implements ClusterMetrics { private final List masterTasks; ClusterMetricsImpl(String hbaseVersion, List deadServerNames, - Map liveServerMetrics, - ServerName masterName, - List backupMasterNames, - List regionsInTransition, - String clusterId, - List masterCoprocessorNames, - Boolean balancerOn, - int masterInfoPort, - List serversName, - Map tableRegionStatesCount, - List masterTasks) { + Map liveServerMetrics, ServerName masterName, + List backupMasterNames, List regionsInTransition, String clusterId, + List masterCoprocessorNames, Boolean balancerOn, int masterInfoPort, + List serversName, Map tableRegionStatesCount, + List masterTasks) { this.hbaseVersion = hbaseVersion; this.deadServerNames = Preconditions.checkNotNull(deadServerNames); this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics); @@ -437,15 +442,15 @@ public String toString() { int backupMastersSize = getBackupMasterNames().size(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: getBackupMasterNames()) { + for (ServerName serverName : getBackupMasterNames()) { sb.append("\n " + serverName); } } int serversSize = getLiveServerMetrics().size(); int serversNameSize = getServersName().size(); - sb.append("\nNumber of live region servers: " - + (serversSize > 0 ? serversSize : serversNameSize)); + sb.append( + "\nNumber of live region servers: " + (serversSize > 0 ? serversSize : serversNameSize)); if (serversSize > 0) { for (ServerName serverName : getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java index 86aca2bc8177..b8b2519dc09f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java index fa202c17eb7d..d6ce1fbf4a5d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -7,32 +7,29 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; import java.util.Collections; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** * Base interface for the 4 coprocessors - MasterCoprocessor, RegionCoprocessor, - * RegionServerCoprocessor, and WALCoprocessor. - * Do NOT implement this interface directly. Unless an implementation implements one (or more) of - * the above mentioned 4 coprocessors, it'll fail to be loaded by any coprocessor host. + * RegionServerCoprocessor, and WALCoprocessor. Do NOT implement this interface directly. Unless an + * implementation implements one (or more) of the above mentioned 4 coprocessors, it'll fail to be + * loaded by any coprocessor host. Example: Building a coprocessor to observe Master operations. * - * Example: - * Building a coprocessor to observe Master operations. *
      * class MyMasterCoprocessor implements MasterCoprocessor {
      *   @Override
    @@ -47,6 +44,7 @@
      * 
 *
 * Building a Service which can be loaded by both Master and RegionServer
+ *
 *
      * class MyCoprocessorService implements MasterCoprocessor, RegionServerCoprocessor {
      *   @Override
    @@ -86,18 +84,19 @@ enum State {
   * Called by the {@link CoprocessorEnvironment} during its own startup to initialize the
        * coprocessor.
        */
    -  default void start(CoprocessorEnvironment env) throws IOException {}
    +  default void start(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
    -   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the
    -   * coprocessor.
+   * Called by the {@link CoprocessorEnvironment} during its own shutdown to stop the coprocessor.
        */
    -  default void stop(CoprocessorEnvironment env) throws IOException {}
    +  default void stop(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
        * Coprocessor endpoints providing protobuf services should override this method.
    -   * @return Iterable of {@link Service}s or empty collection. Implementations should never
    -   * return null.
    +   * @return Iterable of {@link Service}s or empty collection. Implementations should never return
    +   *         null.
        */
  default Iterable<Service> getServices() {
         return Collections.EMPTY_SET;
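Below is a minimal, illustrative sketch (not part of this patch) of a coprocessor that relies on the default start()/stop() and getServices() shown in the hunk above; the class name NoopMasterCoprocessor is invented.

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;

// Hypothetical coprocessor: implements MasterCoprocessor (one of the four
// sub-interfaces) rather than Coprocessor directly, so the host will load it.
public class NoopMasterCoprocessor implements MasterCoprocessor {

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // one-time initialization performed by the coprocessor host on startup
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    // shutdown hook; getServices() keeps its default empty collection
  }
}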
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    index 4fab7333dcd9..edbc5f479d6e 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    @@ -7,16 +7,14 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase;
     
     import org.apache.hadoop.conf.Configuration;
@@ -46,8 +44,8 @@ public interface CoprocessorEnvironment<C extends Coprocessor> {
       int getLoadSequence();
     
       /**
    -   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try
    -   *   to set a configuration.
    +   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
    +   *         set a configuration.
        */
       Configuration getConfiguration();
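A short usage sketch of the read-only contract documented just above; it assumes it runs inside a coprocessor's start(CoprocessorEnvironment env), and the configuration key used for the write is made up.

// Hypothetical snippet inside start(CoprocessorEnvironment env):
Configuration conf = env.getConfiguration();
String quorum = conf.get("hbase.zookeeper.quorum");  // reads are fine
try {
  conf.set("hbase.example.flag", "true");            // writes are rejected
} catch (UnsupportedOperationException expected) {
  // the environment exposes a read-only view, as the javadoc above states
}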
     
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    index 509844e367d8..7e1821de7d47 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -41,7 +40,7 @@ public DoNotRetryIOException(String message) {
       }
     
       /**
    -   * @param message the message for this exception
    +   * @param message   the message for this exception
        * @param throwable the {@link Throwable} to use for this exception
        */
       public DoNotRetryIOException(String message, Throwable throwable) {
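An illustrative sketch of the (message, throwable) constructor documented above; the validation helper and variable names are invented.

// Hypothetical server-side usage: wrap a non-recoverable failure so clients stop retrying.
try {
  parseFilterSpec(filterSpec);   // invented helper standing in for request validation
} catch (IllegalArgumentException e) {
  throw new DoNotRetryIOException("Invalid filter specification: " + filterSpec, e);
}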
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    index 76f374c412f0..f4391f1025c4 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    @@ -7,24 +7,22 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     package org.apache.hadoop.hbase;
     
     import java.io.IOException;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Thrown during flush if the possibility snapshot content was not properly
    - * persisted into store files.  Response should include replay of wal content.
+ * Thrown during flush when snapshot content may not have been properly persisted into store
+ * files. The response should include a replay of WAL content.
      */
     @InterfaceAudience.Public
     public class DroppedSnapshotException extends IOException {
    @@ -43,9 +41,8 @@ public DroppedSnapshotException(String message) {
     
       /**
        * DroppedSnapshotException with cause
    -   *
        * @param message the message for this exception
    -   * @param cause the cause for this exception
    +   * @param cause   the cause for this exception
        */
       public DroppedSnapshotException(String message, Throwable cause) {
         super(message, cause);
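A hedged sketch of the two-argument constructor above; the flush helper and the region name variable are stand-ins, not real HBase API.

// Hypothetical flush path: keep the original cause so WAL replay can be requested.
try {
  flushSnapshotToStoreFiles();   // stand-in for the real flush step
} catch (IOException ioe) {
  throw new DroppedSnapshotException("region: " + regionNameForLogging, ioe);
}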
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
    index c72ed19e486b..2e4ebbd0baa6 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -20,8 +20,8 @@
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Base class for exceptions thrown by an HBase server. May contain extra info about
    - * the state of the server when the exception was thrown.
    + * Base class for exceptions thrown by an HBase server. May contain extra info about the state of
    + * the server when the exception was thrown.
      */
     @InterfaceAudience.Public
     public class HBaseServerException extends HBaseIOException {
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
    index 8f356f1fe774..0decb58bc20b 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -23,17 +22,13 @@
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Data structure to hold RegionInfo and the address for the hosting
    - * HRegionServer.  Immutable.  Comparable, but we compare the 'location' only:
    - * i.e. the hostname and port, and *not* the regioninfo.  This means two
    - * instances are the same if they refer to the same 'location' (the same
    - * hostname and port), though they may be carrying different regions.
    - *
    - * On a big cluster, each client will have thousands of instances of this object, often
    - *  100 000 of them if not million. It's important to keep the object size as small
    - *  as possible.
    - *
     - * This interface has been marked InterfaceAudience.Public in 0.96 and 0.98.
     + * Data structure to hold RegionInfo and the address for the hosting HRegionServer. Immutable.
     + * Comparable, but we compare the 'location' only: i.e. the hostname and port, and *not* the
     + * regioninfo. This means two instances are the same if they refer to the same 'location' (the same
     + * hostname and port), though they may be carrying different regions. On a big cluster, each client
     + * will have thousands of instances of this object, often 100 000 of them if not million. It's
     + * important to keep the object size as small as possible.
    + * This interface has been marked InterfaceAudience.Public in 0.96 and 0.98. */ @InterfaceAudience.Public public class HRegionLocation implements Comparable { @@ -57,7 +52,7 @@ public HRegionLocation(RegionInfo regionInfo, ServerName serverName, long seqNum @Override public String toString() { return "region=" + (this.regionInfo == null ? "null" : this.regionInfo.getRegionNameAsString()) - + ", hostname=" + this.serverName + ", seqNum=" + seqNum; + + ", hostname=" + this.serverName + ", seqNum=" + seqNum; } /** @@ -74,7 +69,7 @@ public boolean equals(Object o) { if (!(o instanceof HRegionLocation)) { return false; } - return this.compareTo((HRegionLocation)o) == 0; + return this.compareTo((HRegionLocation) o) == 0; } /** @@ -86,9 +81,9 @@ public int hashCode() { } /** - * @return regionInfo + * n */ - public RegionInfo getRegion(){ + public RegionInfo getRegion() { return regionInfo; } @@ -105,8 +100,8 @@ public long getSeqNum() { } /** - * @return String made of hostname and port formatted as - * per {@link Addressing#createHostAndPortStr(String, int)} + * @return String made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java index 63c26e2c393f..2a099157bc76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a request is table schema modification is requested but - * made for an invalid family name. + * Thrown if a request is table schema modification is requested but made for an invalid family + * name. */ @InterfaceAudience.Public public class InvalidFamilyOperationException extends DoNotRetryIOException { private static final long serialVersionUID = (1L << 22) - 1L; + /** default constructor */ public InvalidFamilyOperationException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java index dd19fa1c2279..2ae80cade98a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,27 +23,25 @@ * Ways to keep cells marked for delete around. */ /* - * Don't change the TRUE/FALSE labels below, these have to be called - * this way for backwards compatibility. + * Don't change the TRUE/FALSE labels below, these have to be called this way for backwards + * compatibility. */ @InterfaceAudience.Public public enum KeepDeletedCells { /** Deleted Cells are not retained. */ FALSE, /** - * Deleted Cells are retained until they are removed by other means - * such TTL or VERSIONS. 
- * If no TTL is specified or no new versions of delete cells are - * written, they are retained forever. + * Deleted Cells are retained until they are removed by other means such TTL or VERSIONS. If no + * TTL is specified or no new versions of delete cells are written, they are retained forever. */ TRUE, /** - * Deleted Cells are retained until the delete marker expires due to TTL. - * This is useful when TTL is combined with MIN_VERSIONS and one - * wants to keep a minimum number of versions around but at the same - * time remove deleted cells after the TTL. + * Deleted Cells are retained until the delete marker expires due to TTL. This is useful when TTL + * is combined with MIN_VERSIONS and one wants to keep a minimum number of versions around but at + * the same time remove deleted cells after the TTL. */ TTL; + public static KeepDeletedCells getValue(String val) { return valueOf(val.toUpperCase()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java index 35cdecba9bb6..86e394e33403 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,6 +25,7 @@ @InterfaceAudience.Public public class MasterNotRunningException extends HBaseIOException { private static final long serialVersionUID = (1L << 23) - 1L; + /** default constructor */ public MasterNotRunningException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java index 099ea4054591..b913ac0506cd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,16 +30,15 @@ public enum MemoryCompactionPolicy { NONE, /** * Basic policy applies optimizations which modify the index to a more compacted representation. - * This is beneficial in all access patterns. The smaller the cells are the greater the - * benefit of this policy. - * This is the default policy. + * This is beneficial in all access patterns. The smaller the cells are the greater the benefit of + * this policy. This is the default policy. */ BASIC, /** - * In addition to compacting the index representation as the basic policy, eager policy - * eliminates duplication while the data is still in memory (much like the - * on-disk compaction does after the data is flushed to disk). This policy is most useful for - * applications with high data churn or small working sets. + * In addition to compacting the index representation as the basic policy, eager policy eliminates + * duplication while the data is still in memory (much like the on-disk compaction does after the + * data is flushed to disk). This policy is most useful for applications with high data churn or + * small working sets. 
*/ EAGER, /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java index 3e06f4250af6..a49575849b04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Exception thrown when the result needs to be chunked on the server side. - * It signals that retries should happen right away and not count against the number of - * retries because some of the multi was a success. + * Exception thrown when the result needs to be chunked on the server side. It signals that retries + * should happen right away and not count against the number of retries because some of the multi + * was a success. */ @InterfaceAudience.Public public class MultiActionResultTooLarge extends RetryImmediatelyException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java index 5263523417ed..83e29fd9edc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java index 72ff1e61b849..0af01d23bddf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java index c51fccb5955d..a15833ac17a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +25,7 @@ @InterfaceAudience.Public public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { private static final long serialVersionUID = 6439786157874827523L; + /** * default constructor */ @@ -35,7 +34,7 @@ public NotAllMetaRegionsOnlineException() { } /** - * @param message + * n */ public NotAllMetaRegionsOnlineException(String message) { super(message); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java index 918408778c0d..aa138478b4ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java index e887928da828..473947b8f769 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server was shut down and - * restarted so fast that the master still hasn't processed the server shutdown - * of the first instance, or when master is initializing and client call admin - * operations, or when an operation is performed on a region server that is still starting. + * This exception is thrown by the master when a region server was shut down and restarted so fast + * that the master still hasn't processed the server shutdown of the first instance, or when master + * is initializing and client call admin operations, or when an operation is performed on a region + * server that is still starting. 
*/ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java index 62f84e9495be..5e60e44243a0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java index 8a8d2151aa2e..aff9ff8af472 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when something happens related to region handling. - * Subclasses have to be more specific. + * Thrown when something happens related to region handling. Subclasses have to be more specific. */ @InterfaceAudience.Public public class RegionException extends HBaseIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 0d3a464e0f86..4d6dd6d43fa3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Container for holding a list of {@link HRegionLocation}'s that correspond to the - * same range. The list is indexed by the replicaId. This is an immutable list, - * however mutation operations are provided which returns a new List via copy-on-write - * (assuming small number of locations) + * Container for holding a list of {@link HRegionLocation}'s that correspond to the same range. The + * list is indexed by the replicaId. 
This is an immutable list, however mutation operations are + * provided which returns a new List via copy-on-write (assuming small number of locations) */ @InterfaceAudience.Private public class RegionLocations implements Iterable { @@ -45,10 +42,9 @@ public class RegionLocations implements Iterable { private final HRegionLocation[] locations; // replicaId -> HRegionLocation. /** - * Constructs the region location list. The locations array should - * contain all the locations for known replicas for the region, and should be - * sorted in replicaId ascending order, although it can contain nulls indicating replicaIds - * that the locations of which are not known. + * Constructs the region location list. The locations array should contain all the locations for + * known replicas for the region, and should be sorted in replicaId ascending order, although it + * can contain nulls indicating replicaIds that the locations of which are not known. * @param locations an array of HRegionLocations for the same region range */ public RegionLocations(HRegionLocation... locations) { @@ -66,7 +62,7 @@ public RegionLocations(HRegionLocation... locations) { index++; } // account for the null elements in the array after maxReplicaIdIndex - maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1) ); + maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1)); if (maxReplicaId + 1 == locations.length) { this.locations = locations; @@ -79,7 +75,7 @@ public RegionLocations(HRegionLocation... locations) { } } for (HRegionLocation loc : this.locations) { - if (loc != null && loc.getServerName() != null){ + if (loc != null && loc.getServerName() != null) { numNonNullElements++; } } @@ -91,8 +87,7 @@ public RegionLocations(Collection locations) { } /** - * Returns the size of the list even if some of the elements - * might be null. + * Returns the size of the list even if some of the elements might be null. * @return the size of the list (corresponding to the max replicaId) */ public int size() { @@ -116,18 +111,18 @@ public boolean isEmpty() { } /** - * Returns a new RegionLocations with the locations removed (set to null) - * which have the destination server as given. + * Returns a new RegionLocations with the locations removed (set to null) which have the + * destination server as given. 
* @param serverName the serverName to remove locations of - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations removeByServer(ServerName serverName) { HRegionLocation[] newLocations = null; for (int i = 0; i < locations.length; i++) { // check whether something to remove if (locations[i] != null && serverName.equals(locations[i].getServerName())) { - if (newLocations == null) { //first time + if (newLocations == null) { // first time newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, i); } @@ -142,8 +137,8 @@ public RegionLocations removeByServer(ServerName serverName) { /** * Removes the given location from the list * @param location the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(HRegionLocation location) { if (location == null) return this; @@ -153,9 +148,12 @@ public RegionLocations remove(HRegionLocation location) { // check whether something to remove. HRL.compareTo() compares ONLY the // serverName. We want to compare the HRI's as well. - if (locations[replicaId] == null - || RegionInfo.COMPARATOR.compare(location.getRegion(), locations[replicaId].getRegion()) != 0 - || !location.equals(locations[replicaId])) { + if ( + locations[replicaId] == null + || RegionInfo.COMPARATOR.compare(location.getRegion(), locations[replicaId].getRegion()) + != 0 + || !location.equals(locations[replicaId]) + ) { return this; } @@ -169,8 +167,8 @@ public RegionLocations remove(HRegionLocation location) { /** * Removes location of the given replicaId from the list * @param replicaId the replicaId of the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(int replicaId) { if (getRegionLocation(replicaId) == null) { @@ -204,13 +202,11 @@ public RegionLocations removeElementsWithNullLocation() { } /** - * Merges this RegionLocations list with the given list assuming - * same range, and keeping the most up to date version of the - * HRegionLocation entries from either list according to seqNum. If seqNums - * are equal, the location from the argument (other) is taken. + * Merges this RegionLocations list with the given list assuming same range, and keeping the most + * up to date version of the HRegionLocation entries from either list according to seqNum. If + * seqNums are equal, the location from the argument (other) is taken. 
* @param other the locations to merge with - * @return an RegionLocations object with merged locations or the same object - * if nothing is merged + * @return an RegionLocations object with merged locations or the same object if nothing is merged */ public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -231,8 +227,7 @@ public RegionLocations mergeLocations(RegionLocations other) { regionInfo = otherLoc.getRegion(); } - HRegionLocation selectedLoc = selectRegionLocation(thisLoc, - otherLoc, true, false); + HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { @@ -247,10 +242,9 @@ public RegionLocations mergeLocations(RegionLocations other) { // ensure that all replicas share the same start code. Otherwise delete them if (newLocations != null && regionInfo != null) { - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { - if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, - newLocations[i].getRegion())) { + if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) { newLocations[i] = null; } } @@ -261,7 +255,7 @@ public RegionLocations mergeLocations(RegionLocations other) { } private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, - HRegionLocation location, boolean checkForEquals, boolean force) { + HRegionLocation location, boolean checkForEquals, boolean force) { if (location == null) { return oldLocation == null ? null : oldLocation; } @@ -270,44 +264,44 @@ private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, return location; } - if (force - || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { + if (force || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { return location; } return oldLocation; } /** - * Updates the location with new only if the new location has a higher - * seqNum than the old one or force is true. - * @param location the location to add or update - * @param checkForEquals whether to update the location if seqNums for the - * HRegionLocations for the old and new location are the same - * @param force whether to force update - * @return an RegionLocations object with updated locations or the same object - * if nothing is updated + * Updates the location with new only if the new location has a higher seqNum than the old one or + * force is true. 
+ * @param location the location to add or update + * @param checkForEquals whether to update the location if seqNums for the HRegionLocations for + * the old and new location are the same + * @param force whether to force update + * @return an RegionLocations object with updated locations or the same object if nothing is + * updated */ - public RegionLocations updateLocation(HRegionLocation location, - boolean checkForEquals, boolean force) { + public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, + boolean force) { assert location != null; int replicaId = location.getRegion().getReplicaId(); HRegionLocation oldLoc = getRegionLocation(location.getRegion().getReplicaId()); - HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, - checkForEquals, force); + HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, checkForEquals, force); if (selectedLoc == oldLoc) { return this; } - HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)]; + HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId + 1)]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = location; // ensure that all replicas share the same start code. Otherwise delete them - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { - if (!RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(), - newLocations[i].getRegion())) { + if ( + !RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(), + newLocations[i].getRegion()) + ) { newLocations[i] = null; } } @@ -327,16 +321,18 @@ public HRegionLocation getRegionLocation(int replicaId) { } /** - * Returns the region location from the list for matching regionName, which can - * be regionName or encodedRegionName + * Returns the region location from the list for matching regionName, which can be regionName or + * encodedRegionName * @param regionName regionName or encodedRegionName * @return HRegionLocation found or null */ public HRegionLocation getRegionLocationByRegionName(byte[] regionName) { for (HRegionLocation loc : locations) { if (loc != null) { - if (Bytes.equals(loc.getRegion().getRegionName(), regionName) - || Bytes.equals(loc.getRegion().getEncodedNameAsBytes(), regionName)) { + if ( + Bytes.equals(loc.getRegion().getRegionName(), regionName) + || Bytes.equals(loc.getRegion().getEncodedNameAsBytes(), regionName) + ) { return loc; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 8cd3ea156c4d..d873c4bc1cb4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; @@ -26,8 +23,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Encapsulates per-region load metrics. - */ + * Encapsulates per-region load metrics. 
+ */ @InterfaceAudience.Public public interface RegionMetrics { @@ -72,8 +69,8 @@ public interface RegionMetrics { public long getCpRequestCount(); /** - * @return the number of write requests and read requests and coprocessor - * service requests made to region + * @return the number of write requests and read requests and coprocessor service requests made to + * region */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount(); @@ -93,8 +90,8 @@ default String getNameAsString() { /** * TODO: why we pass the same value to different counters? Currently, the value from - * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() - * see HRegionServer#createRegionLoad. + * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() see + * HRegionServer#createRegionLoad. * @return The current total size of root-level indexes for the region */ Size getStoreFileIndexSize(); @@ -135,7 +132,6 @@ default String getNameAsString() { */ Map getStoreSequenceId(); - /** * @return the uncompressed size of the storefiles */ @@ -157,8 +153,8 @@ default String getNameAsString() { int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files - * of this region + * @return the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index 8349c35d7d33..43b3a17aac17 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Collections; @@ -39,98 +36,91 @@ @InterfaceAudience.Private public final class RegionMetricsBuilder { - public static List toRegionMetrics( - AdminProtos.GetRegionLoadResponse regionLoadResponse) { + public static List + toRegionMetrics(AdminProtos.GetRegionLoadResponse regionLoadResponse) { return regionLoadResponse.getRegionLoadsList().stream() - .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); + .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); } public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) { return RegionMetricsBuilder - .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray()) - .setBloomFilterSize(new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE)) - .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs()) - .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) - .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) - .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f) - .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ? - regionLoadPB.getDataLocalityForSsd() : 0.0f) - .setBlocksLocalWeight(regionLoadPB.hasBlocksLocalWeight() ? 
- regionLoadPB.getBlocksLocalWeight() : 0) - .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? - regionLoadPB.getBlocksLocalWithSsdWeight() : 0) - .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) - .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( - regionLoadPB.getCompactionState())) - .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) - .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) - .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) - .setReadRequestCount(regionLoadPB.getReadRequestsCount()) - .setCpRequestCount(regionLoadPB.getCpRequestsCount()) - .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) - .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreCount(regionLoadPB.getStores()) - .setStoreFileCount(regionLoadPB.getStorefiles()) - .setStoreRefCount(regionLoadPB.getStoreRefCount()) - .setMaxCompactedStoreFileRefCount(regionLoadPB.getMaxCompactedStoreFileRefCount()) - .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) - .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() - .collect(Collectors.toMap( - (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), - ClusterStatusProtos.StoreSequenceId::getSequenceId))) - .setUncompressedStoreFileSize( - new Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE)) - .build(); - } - - private static List toStoreSequenceId( - Map ids) { + .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray()) + .setBloomFilterSize(new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE)) + .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs()) + .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) + .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) + .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f) + .setDataLocalityForSsd( + regionLoadPB.hasDataLocalityForSsd() ? regionLoadPB.getDataLocalityForSsd() : 0.0f) + .setBlocksLocalWeight( + regionLoadPB.hasBlocksLocalWeight() ? regionLoadPB.getBlocksLocalWeight() : 0) + .setBlocksLocalWithSsdWeight( + regionLoadPB.hasBlocksLocalWithSsdWeight() ? 
regionLoadPB.getBlocksLocalWithSsdWeight() : 0) + .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) + .setCompactionState( + ProtobufUtil.createCompactionStateForRegionLoad(regionLoadPB.getCompactionState())) + .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) + .setStoreFileUncompressedDataIndexSize( + new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) + .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) + .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) + .setReadRequestCount(regionLoadPB.getReadRequestsCount()) + .setCpRequestCount(regionLoadPB.getCpRequestsCount()) + .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) + .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreFileRootLevelIndexSize( + new Size(regionLoadPB.getRootIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreCount(regionLoadPB.getStores()).setStoreFileCount(regionLoadPB.getStorefiles()) + .setStoreRefCount(regionLoadPB.getStoreRefCount()) + .setMaxCompactedStoreFileRefCount(regionLoadPB.getMaxCompactedStoreFileRefCount()) + .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) + .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() + .collect(Collectors.toMap( + (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), + ClusterStatusProtos.StoreSequenceId::getSequenceId))) + .setUncompressedStoreFileSize( + new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE)) + .build(); + } + + private static List + toStoreSequenceId(Map ids) { return ids.entrySet().stream() - .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())) - .setSequenceId(e.getValue()) - .build()) - .collect(Collectors.toList()); + .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() + .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())).setSequenceId(e.getValue()) + .build()) + .collect(Collectors.toList()); } public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() - .setRegionSpecifier(HBaseProtos.RegionSpecifier - .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) - .build()) - .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() - .get(Size.Unit.KILOBYTE)) - .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) - .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) - .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) - .setDataLocality(regionMetrics.getDataLocality()) - .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) - .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() - .get(Size.Unit.KILOBYTE)) - .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) - .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) - .setReadRequestsCount(regionMetrics.getReadRequestCount()) - .setCpRequestsCount(regionMetrics.getCpRequestCount()) - .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) - .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() - .get(Size.Unit.KILOBYTE)) - .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() - .get(Size.Unit.KILOBYTE)) - 
.setStores(regionMetrics.getStoreCount()) - .setStorefiles(regionMetrics.getStoreFileCount()) - .setStoreRefCount(regionMetrics.getStoreRefCount()) - .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount()) - .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) - .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) - .setStoreUncompressedSizeMB( - (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) - .build(); + .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())).build()) + .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize().get(Size.Unit.KILOBYTE)) + .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) + .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) + .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) + .setDataLocality(regionMetrics.getDataLocality()) + .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) + .setTotalStaticIndexSizeKB( + (int) regionMetrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE)) + .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) + .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) + .setReadRequestsCount(regionMetrics.getReadRequestCount()) + .setCpRequestsCount(regionMetrics.getCpRequestCount()) + .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) + .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)) + .setRootIndexSizeKB( + (int) regionMetrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)) + .setStores(regionMetrics.getStoreCount()).setStorefiles(regionMetrics.getStoreFileCount()) + .setStoreRefCount(regionMetrics.getStoreRefCount()) + .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount()) + .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) + .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) + .setStoreUncompressedSizeMB( + (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) + .build(); } public static RegionMetricsBuilder newBuilder(byte[] name) { @@ -164,6 +154,7 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWithSsdWeight; private long blocksTotalWeight; private CompactionState compactionState; + private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -172,135 +163,140 @@ public RegionMetricsBuilder setStoreCount(int value) { this.storeCount = value; return this; } + public RegionMetricsBuilder setStoreFileCount(int value) { this.storeFileCount = value; return this; } + public RegionMetricsBuilder setStoreRefCount(int value) { this.storeRefCount = value; return this; } + public RegionMetricsBuilder setMaxCompactedStoreFileRefCount(int value) { this.maxCompactedStoreFileRefCount = value; return this; } + public RegionMetricsBuilder setCompactingCellCount(long value) { this.compactingCellCount = value; return this; } + public RegionMetricsBuilder setCompactedCellCount(long value) { this.compactedCellCount = value; return this; } + public RegionMetricsBuilder setStoreFileSize(Size value) { this.storeFileSize = value; return this; } + public RegionMetricsBuilder setMemStoreSize(Size value) { this.memStoreSize = 
value; return this; } + public RegionMetricsBuilder setStoreFileIndexSize(Size value) { this.indexSize = value; return this; } + public RegionMetricsBuilder setStoreFileRootLevelIndexSize(Size value) { this.rootLevelIndexSize = value; return this; } + public RegionMetricsBuilder setStoreFileUncompressedDataIndexSize(Size value) { this.uncompressedDataIndexSize = value; return this; } + public RegionMetricsBuilder setBloomFilterSize(Size value) { this.bloomFilterSize = value; return this; } + public RegionMetricsBuilder setUncompressedStoreFileSize(Size value) { this.uncompressedStoreFileSize = value; return this; } + public RegionMetricsBuilder setWriteRequestCount(long value) { this.writeRequestCount = value; return this; } + public RegionMetricsBuilder setReadRequestCount(long value) { this.readRequestCount = value; return this; } + public RegionMetricsBuilder setCpRequestCount(long value) { this.cpRequestCount = value; return this; } + public RegionMetricsBuilder setFilteredReadRequestCount(long value) { this.filteredReadRequestCount = value; return this; } + public RegionMetricsBuilder setCompletedSequenceId(long value) { this.completedSequenceId = value; return this; } + public RegionMetricsBuilder setStoreSequenceIds(Map value) { this.storeSequenceIds = value; return this; } + public RegionMetricsBuilder setDataLocality(float value) { this.dataLocality = value; return this; } + public RegionMetricsBuilder setLastMajorCompactionTimestamp(long value) { this.lastMajorCompactionTimestamp = value; return this; } + public RegionMetricsBuilder setDataLocalityForSsd(float value) { this.dataLocalityForSsd = value; return this; } + public RegionMetricsBuilder setBlocksLocalWeight(long value) { this.blocksLocalWeight = value; return this; } + public RegionMetricsBuilder setBlocksLocalWithSsdWeight(long value) { this.blocksLocalWithSsdWeight = value; return this; } + public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { this.compactionState = compactionState; return this; } public RegionMetrics build() { - return new RegionMetricsImpl(name, - storeCount, - storeFileCount, - storeRefCount, - maxCompactedStoreFileRefCount, - compactingCellCount, - compactedCellCount, - storeFileSize, - memStoreSize, - indexSize, - rootLevelIndexSize, - uncompressedDataIndexSize, - bloomFilterSize, - uncompressedStoreFileSize, - writeRequestCount, - readRequestCount, - cpRequestCount, - filteredReadRequestCount, - completedSequenceId, - storeSequenceIds, - dataLocality, - lastMajorCompactionTimestamp, - dataLocalityForSsd, - blocksLocalWeight, - blocksLocalWithSsdWeight, - blocksTotalWeight, - compactionState); + return new RegionMetricsImpl(name, storeCount, storeFileCount, storeRefCount, + maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, storeFileSize, + memStoreSize, indexSize, rootLevelIndexSize, uncompressedDataIndexSize, bloomFilterSize, + uncompressedStoreFileSize, writeRequestCount, readRequestCount, cpRequestCount, + filteredReadRequestCount, completedSequenceId, storeSequenceIds, dataLocality, + lastMajorCompactionTimestamp, dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, + blocksTotalWeight, compactionState); } private static class RegionMetricsImpl implements RegionMetrics { @@ -331,33 +327,16 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWithSsdWeight; private final long 
blocksTotalWeight; private final CompactionState compactionState; - RegionMetricsImpl(byte[] name, - int storeCount, - int storeFileCount, - int storeRefCount, - int maxCompactedStoreFileRefCount, - final long compactingCellCount, - long compactedCellCount, - Size storeFileSize, - Size memStoreSize, - Size indexSize, - Size rootLevelIndexSize, - Size uncompressedDataIndexSize, - Size bloomFilterSize, - Size uncompressedStoreFileSize, - long writeRequestCount, - long readRequestCount, - long cpRequestCount, - long filteredReadRequestCount, - long completedSequenceId, - Map storeSequenceIds, - float dataLocality, - long lastMajorCompactionTimestamp, - float dataLocalityForSsd, - long blocksLocalWeight, - long blocksLocalWithSsdWeight, - long blocksTotalWeight, - CompactionState compactionState) { + + RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int storeRefCount, + int maxCompactedStoreFileRefCount, final long compactingCellCount, long compactedCellCount, + Size storeFileSize, Size memStoreSize, Size indexSize, Size rootLevelIndexSize, + Size uncompressedDataIndexSize, Size bloomFilterSize, Size uncompressedStoreFileSize, + long writeRequestCount, long readRequestCount, long cpRequestCount, + long filteredReadRequestCount, long completedSequenceId, Map storeSequenceIds, + float dataLocality, long lastMajorCompactionTimestamp, float dataLocalityForSsd, + long blocksLocalWeight, long blocksLocalWithSsdWeight, long blocksTotalWeight, + CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -524,65 +503,44 @@ public CompactionState getCompactionState() { @Override public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", - this.getStoreCount()); - Strings.appendKeyValue(sb, "storeFileCount", - this.getStoreFileCount()); - Strings.appendKeyValue(sb, "storeRefCount", - this.getStoreRefCount()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "storeCount", this.getStoreCount()); + Strings.appendKeyValue(sb, "storeFileCount", this.getStoreFileCount()); + Strings.appendKeyValue(sb, "storeRefCount", this.getStoreRefCount()); Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", this.getMaxCompactedStoreFileRefCount()); - Strings.appendKeyValue(sb, "uncompressedStoreFileSize", - this.getUncompressedStoreFileSize()); + Strings.appendKeyValue(sb, "uncompressedStoreFileSize", this.getUncompressedStoreFileSize()); Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTimestamp()); - Strings.appendKeyValue(sb, "storeFileSize", - this.getStoreFileSize()); + this.getLastMajorCompactionTimestamp()); + Strings.appendKeyValue(sb, "storeFileSize", this.getStoreFileSize()); if (this.getUncompressedStoreFileSize().get() != 0) { Strings.appendKeyValue(sb, "compressionRatio", - String.format("%.4f", - (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) / - (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); + String.format("%.4f", (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) + / (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); } - Strings.appendKeyValue(sb, "memStoreSize", - this.getMemStoreSize()); - Strings.appendKeyValue(sb, "readRequestCount", - this.getReadRequestCount()); - Strings.appendKeyValue(sb, "cpRequestCount", - this.getCpRequestCount()); - Strings.appendKeyValue(sb, "writeRequestCount", - this.getWriteRequestCount()); - 
Strings.appendKeyValue(sb, "rootLevelIndexSize", - this.getStoreFileRootLevelIndexSize()); + Strings.appendKeyValue(sb, "memStoreSize", this.getMemStoreSize()); + Strings.appendKeyValue(sb, "readRequestCount", this.getReadRequestCount()); + Strings.appendKeyValue(sb, "cpRequestCount", this.getCpRequestCount()); + Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); + Strings.appendKeyValue(sb, "rootLevelIndexSize", this.getStoreFileRootLevelIndexSize()); Strings.appendKeyValue(sb, "uncompressedDataIndexSize", - this.getStoreFileUncompressedDataIndexSize()); - Strings.appendKeyValue(sb, "bloomFilterSize", - this.getBloomFilterSize()); - Strings.appendKeyValue(sb, "compactingCellCount", - this.getCompactingCellCount()); - Strings.appendKeyValue(sb, "compactedCellCount", - this.getCompactedCellCount()); + this.getStoreFileUncompressedDataIndexSize()); + Strings.appendKeyValue(sb, "bloomFilterSize", this.getBloomFilterSize()); + Strings.appendKeyValue(sb, "compactingCellCount", this.getCompactingCellCount()); + Strings.appendKeyValue(sb, "compactedCellCount", this.getCompactedCellCount()); float compactionProgressPct = Float.NaN; if (this.getCompactingCellCount() > 0) { - compactionProgressPct = ((float) this.getCompactedCellCount() / - (float) this.getCompactingCellCount()); + compactionProgressPct = + ((float) this.getCompactedCellCount() / (float) this.getCompactingCellCount()); } - Strings.appendKeyValue(sb, "compactionProgressPct", - compactionProgressPct); - Strings.appendKeyValue(sb, "completedSequenceId", - this.getCompletedSequenceId()); - Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); - Strings.appendKeyValue(sb, "dataLocalityForSsd", - this.getDataLocalityForSsd()); - Strings.appendKeyValue(sb, "blocksLocalWeight", - blocksLocalWeight); - Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", - blocksLocalWithSsdWeight); - Strings.appendKeyValue(sb, "blocksTotalWeight", - blocksTotalWeight); - Strings.appendKeyValue(sb, "compactionState", - compactionState); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "completedSequenceId", this.getCompletedSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", this.getDataLocality()); + Strings.appendKeyValue(sb, "dataLocalityForSsd", this.getDataLocalityForSsd()); + Strings.appendKeyValue(sb, "blocksLocalWeight", blocksLocalWeight); + Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", blocksLocalWithSsdWeight); + Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + Strings.appendKeyValue(sb, "compactionState", compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java index 3024962ebd67..4cdb4ea2ade6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,14 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by a region server if it will block and wait to serve a request. 
- * For example, the client wants to insert something to a region while the - * region is compacting. Keep variance in the passed 'msg' low because its msg is used as a key - * over in {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} - * grouping failure types. + * Thrown by a region server if it will block and wait to serve a request. For example, the client + * wants to insert something to a region while the region is compacting. Keep variance in the passed + * 'msg' low because its msg is used as a key over in + * {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} grouping failure + * types. */ @InterfaceAudience.Public public class RegionTooBusyException extends IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java index 6f02df2028f9..4d1deebb4e87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java index 9df4f893c714..46cc77c61b8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java index ddd4b2ec03f5..38286afa2d15 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -92,8 +92,7 @@ default String getVersion() { Map> getReplicationLoadSourceMap(); /** - * Call directly from client such as hbase shell - * @return ReplicationLoadSink + * Call directly from client such as hbase shell n */ @Nullable ReplicationLoadSink getReplicationLoadSink(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index dd2e836487f8..99f8520aa362 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,18 +6,18 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -37,6 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -62,12 +62,12 @@ public static ServerMetrics toServerMetrics(ClusterStatusProtos.LiveServerInfo s } public static ServerMetrics toServerMetrics(ServerName serverName, - ClusterStatusProtos.ServerLoad serverLoadPB) { + ClusterStatusProtos.ServerLoad serverLoadPB) { return toServerMetrics(serverName, 0, "0.0.0", serverLoadPB); } public static ServerMetrics toServerMetrics(ServerName serverName, int versionNumber, - String version, ClusterStatusProtos.ServerLoad serverLoadPB) { + String version, ClusterStatusProtos.ServerLoad serverLoadPB) { return ServerMetricsBuilder.newBuilder(serverName) .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) @@ -80,48 +80,43 @@ public static ServerMetrics toServerMetrics(ServerName serverName, int versionNu .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList())) - .setUserMetrics(serverLoadPB.getUserLoadsList().stream() - .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) + .setUserMetrics(serverLoadPB.getUserLoadsList().stream() + .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() ? 
ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) : null) - .setTasks(serverLoadPB.getTasksList().stream() - .map(ProtobufUtil::getServerTask).collect(Collectors.toList())) + .setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask) + .collect(Collectors.toList())) .setReportTimestamp(serverLoadPB.getReportEndTime()) .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) .setVersion(version).build(); } public static List toCoprocessor(Collection names) { - return names.stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) - .collect(Collectors.toList()); + return names.stream().map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList()); } public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) { ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder() - .setNumberOfRequests(metrics.getRequestCountPerSecond()) - .setTotalNumberOfRequests(metrics.getRequestCount()) - .setInfoServerPort(metrics.getInfoServerPort()) - .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) - .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) - .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) - .addAllRegionLoads( - metrics.getRegionMetrics().values().stream().map(RegionMetricsBuilder::toRegionLoad) - .collect(Collectors.toList())) - .addAllUserLoads( - metrics.getUserMetrics().values().stream().map(UserMetricsBuilder::toUserMetrics) - .collect(Collectors.toList())) - .addAllReplLoadSource( - metrics.getReplicationLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) - .addAllTasks( - metrics.getTasks().stream().map(ProtobufUtil::toServerTask) - .collect(Collectors.toList())) - .setReportStartTime(metrics.getLastReportTimestamp()) - .setReportEndTime(metrics.getReportTimestamp()); + .setNumberOfRequests(metrics.getRequestCountPerSecond()) + .setTotalNumberOfRequests(metrics.getRequestCount()) + .setInfoServerPort(metrics.getInfoServerPort()) + .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) + .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) + .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) + .addAllRegionLoads(metrics.getRegionMetrics().values().stream() + .map(RegionMetricsBuilder::toRegionLoad).collect(Collectors.toList())) + .addAllUserLoads(metrics.getUserMetrics().values().stream() + .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) + .addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .addAllTasks( + metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList())) + .setReportStartTime(metrics.getLastReportTimestamp()) + .setReportEndTime(metrics.getReportTimestamp()); if (metrics.getReplicationLoadSink() != null) { builder.setReplLoadSink(ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink())); } @@ -186,7 +181,6 @@ public ServerMetricsBuilder setWriteRequestCount(long value) { return this; } - public ServerMetricsBuilder setUsedHeapSize(Size value) { this.usedHeapSize = value; return this; @@ -243,25 +237,10 @@ public ServerMetricsBuilder setTasks(List tasks) { } public ServerMetrics build() { - return new ServerMetricsImpl( - serverName, - versionNumber, - version, - requestCountPerSecond, - 
requestCount, - readRequestCount, - writeRequestCount, - usedHeapSize, - maxHeapSize, - infoServerPort, - sources, - sink, - regionStatus, - coprocessorNames, - reportTimestamp, - lastReportTimestamp, - userMetrics, - tasks); + return new ServerMetricsImpl(serverName, versionNumber, version, requestCountPerSecond, + requestCount, readRequestCount, writeRequestCount, usedHeapSize, maxHeapSize, infoServerPort, + sources, sink, regionStatus, coprocessorNames, reportTimestamp, lastReportTimestamp, + userMetrics, tasks); } private static class ServerMetricsImpl implements ServerMetrics { @@ -286,12 +265,11 @@ private static class ServerMetricsImpl implements ServerMetrics { private final List tasks; ServerMetricsImpl(ServerName serverName, int versionNumber, String version, - long requestCountPerSecond, long requestCount, long readRequestsCount, - long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, - int infoServerPort, List sources, ReplicationLoadSink sink, - Map regionStatus, Set coprocessorNames, - long reportTimestamp, long lastReportTimestamp, Map userMetrics, - List tasks) { + long requestCountPerSecond, long requestCount, long readRequestsCount, + long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, int infoServerPort, + List sources, ReplicationLoadSink sink, + Map regionStatus, Set coprocessorNames, long reportTimestamp, + long lastReportTimestamp, Map userMetrics, List tasks) { this.serverName = Preconditions.checkNotNull(serverName); this.versionNumber = versionNumber; this.version = version; @@ -306,7 +284,7 @@ private static class ServerMetricsImpl implements ServerMetrics { this.sink = sink; this.regionStatus = Preconditions.checkNotNull(regionStatus); this.userMetrics = Preconditions.checkNotNull(userMetrics); - this.coprocessorNames =Preconditions.checkNotNull(coprocessorNames); + this.coprocessorNames = Preconditions.checkNotNull(coprocessorNames); this.reportTimestamp = reportTimestamp; this.lastReportTimestamp = lastReportTimestamp; this.tasks = tasks; @@ -367,11 +345,11 @@ public List getReplicationLoadSourceList() { } @Override - public Map> getReplicationLoadSourceMap(){ - Map> sourcesMap = new HashMap<>(); - for(ReplicationLoadSource loadSource : sources){ - sourcesMap.computeIfAbsent(loadSource.getPeerID(), - peerId -> new ArrayList<>()).add(loadSource); + public Map> getReplicationLoadSourceMap() { + Map> sourcesMap = new HashMap<>(); + for (ReplicationLoadSource loadSource : sources) { + sourcesMap.computeIfAbsent(loadSource.getPeerID(), peerId -> new ArrayList<>()) + .add(loadSource); } return sourcesMap; } @@ -434,8 +412,8 @@ public String toString() { storeFileCount += r.getStoreFileCount(); storeRefCount += r.getStoreRefCount(); int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); - maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, - currentMaxCompactedStoreFileRefCount); + maxCompactedStoreFileRefCount = + Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); @@ -450,21 +428,20 @@ public String toString() { compactingCellCount += r.getCompactingCellCount(); } StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", - Double.valueOf(getRequestCountPerSecond())); + Double.valueOf(getRequestCountPerSecond())); 
Strings.appendKeyValue(sb, "numberOfOnlineRegions", - Integer.valueOf(getRegionMetrics().size())); + Integer.valueOf(getRegionMetrics().size())); Strings.appendKeyValue(sb, "usedHeapMB", getUsedHeapSize()); Strings.appendKeyValue(sb, "maxHeapMB", getMaxHeapSize()); Strings.appendKeyValue(sb, "numberOfStores", storeCount); Strings.appendKeyValue(sb, "numberOfStorefiles", storeFileCount); Strings.appendKeyValue(sb, "storeRefCount", storeRefCount); - Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", - maxCompactedStoreFileRefCount); + Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", maxCompactedStoreFileRefCount); Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", uncompressedStoreFileSizeMB); Strings.appendKeyValue(sb, "storefileSizeMB", storeFileSizeMB); if (uncompressedStoreFileSizeMB != 0) { - Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", - (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); } Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB); Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount); @@ -478,8 +455,7 @@ public String toString() { Strings.appendKeyValue(sb, "currentCompactedKVs", compactedCellCount); float compactionProgressPct = Float.NaN; if (compactingCellCount > 0) { - compactionProgressPct = - Float.valueOf((float) compactedCellCount / compactingCellCount); + compactionProgressPct = Float.valueOf((float) compactedCellCount / compactingCellCount); } Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); Strings.appendKeyValue(sb, "coprocessors", getCoprocessorNames()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java index e791093e43d7..cd6d41169bb8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java index d4937373789e..3ecd0c16cd9c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +33,8 @@ public static ServerTaskBuilder newBuilder() { private long startTime; private long completionTime; - private ServerTaskBuilder() { } + private ServerTaskBuilder() { + } private static final class ServerTaskImpl implements ServerTask { @@ -44,7 +45,7 @@ private static final class ServerTaskImpl implements ServerTask { private final long completionTime; private ServerTaskImpl(final String description, final String status, - final ServerTask.State state, final long startTime, final long completionTime) { + final ServerTask.State state, final long startTime, final long completionTime) { this.description = description; this.status = status; this.state = state; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java index 0e7716a0a619..c248849e3630 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.math.BigDecimal; @@ -24,8 +24,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * It is used to represent the size with different units. - * This class doesn't serve for the precise computation. + * It is used to represent the size with different units. This class doesn't serve for the precise + * computation. */ @InterfaceAudience.Public public final class Size implements Comparable { @@ -40,6 +40,7 @@ public enum Unit { MEGABYTE(97, "MB"), KILOBYTE(96, "KB"), BYTE(95, "B"); + private final int orderOfSize; private final String simpleName; @@ -91,7 +92,6 @@ public double get() { /** * get the value which is converted to specified unit. 
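Aside (illustrative only, not part of the patch): the unit conversion described just above is plain base-1024 arithmetic; a minimal hedged sketch, assuming the public Size(double, Unit) constructor, is:

    Size heap = new Size(1.5, Size.Unit.GIGABYTE);
    double mb = heap.get(Size.Unit.MEGABYTE); // converted value: 1536.0
    double raw = heap.get();                  // value in its own unit: 1.5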
- * * @param unit size unit * @return the converted value */ @@ -146,7 +146,7 @@ public boolean equals(Object obj) { return true; } if (obj instanceof Size) { - return compareTo((Size)obj) == 0; + return compareTo((Size) obj) == 0; } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java index 9d67a37695ca..ae6721813a8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java index a113f7c67bf0..98e958aa65b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public TableInfoMissingException(String message) { } /** - * @param message the message for this exception + * @param message the message for this exception * @param throwable the {@link Throwable} to use for this exception */ public TableInfoMissingException(String message, Throwable throwable) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java index 7e5046538abc..54f44405c584 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java index 90c015674ca6..14720811ca16 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +26,7 @@ @InterfaceAudience.Public public class TableNotEnabledException extends DoNotRetryIOException { private static final long serialVersionUID = 262144L; + /** default constructor */ public TableNotEnabledException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java index ae114fed0e62..416d8601fc3b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java index 850cd9600623..dfe5f682f382 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ public class UnknownRegionException extends DoNotRetryRegionException { /** * Constructs a new UnknownRegionException with the specified detail message. - * * @param message the detail message */ public UnknownRegionException(String message) { @@ -39,9 +37,8 @@ public UnknownRegionException(String message) { /** * Constructs a new UnknownRegionException with the specified detail message and cause. - * * @param message the detail message - * @param cause the cause of the exception + * @param cause the cause of the exception */ public UnknownRegionException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java index 14afb977b5de..fec8e57bee2e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a region server is passed an unknown scanner ID. 
- * This usually means that the client has taken too long between checkins and so the - * scanner lease on the server-side has expired OR the server-side is closing - * down and has cancelled all leases. + * Thrown if a region server is passed an unknown scanner ID. This usually means that the client has + * taken too long between checkins and so the scanner lease on the server-side has expired OR the + * server-side is closing down and has cancelled all leases. */ @InterfaceAudience.Public public class UnknownScannerException extends DoNotRetryIOException { @@ -42,7 +40,7 @@ public UnknownScannerException(String message) { } /** - * @param message the message for this exception + * @param message the message for this exception * @param exception the exception to grab data from */ public UnknownScannerException(String message, Exception exception) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 6c2ba07cc3d6..2710aa9be273 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulates per-user load metrics. - */ + * Encapsulates per-user load metrics. + */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface UserMetrics { @@ -60,8 +56,8 @@ interface ClientMetrics { long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor - * service requests made by the user + * @return the number of write requests and read requests and coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index 70d28883c269..ab63f19fec85 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; @@ -35,23 +31,24 @@ public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); - userLoad.getClientMetricsList().stream().map( - clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), - clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), - clientMetrics.getFilteredRequestsCount())).forEach(builder::addClientMetris); + userLoad.getClientMetricsList().stream() + .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), + clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), + clientMetrics.getFilteredRequestsCount())) + .forEach(builder::addClientMetris); return builder.build(); } public static ClusterStatusProtos.UserLoad toUserMetrics(UserMetrics userMetrics) { ClusterStatusProtos.UserLoad.Builder builder = - ClusterStatusProtos.UserLoad.newBuilder().setUserName(userMetrics.getNameAsString()); - userMetrics.getClientMetrics().values().stream().map( - clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() - .setHostName(clientMetrics.getHostName()) - .setWriteRequestsCount(clientMetrics.getWriteRequestsCount()) - .setReadRequestsCount(clientMetrics.getReadRequestsCount()) - .setFilteredRequestsCount(clientMetrics.getFilteredReadRequestsCount()).build()) - .forEach(builder::addClientMetrics); + ClusterStatusProtos.UserLoad.newBuilder().setUserName(userMetrics.getNameAsString()); + userMetrics.getClientMetrics().values().stream() + .map(clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() + .setHostName(clientMetrics.getHostName()) + .setWriteRequestsCount(clientMetrics.getWriteRequestsCount()) + .setReadRequestsCount(clientMetrics.getReadRequestsCount()) + .setFilteredRequestsCount(clientMetrics.getFilteredReadRequestsCount()).build()) + .forEach(builder::addClientMetrics); return builder.build(); } @@ -59,9 +56,9 @@ public static UserMetricsBuilder newBuilder(byte[] name) { return new UserMetricsBuilder(name); } - private final byte[] name; private Map clientMetricsMap = new HashMap<>(); + private UserMetricsBuilder(byte[] name) { this.name = name; } @@ -82,26 +79,30 @@ public static class ClientMetricsImpl implements UserMetrics.ClientMetrics { private final long writeRequestCount; public ClientMetricsImpl(String hostName, long readRequest, long writeRequest, - long filteredReadRequestsCount) { + long filteredReadRequestsCount) { this.hostName = hostName; this.readRequestCount = readRequest; this.writeRequestCount = writeRequest; this.filteredReadRequestsCount = filteredReadRequestsCount; } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestCount; } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestCount; } - @Override public long getFilteredReadRequestsCount() { + @Override + public long getFilteredReadRequestsCount() { return filteredReadRequestsCount; } } @@ -115,33 +116,38 @@ private static class UserMetricsImpl implements UserMetrics { this.clientMetricsMap = clientMetricsMap; } - @Override public byte[] 
getUserName() { + @Override + public byte[] getUserName() { return name; } - @Override public long getReadRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getReadRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()).reduce(0L, + Long::sum); } - @Override public long getWriteRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getWriteRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()).reduce(0L, + Long::sum); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return this.clientMetricsMap; } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return clientMetricsMap.values().stream().map(c -> c.getFilteredReadRequestsCount()) - .reduce(0L, Long::sum); + .reduce(0L, Long::sum); } @Override public String toString() { - StringBuilder sb = Strings - .appendKeyValue(new StringBuilder(), "readRequestCount", this.getReadRequestCount()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "readRequestCount", this.getReadRequestCount()); Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); Strings.appendKeyValue(sb, "filteredReadRequestCount", this.getFilteredReadRequests()); return sb.toString(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java index 4dc44b4c3c69..f361d43f61db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -42,8 +40,7 @@ public ZooKeeperConnectionException(String message) { /** * Constructor taking another exception. - * - * @param message the message for this exception + * @param message the message for this exception * @param exception the exception to grab data from */ public ZooKeeperConnectionException(String message, Exception exception) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index 92b046436258..48cec12f43c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.yetus.audience.InterfaceAudience; /** * Helper class for custom client scanners. 
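Aside (illustrative only, not part of the patch): the metrics types reworked above (ServerMetrics, RegionMetrics, UserMetrics, Size) are read-only views that a client can obtain through the Admin API. A minimal hedged sketch, assuming an open Connection named conn and the usual org.apache.hadoop.hbase imports:

    try (Admin admin = conn.getAdmin()) {
      ClusterMetrics cluster = admin.getClusterMetrics();
      for (Map.Entry<ServerName, ServerMetrics> e : cluster.getLiveServerMetrics().entrySet()) {
        ServerMetrics sm = e.getValue();
        // per-server request count, heap usage via the Size value class, and region/user counts
        System.out.println(e.getKey() + " requests=" + sm.getRequestCount() + " usedHeapMB="
            + sm.getUsedHeapSize().get(Size.Unit.MEGABYTE) + " regions="
            + sm.getRegionMetrics().size() + " users=" + sm.getUserMetrics().size());
      }
    }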
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index 9e33a12af6b5..bb44defbac6a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,10 +27,11 @@ abstract class AbstractResponse { public enum ResponseType { - SINGLE (0), - MULTI (1); + SINGLE(0), + MULTI(1); - ResponseType(int value) {} + ResponseType(int value) { + } } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 60137d23fff2..2380335e56b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -222,7 +222,7 @@ protected final CompletableFuture call(Callable callab } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") Set getParsedServers() { return addr2Stub.keySet(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index fdf3485c2548..0d141e42b6d7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by - * {@link Table#batch} to associate the action with it's region and maintain - * the index from the original request. + * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by + * {@link Table#batch} to associate the action with it's region and maintain the index from the + * original request. */ @InterfaceAudience.Private public class Action implements Comparable { @@ -46,7 +45,7 @@ public Action(Row action, int originalIndex, int priority) { /** * Creates an action for a particular replica from original action. - * @param action Original action. + * @param action Original action. * @param replicaId Replica id for the new action. 
*/ public Action(Action action, int replicaId) { @@ -76,7 +75,9 @@ public int getReplicaId() { return replicaId; } - public int getPriority() { return priority; } + public int getPriority() { + return priority; + } @Override public int compareTo(Action other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 5d4337e34d41..736daf93eff2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,11 +71,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and - * call {@link #close()} when done. - *
    Admin can be used to create, drop, list, enable and disable and otherwise modify tables, - * as well as perform other administrative operations. - * + * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and call + * {@link #close()} when done. + *
    + * Admin can be used to create, drop, list, enable and disable and otherwise modify tables, as well + * as perform other administrative operations. * @see ConnectionFactory * @see Connection * @see Table @@ -125,7 +125,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables. - * * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ @@ -133,7 +132,6 @@ public interface Admin extends Abortable, Closeable { /** * List all userspace tables and whether or not include system tables. - * * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ @@ -141,7 +139,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables that match the given pattern. - * * @param pattern The compiled regular expression to match against * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs @@ -153,19 +150,17 @@ default List listTableDescriptors(Pattern pattern) throws IOExc /** * List all the tables matching the given pattern. - * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables false to match only against userspace tables * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTableDescriptors() */ List listTableDescriptors(Pattern pattern, boolean includeSysTables) - throws IOException; + throws IOException; /** * List all of the names of userspace tables. - * * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ @@ -183,72 +178,79 @@ default TableName[] listTableNames(Pattern pattern) throws IOException { /** * List all of the names of userspace tables. - * @param pattern The regular expression to match against + * @param pattern The regular expression to match against * @param includeSysTables false to match only against userspace tables * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ - TableName[] listTableNames(Pattern pattern, boolean includeSysTables) - throws IOException; + TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException; /** * Get a table descriptor. - * * @param tableName as a {@link TableName} * @return the tableDescriptor * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs */ - TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException; + TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; /** * Creates a new table. Synchronous operation. - * * @param desc table descriptor for table - * @throws IllegalArgumentException if the table name is reserved + * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). 
- * @throws IOException if a remote or network exception occurs + * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * concurrent threads, the table may + * have been created between + * test-for-existence and + * attempt-at-creation). + * @throws IOException if a remote or network exception + * occurs */ default void createTable(TableDescriptor desc) throws IOException { get(createTableAsync(desc), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** - * Creates a new table with the specified number of regions. The start key specified will become + * Creates a new table with the specified number of regions. The start key specified will become * the end key of the first region of the table, and the end key specified will become the start * key of the last region of the table (the first region has a null start key and the last region * has a null end key). BigInteger math will be used to divide the key range specified into enough * segments to make the required number of total regions. Synchronous operation. - * - * @param desc table descriptor for table - * @param startKey beginning of key range - * @param endKey end of key range + * @param desc table descriptor for table + * @param startKey beginning of key range + * @param endKey end of key range * @param numRegions the total number of regions to create - * @throws IOException if a remote or network exception occurs - * @throws IllegalArgumentException if the table name is reserved + * @throws IOException if a remote or network exception + * occurs + * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * concurrent threads, the table may + * have been created between + * test-for-existence and + * attempt-at-creation). */ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) - throws IOException; + throws IOException; /** * Creates a new table with an initial set of empty regions defined by the specified split keys. * The total number of regions created will be the number of split keys plus one. Synchronous * operation. Note : Avoid passing empty split key. - * - * @param desc table descriptor for table + * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table - * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated - * and if the split key has empty byte array. + * @throws IllegalArgumentException if the table name is reserved, if the + * split keys are repeated and if the + * split key has empty byte array. * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). - * @throws IOException if a remote or network exception occurs + * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * concurrent threads, the table may + * have been created between + * test-for-existence and + * attempt-at-creation). 
+ * @throws IOException if a remote or network exception + * occurs */ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException { get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); @@ -270,15 +272,13 @@ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOExce Future createTableAsync(TableDescriptor desc) throws IOException; /** - * Creates a new table but does not block and wait for it to come online. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * Throws IllegalArgumentException Bad table name, if the split keys - * are repeated and if the split key has empty byte array. - * - * @param desc table descriptor for table + * Creates a new table but does not block and wait for it to come online. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. Throws + * IllegalArgumentException Bad table name, if the split keys are repeated and if the split key + * has empty byte array. + * @param desc table descriptor for table * @param splitKeys keys to check if the table has been created with all split keys * @throws IOException if a remote or network exception occurs * @return the result of the async creation. You can use Future.get(long, TimeUnit) to wait on the @@ -296,22 +296,20 @@ default void deleteTable(TableName tableName) throws IOException { } /** - * Deletes the table but does not block and wait for it to be completely removed. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Deletes the table but does not block and wait for it to be completely removed. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async delete. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future deleteTableAsync(TableName tableName) throws IOException; /** * Truncate a table. Synchronous operation. - * @param tableName name of table to truncate + * @param tableName name of table to truncate * @param preserveSplits true if the splits should be preserved * @throws IOException if a remote or network exception occurs */ @@ -324,23 +322,22 @@ default void truncateTable(TableName tableName, boolean preserveSplits) throws I * Future.get(long, TimeUnit) to wait on the operation to complete. 
It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param tableName name of table to delete + * @param tableName name of table to delete * @param preserveSplits true if the splits should be preserved * @throws IOException if a remote or network exception occurs * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on the * operation to complete. */ - Future truncateTableAsync(TableName tableName, boolean preserveSplits) - throws IOException; + Future truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException; /** * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)} * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in * disabled state for it to be enabled. * @param tableName name of the table - * @throws IOException There could be couple types of - * IOException TableNotFoundException means the table doesn't exist. - * TableNotDisabledException means the table isn't in disabled state. + * @throws IOException There could be couple types of IOException TableNotFoundException means the + * table doesn't exist. TableNotDisabledException means the table isn't in + * disabled state. * @see #isTableEnabled(org.apache.hadoop.hbase.TableName) * @see #disableTable(org.apache.hadoop.hbase.TableName) * @see #enableTableAsync(org.apache.hadoop.hbase.TableName) @@ -350,30 +347,26 @@ default void enableTable(TableName tableName) throws IOException { } /** - * Enable the table but does not block and wait for it to be completely enabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Enable the table but does not block and wait for it to be completely enabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async enable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async enable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future enableTableAsync(TableName tableName) throws IOException; /** - * Disable the table but does not block and wait for it to be completely disabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Disable the table but does not block and wait for it to be completely disabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. 
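Aside (illustrative only, not part of the patch): every *Async method in this interface follows the same Future.get(long, TimeUnit) pattern described above. A minimal hedged sketch of disabling and then deleting a table this way (the table name is made up):

    TableName tn = TableName.valueOf("example_table");
    Future<Void> disabled = admin.disableTableAsync(tn);
    // wait up to 30 seconds; may throw ExecutionException or TimeoutException
    disabled.get(30, TimeUnit.SECONDS);
    admin.deleteTableAsync(tn).get(30, TimeUnit.SECONDS);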
* @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async disable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async disable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future disableTableAsync(TableName tableName) throws IOException; @@ -381,10 +374,9 @@ default void enableTable(TableName tableName) throws IOException { * Disable table and wait on completion. May timeout eventually. Use * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in - * enabled state for it to be disabled. - * @param tableName - * @throws IOException There could be couple types of IOException TableNotFoundException means the - * table doesn't exist. TableNotEnabledException means the table isn't in enabled state. + * enabled state for it to be disabled. n * @throws IOException There could be couple types of + * IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException + * means the table isn't in enabled state. */ default void disableTable(TableName tableName) throws IOException { get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); @@ -415,36 +407,34 @@ default void disableTable(TableName tableName) throws IOException { * Add a column family to an existing table. Synchronous operation. Use * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a * {@link Future} from which you can learn whether success or failure. - * @param tableName name of the table to add column family to + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs */ default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { get(addColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** - * Add a column family to an existing table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * - * @param tableName name of the table to add column family to + * Add a column family to an existing table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs * @return the result of the async add column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. */ Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException; + throws IOException; /** * Delete a column family from a table. Synchronous operation. 
Use * {@link #deleteColumnFamily(TableName, byte[])} instead because it returns a {@link Future} from * which you can learn whether success or failure. - * @param tableName name of table + * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs */ @@ -454,56 +444,51 @@ default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws } /** - * Delete a column family from a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * - * @param tableName name of table + * Delete a column family from a table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. + * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs * @return the result of the async delete column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. */ - Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) - throws IOException; + Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) throws IOException; /** * Modify an existing column family on a table. Synchronous operation. Use * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns * a {@link Future} from which you can learn whether success or failure. - * @param tableName name of table + * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs */ default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { get(modifyColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** - * Modify an existing column family on a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * - * @param tableName name of table + * Modify an existing column family on a table. Asynchronous operation. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. + * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs * @return the result of the async modify column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. 
*/ Future modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException; + throws IOException; /** * Change the store file tracker of the given table's given family. * @param tableName the table you want to change - * @param family the family you want to change - * @param dstSFT the destination store file tracker + * @param family the family you want to change + * @param dstSFT the destination store file tracker * @throws IOException if a remote or network exception occurs */ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT) @@ -515,8 +500,8 @@ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] fami /** * Change the store file tracker of the given table's given family. * @param tableName the table you want to change - * @param family the family you want to change - * @param dstSFT the destination store file tracker + * @param family the family you want to change + * @param dstSFT the destination store file tracker * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the * operation to complete * @throws IOException if a remote or network exception occurs @@ -526,7 +511,6 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Get all the online regions on a region server. - * * @return List of {@link RegionInfo} * @throws IOException if a remote or network exception occurs */ @@ -534,17 +518,15 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Flush a table. Synchronous operation. - * * @param tableName table to flush * @throws IOException if a remote or network exception occurs */ void flush(TableName tableName) throws IOException; /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. - * - * @param tableName table to flush + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. + * @param tableName table to flush * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ @@ -552,7 +534,6 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Flush an individual region. Synchronous operation. - * * @param regionName region to flush * @throws IOException if a remote or network exception occurs */ @@ -560,8 +541,7 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Flush a column family within a region. Synchronous operation. - * - * @param regionName region to flush + * @param regionName region to flush * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ @@ -575,10 +555,8 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] void flushRegionServer(ServerName serverName) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). 
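Because the compact calls described here (and the majorCompact variants further down) only ask the region servers to schedule work, they return before anything is actually compacted. A rough sketch, under the same hypothetical admin handle and table:

    TableName tn = TableName.valueOf("t1");
    admin.compact(tn, Bytes.toBytes("cf2"));   // minor compaction request for a single family
    admin.majorCompact(tn);                    // major compaction request for the whole table
    // Both calls return immediately; the compactions run in the background.
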
* @param tableName table to compact * @throws IOException if a remote or network exception occurs */ @@ -586,9 +564,8 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Compact an individual region. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compaction run and then it returns. It does not wait on the completion of Compaction (it can + * take a while). * @param regionName region to compact * @throws IOException if a remote or network exception occurs */ @@ -596,139 +573,119 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). + * @param tableName table to compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void compact(TableName tableName, byte[] columnFamily) - throws IOException; + void compact(TableName tableName, byte[] columnFamily) throws IOException; /** * Compact a column family within a region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param regionName region to compact + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). + * @param regionName region to compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void compactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). + * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @throws IOException if a remote or network exception occurs n */ void compact(TableName tableName, CompactType compactType) throws IOException, InterruptedException; /** - * Compact a column family within a table. Asynchronous operation in that this method - * requests that a Compaction run and then it returns. It does not wait on the - * completion of Compaction (it can take a while). - * - * @param tableName table to compact + * Compact a column family within a table. Asynchronous operation in that this method requests + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). 
+ * @param tableName table to compact * @param columnFamily column family within a table - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if not a mob column family or if a remote or network exception occurs - * @throws InterruptedException + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @throws IOException if not a mob column family or if a remote or network exception occurs n */ void compact(TableName tableName, byte[] columnFamily, CompactType compactType) throws IOException, InterruptedException; /** - * Major compact a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). * @param tableName table to major compact * @throws IOException if a remote or network exception occurs */ void majorCompact(TableName tableName) throws IOException; /** - * Major compact a table or an individual region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table or an individual region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param regionName region to major compact * @throws IOException if a remote or network exception occurs */ void majorCompactRegion(byte[] regionName) throws IOException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to major compact + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). + * @param tableName table to major compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void majorCompact(TableName tableName, byte[] columnFamily) - throws IOException; + void majorCompact(TableName tableName, byte[] columnFamily) throws IOException; /** - * Major compact a column family within region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param regionName egion to major compact + * Major compact a column family within region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). + * @param regionName egion to major compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void majorCompactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Major compact a table. 
Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). + * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @throws IOException if a remote or network exception occurs n */ void majorCompact(TableName tableName, CompactType compactType) throws IOException, InterruptedException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * - * @param tableName table to compact + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). + * @param tableName table to compact * @param columnFamily column family within a table - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} - * @throws IOException if not a mob column family or if a remote or network exception occurs - * @throws InterruptedException + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @throws IOException if not a mob column family or if a remote or network exception occurs n */ void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) throws IOException, InterruptedException; /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers * @throws IOException if a remote or network exception occurs */ Map compactionSwitch(boolean switchState, List serverNamesList) - throws IOException; + throws IOException; /** * Compact all regions on the region server. Asynchronous operation in that this method requests @@ -751,9 +708,10 @@ Map compactionSwitch(boolean switchState, List serv /** * Move the region encodedRegionName to a random server. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. 
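A sketch of moving one region, reusing the encoded region name and server name from the example above (both illustrative values) together with the same hypothetical admin handle:

    byte[] encoded = Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396");
    // With an explicit ServerName the region lands on that server; move(encoded) alone
    // would let the master pick a random destination.
    admin.move(encoded, ServerName.valueOf("host187.example.com,60020,1289493121758"));
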
* @throws IOException if we can't find a region named encodedRegionName */ void move(byte[] encodedRegionName) throws IOException; @@ -761,16 +719,18 @@ Map compactionSwitch(boolean switchState, List serv /** * Move the region rencodedRegionName to destServerName. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. - * @param destServerName The servername of the destination regionserver. If passed the empty byte - * array we'll assign to a random server. A server name is made of host, port and - * startcode. Here is an example: host187.example.com,60020,1289493121758 + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. + * @param destServerName The servername of the destination regionserver. If passed the empty + * byte array we'll assign to a random server. A server name is made of + * host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758 * @throws IOException if we can't find a region named encodedRegionName * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link #move(byte[], ServerName)} - * instead. And if you want to move the region to a random server, please use - * {@link #move(byte[])}. + * instead. And if you want to move the region to a random server, please use + * {@link #move(byte[])}. * @see HBASE-22108 */ @Deprecated @@ -785,12 +745,13 @@ default void move(byte[] encodedRegionName, byte[] destServerName) throws IOExce /** * Move the region encodedRegionName to destServerName. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. - * @param destServerName The servername of the destination regionserver. A server name is made of - * host, port and startcode. Here is an example: - * host187.example.com,60020,1289493121758 + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. + * @param destServerName The servername of the destination regionserver. A server name is made + * of host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758 * @throws IOException if we can't find a region named encodedRegionName */ void move(byte[] encodedRegionName, ServerName destServerName) throws IOException; @@ -810,16 +771,15 @@ default void move(byte[] encodedRegionName, byte[] destServerName) throws IOExce void unassign(byte[] regionName) throws IOException; /** - * Unassign a region from current hosting regionserver. Region will then be assigned to a - * regionserver chosen at random. Region could be reassigned back to the same server. Use {@link - * #move(byte[], ServerName)} if you want to control the region movement. - * + * Unassign a region from current hosting regionserver. Region will then be assigned to a + * regionserver chosen at random. Region could be reassigned back to the same server. Use + * {@link #move(byte[], ServerName)} if you want to control the region movement. * @param regionName Region to unassign. 
Will clear any existing RegionPlan if one found. - * @param force If true, force unassign (Will remove region from regions-in-transition too if - * present. If results in double assignment use hbck -fix to resolve. To be used by experts). + * @param force If true, force unassign (Will remove region from + * regions-in-transition too if present. If results in double assignment use + * hbck -fix to resolve. To be used by experts). * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -833,7 +793,6 @@ default void unassign(byte[] regionName, boolean force) throws IOException { * still online as per Master's in memory state. If this API is incorrectly used on active region * then master will loose track of that region. This is a special method that should be used by * experts or hbck. - * * @param regionName Region to offline. * @throws IOException if a remote or network exception occurs */ @@ -841,30 +800,27 @@ default void unassign(byte[] regionName, boolean force) throws IOException { /** * Turn the load balancer on or off. - * @param onOrOff Set to true to enable, false to disable. + * @param onOrOff Set to true to enable, false to disable. * @param synchronous If true, it waits until current balance() call, if outstanding, - * to return. + * to return. * @return Previous balancer value * @throws IOException if a remote or network exception occurs */ boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the - * reassignments. Can NOT run for various reasons. Check logs. - * + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. Can NOT run for various reasons. Check logs. * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs */ default boolean balance() throws IOException { - return balance(BalanceRequest.defaultInstance()) - .isBalancerRan(); + return balance(BalanceRequest.defaultInstance()).isBalancerRan(); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. * @throws IOException if a remote or network exception occurs @@ -872,39 +828,33 @@ default boolean balance() throws IOException { BalanceResponse balance(BalanceRequest request) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. If there is region in transition, force parameter of true - * would still run balancer. Can *not* run for other reasons. Check - * logs. + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. If there is region in transition, force parameter of true would still run + * balancer. Can *not* run for other reasons. Check logs. 
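The balance(BalanceRequest) overload shown above subsumes the deprecated boolean form. A minimal sketch of forcing a balancer run even while regions are in transition (hypothetical admin handle):

    BalanceResponse resp =
      admin.balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build());
    if (!resp.isBalancerRan()) {
      // Balancer refused to run (switched off, splits/merges in flight, ...); check the master logs.
    }
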
* @param force whether we should force balance even if there is region in transition * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ @Deprecated default boolean balance(boolean force) throws IOException { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(force) - .build() - ).isBalancerRan(); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(force).build()) + .isBalancerRan(); } /** * Query the current state of the balancer. - * * @return true if the balancer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ boolean isBalancerEnabled() throws IOException; /** - * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. - * Calling this API will drop all the cached blocks specific to a table from BlockCache. - * This can significantly impact the query performance as the subsequent queries will - * have to retrieve the blocks from underlying filesystem. - * + * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. Calling + * this API will drop all the cached blocks specific to a table from BlockCache. This can + * significantly impact the query performance as the subsequent queries will have to retrieve the + * blocks from underlying filesystem. * @param tableName table to clear block cache * @return CacheEvictionStats related to the eviction * @throws IOException if a remote or network exception occurs @@ -912,11 +862,9 @@ default boolean balance(boolean force) throws IOException { CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException; /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs */ @@ -925,11 +873,9 @@ default boolean normalize() throws IOException { } /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @param ntfp limit to tables matching the specified filter. * @return {@code true} if region normalizer ran, {@code false} otherwise. 
* @throws IOException if a remote or network exception occurs @@ -938,7 +884,6 @@ default boolean normalize() throws IOException { /** * Query the current state of the region normalizer. - * * @return true if region normalizer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ @@ -946,7 +891,6 @@ default boolean normalize() throws IOException { /** * Turn region normalizer on or off. - * * @return Previous normalizer value * @throws IOException if a remote or network exception occurs */ @@ -954,7 +898,6 @@ default boolean normalize() throws IOException { /** * Enable/Disable the catalog janitor/ - * * @param onOrOff if true enables the catalog janitor * @return the previous state * @throws IOException if a remote or network exception occurs @@ -963,7 +906,6 @@ default boolean normalize() throws IOException { /** * Ask for a scan of the catalog table. - * * @return the number of entries cleaned. Returns -1 if previous run is in progress. * @throws IOException if a remote or network exception occurs */ @@ -971,14 +913,12 @@ default boolean normalize() throws IOException { /** * Query on the catalog janitor state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCatalogJanitorEnabled() throws IOException; /** * Enable/Disable the cleaner chore. - * * @param onOrOff if true enables the cleaner chore * @return the previous state * @throws IOException if a remote or network exception occurs @@ -987,7 +927,6 @@ default boolean normalize() throws IOException { /** * Ask for cleaner chore to run. - * * @return true if cleaner chore ran, false otherwise * @throws IOException if a remote or network exception occurs */ @@ -995,25 +934,23 @@ default boolean normalize() throws IOException { /** * Query on the cleaner chore state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCleanerChoreEnabled() throws IOException; - /** * Merge two regions. Asynchronous operation. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge two - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge + * two adjacent regions * @throws IOException if a remote or network exception occurs * @deprecated since 2.3.0 and will be removed in 4.0.0. Multi-region merge feature is now * supported. Use {@link #mergeRegionsAsync(byte[][], boolean)} instead. */ @Deprecated default Future mergeRegionsAsync(byte[] nameOfRegionA, byte[] nameOfRegionB, - boolean forcible) throws IOException { + boolean forcible) throws IOException { byte[][] nameofRegionsToMerge = new byte[2][]; nameofRegionsToMerge[0] = nameOfRegionA; nameofRegionsToMerge[1] = nameOfRegionB; @@ -1023,12 +960,12 @@ default Future mergeRegionsAsync(byte[] nameOfRegionA, byte[] nameOfRegion /** * Merge multiple regions (>=2). Asynchronous operation. * @param nameofRegionsToMerge encoded or full name of daughter regions - * @param forcible true if do a compulsory merge, otherwise we will only merge - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only + * merge adjacent regions * @throws IOException if a remote or network exception occurs */ Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) - throws IOException; + throws IOException; /** * Split a table. 
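A sketch of merging regions through the multi-region API above, using made-up encoded region names and the same hypothetical admin handle:

    byte[][] toMerge = new byte[][] {
      Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
      Bytes.toBytes("d41d8cd98f00b204e9800998ecf8427e") };
    // forcible=false: the master only merges the regions if they are adjacent.
    admin.mergeRegionsAsync(toMerge, false).get(60, TimeUnit.SECONDS);
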
The method will execute split action for each region in table. @@ -1039,7 +976,7 @@ Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) /** * Split a table. - * @param tableName table to split + * @param tableName table to split * @param splitPoint the explicit position to split on * @throws IOException if a remote or network exception occurs */ @@ -1085,7 +1022,7 @@ default void modifyTable(TableDescriptor td) throws IOException { /** * Change the store file tracker of the given table. * @param tableName the table you want to change - * @param dstSFT the destination store file tracker + * @param dstSFT the destination store file tracker * @throws IOException if a remote or network exception occurs */ default void modifyTableStoreFileTracker(TableName tableName, String dstSFT) throws IOException { @@ -1096,7 +1033,7 @@ default void modifyTableStoreFileTracker(TableName tableName, String dstSFT) thr /** * Change the store file tracker of the given table. * @param tableName the table you want to change - * @param dstSFT the destination store file tracker + * @param dstSFT the destination store file tracker * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the * operation to complete * @throws IOException if a remote or network exception occurs @@ -1125,22 +1062,21 @@ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT /** * Check whether Master is in maintenance mode. - * * @throws IOException if a remote or network exception occurs */ - boolean isMasterInMaintenanceMode() throws IOException; + boolean isMasterInMaintenanceMode() throws IOException; /** * Stop the designated regionserver. - * * @param hostnamePort Hostname and port delimited by a : as in - * example.org:1234 + * example.org:1234 * @throws IOException if a remote or network exception occurs */ void stopRegionServer(String hostnamePort) throws IOException; /** * Get whole cluster metrics, containing status about: + * *

        * hbase version
        * cluster id
    @@ -1150,6 +1086,7 @@ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT
        * balancer
        * regions in transition
        * 
    + * * @return cluster metrics * @throws IOException if a remote or network exception occurs */ @@ -1189,29 +1126,26 @@ default Collection getRegionServers() throws IOException { } /** - * Retrieve all current live region servers including decommissioned - * if excludeDecommissionedRS is false, else non-decommissioned ones only - * + * Retrieve all current live region servers including decommissioned if excludeDecommissionedRS is + * false, else non-decommissioned ones only * @param excludeDecommissionedRS should we exclude decommissioned RS nodes * @return all current live region servers including/excluding decommissioned hosts * @throws IOException if a remote or network exception occurs */ default Collection getRegionServers(boolean excludeDecommissionedRS) - throws IOException { + throws IOException { List allServers = getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName(); if (!excludeDecommissionedRS) { return allServers; } List decommissionedRegionServers = listDecommissionedRegionServers(); - return allServers.stream() - .filter(s -> !decommissionedRegionServers.contains(s)) + return allServers.stream().filter(s -> !decommissionedRegionServers.contains(s)) .collect(ImmutableList.toImmutableList()); } /** * Get {@link RegionMetrics} of all regions hosted on a regionserver. - * * @param serverName region server from which {@link RegionMetrics} is required. * @return a {@link RegionMetrics} list of all regions hosted on a region server * @throws IOException if a remote or network exception occurs @@ -1220,14 +1154,13 @@ default Collection getRegionServers(boolean excludeDecommissionedRS) /** * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table. - * * @param serverName region server from which {@link RegionMetrics} is required. - * @param tableName get {@link RegionMetrics} of regions belonging to the table + * @param tableName get {@link RegionMetrics} of regions belonging to the table * @return region metrics map of all regions of a table hosted on a region server * @throws IOException if a remote or network exception occurs */ - List getRegionMetrics(ServerName serverName, - TableName tableName) throws IOException; + List getRegionMetrics(ServerName serverName, TableName tableName) + throws IOException; /** * @return Configuration used by the instance. @@ -1296,14 +1229,14 @@ default void deleteNamespace(String name) throws IOException { * @param name name of namespace descriptor * @return A descriptor * @throws org.apache.hadoop.hbase.NamespaceNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception + * occurs */ NamespaceDescriptor getNamespaceDescriptor(String name) - throws NamespaceNotFoundException, IOException; + throws NamespaceNotFoundException, IOException; /** * List available namespaces - * * @return List of namespace names * @throws IOException if a remote or network exception occurs */ @@ -1311,7 +1244,6 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * List available namespace descriptors - * * @return List of descriptors * @throws IOException if a remote or network exception occurs */ @@ -1335,7 +1267,6 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * Get the regions of a given table. - * * @param tableName the name of the table * @return List of {@link RegionInfo}. 
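Tying the metrics accessors together, a small sketch that walks every live region server and prints per-region store file sizes (a hypothetical reporting loop over the same assumed admin handle):

    for (ServerName sn : admin.getRegionServers()) {
      for (RegionMetrics rm : admin.getRegionMetrics(sn)) {
        System.out.println(Bytes.toStringBinary(rm.getRegionName()) + " " + rm.getStoreFileSize());
      }
    }
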
* @throws IOException if a remote or network exception occurs @@ -1347,19 +1278,17 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * Get tableDescriptors. - * * @param tableNames List of table names * @return returns a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ - List listTableDescriptors(List tableNames) - throws IOException; + List listTableDescriptors(List tableNames) throws IOException; /** * Abort a procedure. *

    * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * @param procId ID of the procedure to abort + * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if aborted, false if procedure already completed or does * not exist @@ -1374,23 +1303,22 @@ default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throw } /** - * Abort a procedure but does not block and wait for completion. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * - * @param procId ID of the procedure to abort + * Abort a procedure but does not block and wait for completion. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. Do not use. Usually it is ignored but if not, it can + * do more damage than good. See hbck2. + * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? - * @return true if aborted, false if procedure already completed or does not exist + * @return true if aborted, false if procedure already completed or does + * not exist * @throws IOException if a remote or network exception occurs * @deprecated since 2.1.1 and will be removed in 4.0.0. * @see HBASE-21223 */ @Deprecated Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) - throws IOException; + throws IOException; /** * Get procedures. @@ -1408,13 +1336,12 @@ Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) /** * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file. - * * Note that the actual rolling of the log writer is asynchronous and may not be complete when - * this method returns. As a side effect of this call, the named region server may schedule - * store flushes at the request of the wal. - * + * this method returns. As a side effect of this call, the named region server may schedule store + * flushes at the request of the wal. * @param serverName The servername of the regionserver. - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network + * exception occurs * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException */ void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException; @@ -1425,14 +1352,12 @@ Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames() */ default List getMasterCoprocessorNames() throws IOException { - return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) - .getMasterCoprocessorNames(); + return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessorNames(); } /** * Get the current compaction state of a table. It could be in a major compaction, a minor * compaction, both, or none. 
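A sketch of observing compaction progress from the client side, pairing the state call below with the last-major-compaction timestamp (hypothetical table name, same assumed admin handle):

    TableName tn = TableName.valueOf("t1");
    if (admin.getCompactionState(tn) == CompactionState.NONE) {
      long ts = admin.getLastMajorCompactionTimestamp(tn); // 0 if no major-compacted HFile exists yet
      System.out.println("newest major-compacted HFiles date from " + ts);
    }
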
- * * @param tableName table to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -1441,19 +1366,17 @@ default List getMasterCoprocessorNames() throws IOException { /** * Get the current compaction state of a table. It could be in a compaction, or none. - * - * @param tableName table to examine + * @param tableName table to examine * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - CompactionState getCompactionState(TableName tableName, - CompactType compactType) throws IOException; + CompactionState getCompactionState(TableName tableName, CompactType compactType) + throws IOException; /** * Get the current compaction state of region. It could be in a major compaction, a minor * compaction, both, or none. - * * @param regionName region to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -1461,11 +1384,8 @@ CompactionState getCompactionState(TableName tableName, CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed table - * - * The timestamp of the oldest HFile resulting from a major compaction of that table, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed table The timestamp of the oldest + * HFile resulting from a major compaction of that table, or 0 if no such HFile could be found. * @param tableName table to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -1473,11 +1393,9 @@ CompactionState getCompactionState(TableName tableName, long getLastMajorCompactionTimestamp(TableName tableName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed region. - * - * The timestamp of the oldest HFile resulting from a major compaction of that region, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed region. The timestamp of the + * oldest HFile resulting from a major compaction of that region, or 0 if no such HFile could be + * found. * @param regionName region to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -1486,53 +1404,54 @@ CompactionState getCompactionState(TableName tableName, /** * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be - * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken - * sequentially even when requested concurrently, across all tables. Snapshots are considered - * unique based on the name of the snapshot. Attempts to take a snapshot with the same - * name (even a different type or with different parameters) will fail with a + * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken sequentially + * even when requested concurrently, across all tables. Snapshots are considered unique based on + * the name of the snapshot. Attempts to take a snapshot with the same name (even a + * different type or with different parameters) will fail with a * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate * naming. Snapshot names follow the same naming constraints as tables in HBase. 
See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name of the snapshot to be created - * @param tableName name of the table for which snapshot is created - * @throws IOException if a remote or network exception occurs + * @param tableName name of the table for which snapshot is created + * @throws IOException if a remote or network + * exception occurs * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is + * formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(snapshotName, tableName, SnapshotType.FLUSH); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot - * @param type type of snapshot to take - * @throws IOException we fail to reach the master + * snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take + * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName, SnapshotType type) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, type)); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * + * snapshot. 
Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other * snapshots stored on the cluster * @param tableName name of the table to snapshot @@ -1543,20 +1462,19 @@ default void snapshot(String snapshotName, TableName tableName, SnapshotType typ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName, SnapshotType type, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps)); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other * snapshots stored on the cluster * @param tableName name of the table to snapshot @@ -1565,9 +1483,8 @@ default void snapshot(String snapshotName, TableName tableName, SnapshotType typ * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - default void snapshot(String snapshotName, TableName tableName, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + default void snapshot(String snapshotName, TableName tableName, Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps)); } @@ -1582,44 +1499,46 @@ default void snapshot(String snapshotName, TableName tableName, * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you are sure * about the type of snapshot that you want to take. * @param snapshot snapshot to take - * @throws IOException or we lose contact with the master. + * @throws IOException or we lose contact with the master. 
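A minimal sketch of taking and enumerating snapshots with the simpler overloads described above (hypothetical snapshot and table names, same assumed admin handle):

    admin.snapshot("snap_t1_2024", TableName.valueOf("t1"), SnapshotType.FLUSH);
    for (SnapshotDescription sd : admin.listSnapshots()) {
      System.out.println(sd.getName() + " -> " + sd.getTableName());
    }
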
* @throws SnapshotCreationException if snapshot failed to be taken - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ void snapshot(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException, IllegalArgumentException; + throws IOException, SnapshotCreationException, IllegalArgumentException; /** * Take a snapshot without waiting for the server to complete that snapshot (asynchronous). * Snapshots are considered unique based on the name of the snapshot. Snapshots are taken * sequentially even when requested concurrently, across all tables. - * * @param snapshot snapshot to take - * @throws IOException if the snapshot did not succeed or we lose contact with the master. + * @throws IOException if the snapshot did not succeed or we lose contact with the + * master. * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ Future snapshotAsync(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException; - - /** - * Check the current state of the passed snapshot. There are three possible states:

 - * 1. running - returns false 2. finished - returns true
 - * 3. finished with error - throws the exception that caused the snapshot to fail
 - * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
 - * run/started since the snapshot you are checking, you will receive an
 - * {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
 - *
 + throws IOException, SnapshotCreationException;
 +
 + /**
 + * Check the current state of the passed snapshot. There are three possible states:
 + * 1. running - returns false
 + * 2. finished - returns true
 + * 3. finished with error - throws the exception that caused the snapshot to fail
    + * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been + * run/started since the snapshot you are checking, you will receive an + * {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}. * @param snapshot description of the snapshot to check * @return true if the snapshot is completed, false if the snapshot is still - * running - * @throws IOException if we have a network issue - * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed + * running + * @throws IOException if we have a network issue + * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is - * unknown + * unknown */ boolean isSnapshotFinished(SnapshotDescription snapshot) - throws IOException, HBaseSnapshotException, UnknownSnapshotException; + throws IOException, HBaseSnapshotException, UnknownSnapshotException; /** * Restore the specified snapshot on the original table. (The table must be disabled) If the @@ -1628,7 +1547,7 @@ boolean isSnapshotFinished(SnapshotDescription snapshot) * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore * completes without problem the failsafe snapshot is deleted. * @param snapshotName name of the snapshot to restore - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ @@ -1641,14 +1560,14 @@ boolean isSnapshotFinished(SnapshotDescription snapshot) * be restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) - throws IOException, RestoreSnapshotException { + throws IOException, RestoreSnapshotException { restoreSnapshot(snapshotName, takeFailSafeSnapshot, false); } @@ -1659,10 +1578,10 @@ default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) * be restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". 
- * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @param restoreAcl true to restore acl of snapshot - * @throws IOException if a remote or network exception occurs + * @param restoreAcl true to restore acl of snapshot + * @throws IOException if a remote or network exception occurs * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ @@ -1672,31 +1591,30 @@ void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName) - throws IOException, TableExistsException, RestoreSnapshotException { + throws IOException, TableExistsException, RestoreSnapshotException { cloneSnapshot(snapshotName, tableName, false, null); } /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @param customSFT specify the StoreFileTracker used for the table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @param customSFT specify the StoreFileTracker used for the table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, - String customSFT) - throws IOException, TableExistsException, RestoreSnapshotException { + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException { get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -1704,15 +1622,15 @@ default void cloneSnapshot(String snapshotName, TableName tableName, boolean res /** * Create a new table by cloning the snapshot content. 
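Putting restore and clone side by side, a rough sketch under the usual assumptions (hypothetical snapshot and table names, admin handle already obtained): restore requires the table to be disabled first, while clone materialises a brand-new table from the same snapshot content.

    admin.disableTable(TableName.valueOf("t1"));
    admin.restoreSnapshot("snap_t1_2024", true); // true: take a failsafe snapshot before restoring
    admin.enableTable(TableName.valueOf("t1"));
    admin.cloneSnapshot("snap_t1_2024", TableName.valueOf("t1_restored_copy"));
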
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + throws IOException, TableExistsException, RestoreSnapshotException { get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -1724,41 +1642,40 @@ default void cloneSnapshot(String snapshotName, TableName tableName, boolean res * TimeoutException in case the wait timeout was not long enough to allow the operation to * complete. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @throws IOException if a remote or network exception occurs + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs * @throws TableExistsException if table to be cloned already exists * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit) to wait * on the operation to complete. */ default Future cloneSnapshotAsync(String snapshotName, TableName tableName) - throws IOException, TableExistsException { + throws IOException, TableExistsException { return cloneSnapshotAsync(snapshotName, tableName, false); } /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ default Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null); } /** * Create a new table by cloning the snapshot content. 
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to clone acl into newly created table - * @param customSFT specify the StroreFileTracker used for the table - * @throws IOException if a remote or network exception occurs - * @throws TableExistsException if table to be created already exists + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @param customSFT specify the StroreFileTracker used for the table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ @@ -1767,48 +1684,48 @@ Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolea /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @throws IOException if a remote or network exception occurs */ void execProcedure(String signature, String instance, Map props) - throws IOException; + throws IOException; /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. null if no return data. * @throws IOException if a remote or network exception occurs */ byte[] execProcedureWithReturn(String signature, String instance, Map props) - throws IOException; + throws IOException; /** - * Check the current state of the specified procedure. There are three possible states:
   - * <ol>
   - * <li>running - returns false</li>
   - * <li>finished - returns true</li>
   - * <li>finished with error - throws the exception that caused the procedure to fail</li>
   - * </ol>
   + * Check the current state of the specified procedure. There are three possible states:
   + * <ol>
   + * <li>running - returns false</li>
   + * <li>finished - returns true</li>
   + * <li>finished with error - throws the exception that caused the procedure to fail</li>
   + * </ol>
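The three states above translate directly into a polling loop. A minimal sketch, assuming the procedure was started earlier via execProcedure and that the signature, instance and props values are placeholders:

// Sketch: poll a distributed procedure until it finishes; isProcedureFinished()
// returns false while running, true on success, and throws if the procedure failed.
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hbase.client.Admin;

public class ProcedurePollSketch {
  static void waitForProcedure(Admin admin) throws Exception {
    String signature = "online-snapshot";   // placeholder signature
    String instance = "snap1";              // placeholder instance name
    Map<String, String> props = Collections.emptyMap();
    while (!admin.isProcedureFinished(signature, instance, props)) {
      Thread.sleep(1000);                   // still running, try again later
    }
  }
}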
    * @param signature The signature that uniquely identifies a procedure - * @param instance The instance name of the procedure - * @param props Property/Value pairs of properties passing to the procedure - * @return true if the specified procedure is finished successfully, false if it is still running + * @param instance The instance name of the procedure + * @param props Property/Value pairs of properties passing to the procedure + * @return true if the specified procedure is finished successfully, + * false if it is still running * @throws IOException if the specified procedure finished with error */ boolean isProcedureFinished(String signature, String instance, Map props) - throws IOException; + throws IOException; /** * List completed snapshots. - * * @return a list of snapshot descriptors for completed snapshots * @throws IOException if a network error occurs */ @@ -1816,7 +1733,6 @@ boolean isProcedureFinished(String signature, String instance, Map listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) throws IOException; + Pattern snapshotNamePattern) throws IOException; /** * Delete an existing snapshot. - * * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs */ @@ -1844,7 +1759,6 @@ List listTableSnapshots(Pattern tableNamePattern, /** * Delete existing snapshots whose names match the pattern passed. - * * @param pattern pattern for names of the snapshot to match * @throws IOException if a remote or network exception occurs */ @@ -1853,16 +1767,15 @@ List listTableSnapshots(Pattern tableNamePattern, /** * Delete all existing snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against * @throws IOException if a remote or network exception occurs */ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) - throws IOException; + throws IOException; /** * Apply the new quota settings. - * * @param quota the quota settings * @throws IOException if a remote or network exception occurs */ @@ -1885,8 +1798,8 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service * invocations: *

        * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    @@ -1895,8 +1808,8 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
        * 
    * @return A MasterCoprocessorRpcChannel instance * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any * more. Use the coprocessorService methods in {@link AsyncAdmin} instead. @@ -1904,7 +1817,6 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) @Deprecated CoprocessorRpcChannel coprocessorService(); - /** * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} * instance connected to the passed region server. @@ -1915,6 +1827,7 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) * invocations: *

        * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    @@ -1923,8 +1836,8 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
        * 
    + * + * * @param serverName the server name to which the endpoint call is made * @return A RegionServerCoprocessorRpcChannel instance * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any @@ -1933,25 +1846,22 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) @Deprecated CoprocessorRpcChannel coprocessorService(ServerName serverName); - /** - * Update the configuration and trigger an online config change - * on the regionserver. + * Update the configuration and trigger an online config change on the regionserver. * @param server : The server whose config needs to be updated. * @throws IOException if a remote or network exception occurs */ void updateConfiguration(ServerName server) throws IOException; /** - * Update the configuration and trigger an online config change - * on all the regionservers. + * Update the configuration and trigger an online config change on all the regionservers. * @throws IOException if a remote or network exception occurs */ void updateConfiguration() throws IOException; /** - * Update the configuration and trigger an online config change - * on all the regionservers in the RSGroup. + * Update the configuration and trigger an online config change on all the regionservers in the + * RSGroup. * @param groupName the group name * @throws IOException if a remote or network exception occurs */ @@ -1968,16 +1878,15 @@ default int getMasterInfoPort() throws IOException { /** * Return the set of supported security capabilities. - * @throws IOException if a remote or network exception occurs - * @throws UnsupportedOperationException + * @throws IOException if a remote or network exception occurs n */ List getSecurityCapabilities() throws IOException; /** * Turn the split switch on or off. - * @param enabled enabled or not + * @param enabled enabled or not * @param synchronous If true, it waits until current split() call, if outstanding, - * to return. + * to return. * @return Previous switch value * @throws IOException if a remote or network exception occurs */ @@ -1985,9 +1894,9 @@ default int getMasterInfoPort() throws IOException { /** * Turn the merge switch on or off. - * @param enabled enabled or not + * @param enabled enabled or not * @param synchronous If true, it waits until current merge() call, if outstanding, - * to return. + * to return. * @return Previous switch value * @throws IOException if a remote or network exception occurs */ @@ -2009,24 +1918,24 @@ default int getMasterInfoPort() throws IOException { /** * Add a new replication peer for replicating data to slave cluster. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer * @throws IOException if a remote or network exception occurs */ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) - throws IOException { + throws IOException { addReplicationPeer(peerId, peerConfig, true); } /** * Add a new replication peer for replicating data to slave cluster. 
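To make the reformatted addReplicationPeer overloads easier to follow, a hedged sketch of registering a peer; the peer id, cluster key and the builder-based construction of ReplicationPeerConfig are illustrative assumptions.

// Sketch: register a replication peer and start it in the ENABLED state.
// The peer id and cluster key are placeholders for a real slave cluster.
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  static void addPeer(Admin admin) throws Exception {
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("zk1,zk2,zk3:2181:/hbase")   // placeholder cluster key
        .build();
    admin.addReplicationPeer("peer_1", peerConfig, true);   // enabled = true
  }
}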
- * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer - * @param enabled peer state, true if ENABLED and false if DISABLED + * @param enabled peer state, true if ENABLED and false if DISABLED * @throws IOException if a remote or network exception occurs */ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) - throws IOException { + throws IOException { get(addReplicationPeerAsync(peerId, peerConfig, enabled), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -2037,13 +1946,13 @@ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer * @return the result of the async operation * @throws IOException IOException if a remote or network exception occurs */ default Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig) - throws IOException { + throws IOException { return addReplicationPeerAsync(peerId, peerConfig, true); } @@ -2053,14 +1962,14 @@ default Future addReplicationPeerAsync(String peerId, ReplicationPeerConfi * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer - * @param enabled peer state, true if ENABLED and false if DISABLED + * @param enabled peer state, true if ENABLED and false if DISABLED * @return the result of the async operation * @throws IOException IOException if a remote or network exception occurs */ Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, - boolean enabled) throws IOException; + boolean enabled) throws IOException; /** * Remove a peer and stop the replication. @@ -2068,8 +1977,7 @@ Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerCo * @throws IOException if a remote or network exception occurs */ default void removeReplicationPeer(String peerId) throws IOException { - get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), - TimeUnit.MILLISECONDS); + get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** @@ -2136,12 +2044,12 @@ default void disableReplicationPeer(String peerId) throws IOException { /** * Update the peerConfig for the specified peer. 
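A sketch of the read-modify-write pattern this method implies, assuming ReplicationPeerConfig.newBuilder(existing) and setBandwidth are available as in recent HBase releases; the bandwidth value is arbitrary.

// Sketch: update an existing replication peer by copying its current config,
// adjusting one setting, and writing it back. Values are placeholders.
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class UpdatePeerSketch {
  static void throttlePeer(Admin admin) throws Exception {
    ReplicationPeerConfig current = admin.getReplicationPeerConfig("peer_1");
    ReplicationPeerConfig updated = ReplicationPeerConfig.newBuilder(current)
        .setBandwidth(1024 * 1024)   // placeholder: cap replication bandwidth
        .build();
    admin.updateReplicationPeerConfig("peer_1", updated);
  }
}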
- * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig new config for the replication peer * @throws IOException if a remote or network exception occurs */ default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws IOException { + throws IOException { get(updateReplicationPeerConfigAsync(peerId, peerConfig), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -2152,23 +2060,23 @@ default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig pe * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig new config for the replication peer * @return the result of the async operation * @throws IOException IOException if a remote or network exception occurs */ Future updateReplicationPeerConfigAsync(String peerId, ReplicationPeerConfig peerConfig) - throws IOException; + throws IOException; /** * Append the replicable table column family config from the specified peer. - * @param id a short that identifies the cluster + * @param id a short that identifies the cluster * @param tableCfs A map from tableName to column family names * @throws ReplicationException if tableCfs has conflict with existing config - * @throws IOException if a remote or network exception occurs + * @throws IOException if a remote or network exception occurs */ default void appendReplicationPeerTableCFs(String id, Map> tableCfs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { if (tableCfs == null) { throw new ReplicationException("tableCfs is null"); } @@ -2180,13 +2088,13 @@ default void appendReplicationPeerTableCFs(String id, Map> tableCfs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { if (tableCfs == null) { throw new ReplicationException("tableCfs is null"); } @@ -2214,11 +2122,11 @@ default void removeReplicationPeerTableCFs(String id, Map transitReplicationPeerSyncReplicationStateAsync(String peerId, - SyncReplicationState state) throws IOException; + SyncReplicationState state) throws IOException; /** * Get the current cluster state in a synchronous replication peer. @@ -2244,7 +2152,7 @@ Future transitReplicationPeerSyncReplicationStateAsync(String peerId, * @throws IOException if a remote or network exception occurs */ default SyncReplicationState getReplicationPeerSyncReplicationState(String peerId) - throws IOException { + throws IOException { List peers = listReplicationPeers(Pattern.compile(peerId)); if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) { throw new IOException("Replication peer " + peerId + " does not exist"); @@ -2253,10 +2161,10 @@ default SyncReplicationState getReplicationPeerSyncReplicationState(String peerI } /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. 
+ * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. * @param offload True to offload the regions from the decommissioned servers * @throws IOException if a remote or network exception occurs @@ -2271,15 +2179,14 @@ default SyncReplicationState getReplicationPeerSyncReplicationState(String peerI List listDecommissionedRegionServers() throws IOException; /** - * Remove decommission marker from a region server to allow regions assignments. - * Load regions onto the server if a list of regions is given. Region loading is - * asynchronous. - * @param server The server to recommission. + * Remove decommission marker from a region server to allow regions assignments. Load regions onto + * the server if a list of regions is given. Region loading is asynchronous. + * @param server The server to recommission. * @param encodedRegionNames Regions to load onto the server. * @throws IOException if a remote or network exception occurs */ void recommissionRegionServer(ServerName server, List encodedRegionNames) - throws IOException; + throws IOException; /** * Find all table and column families that are replicated from this cluster @@ -2305,9 +2212,8 @@ void recommissionRegionServer(ServerName server, List encodedRegionNames /** * Clear compacting queues on a regionserver. * @param serverName the region server name - * @param queues the set of queue name - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException + * @param queues the set of queue name + * @throws IOException if a remote or network exception occurs n */ void clearCompactionQueues(ServerName serverName, Set queues) throws IOException, InterruptedException; @@ -2330,13 +2236,13 @@ default List listDeadServers() throws IOException { /** * Create a new table by cloning the existent table schema. - * @param tableName name of the table to be cloned - * @param newTableName name of the new table where the table will be created + * @param tableName name of the table to be cloned + * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved * @throws IOException if a remote or network exception occurs */ void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits) - throws IOException; + throws IOException; /** * Switch the rpc throttle enable state. @@ -2354,8 +2260,8 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese boolean isRpcThrottleEnabled() throws IOException; /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. * @return Previous exceed throttle enabled value * @throws IOException if a remote or network exception occurs @@ -2372,8 +2278,8 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese * Fetches the observed {@link SpaceQuotaSnapshotView}s observed by a RegionServer. 
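The decommission and recommission javadoc reflowed earlier in this hunk describes a drain-and-restore workflow; a minimal sketch, with the server reference and empty region list as placeholders.

// Sketch: drain a region server before maintenance, then bring it back.
// Region unloading is asynchronous, so callers typically poll or wait elsewhere.
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class DecommissionSketch {
  static void drainAndRestore(Admin admin, ServerName server) throws Exception {
    // Mark the server as decommissioned and offload its regions.
    admin.decommissionRegionServers(Collections.singletonList(server), true);

    List<ServerName> drained = admin.listDecommissionedRegionServers();
    System.out.println("Currently decommissioned: " + drained);

    // Later: allow assignments again; pass an empty list to skip explicit region loading.
    admin.recommissionRegionServer(server, Collections.emptyList());
  }
}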
* @throws IOException if a remote or network exception occurs */ - Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException; + Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException; /** * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has @@ -2391,10 +2297,10 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese /** * Grants user specific permissions - * @param userPermission user name and the specific permission + * @param userPermission user name and the specific permission * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. + * previous granted permissions. otherwise, it'll merge with + * previous granted permissions. * @throws IOException if a remote or network exception occurs */ void grant(UserPermission userPermission, boolean mergeExistingPermissions) throws IOException; @@ -2409,22 +2315,22 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese /** * Get the global/namespace/table permissions for user * @param getUserPermissionsRequest A request contains which user, global, namespace or table - * permissions needed + * permissions needed * @return The user and permission list * @throws IOException if a remote or network exception occurs */ List getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) - throws IOException; + throws IOException; /** * Check if the user has specific permissions - * @param userName the user name + * @param userName the user name * @param permissions the specific permission list * @return True if user has the specific permissions * @throws IOException if a remote or network exception occurs */ List hasUserPermissions(String userName, List permissions) - throws IOException; + throws IOException; /** * Check if call user has specific permissions @@ -2438,39 +2344,34 @@ default List hasUserPermissions(List permissions) throws IO /** * Turn on or off the auto snapshot cleanup based on TTL. - * - * @param on Set to true to enable, false to disable. + * @param on Set to true to enable, false to disable. * @param synchronous If true, it waits until current snapshot cleanup is completed, - * if outstanding. + * if outstanding. * @return Previous auto snapshot cleanup value * @throws IOException if a remote or network exception occurs */ - boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) - throws IOException; + boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) throws IOException; /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, - * false otherwise. + * @return true if the auto snapshot cleanup is enabled, false + * otherwise. 
* @throws IOException if a remote or network exception occurs */ boolean isSnapshotCleanupEnabled() throws IOException; /** - * Retrieves online slow/large RPC logs from the provided list of - * RegionServers - * - * @param serverNames Server names to get slowlog responses from + * Retrieves online slow/large RPC logs from the provided list of RegionServers + * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided (determines slow / large RPC logs) * @return online slowlog response list * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. */ @Deprecated default List getSlowLogResponses(final Set serverNames, - final LogQueryFilter logQueryFilter) throws IOException { + final LogQueryFilter logQueryFilter) throws IOException { String logType; if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) { logType = "LARGE_LOG"; @@ -2483,24 +2384,20 @@ default List getSlowLogResponses(final Set serverNa filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - List logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); + List logEntries = getLogEntries(serverNames, logType, ServerType.REGION_SERVER, + logQueryFilter.getLimit(), filterParams); return logEntries.stream().map(logEntry -> (OnlineLogRecord) logEntry) .collect(Collectors.toList()); } /** - * Clears online slow/large RPC logs from the provided list of - * RegionServers - * + * Clears online slow/large RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer * @throws IOException if a remote or network exception occurs */ - List clearSlowLogResponses(final Set serverNames) - throws IOException; + List clearSlowLogResponses(final Set serverNames) throws IOException; /** * Creates a new RegionServer group with the given name @@ -2569,11 +2466,10 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St void removeRSGroup(String groupName) throws IOException; /** - * Remove decommissioned servers from group - * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline - * the server for repairing. Or we need to move some servers to join other clusters. - * So we need to remove these servers from the group. - * 2. Dead/recovering/live servers will be disallowed. + * Remove decommissioned servers from group 1. Sometimes we may find the server aborted due to + * some hardware failure and we must offline the server for repairing. Or we need to move some + * servers to join other clusters. So we need to remove these servers from the group. 2. + * Dead/recovering/live servers will be disallowed. 
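Just above, the deprecated getSlowLogResponses default is rewritten to forward to getLogEntries; a sketch of calling the replacement directly, with the server set, log type, limit and filter values chosen purely for illustration.

// Sketch: fetch recent slow RPC records straight from getLogEntries, which is
// what the deprecated getSlowLogResponses default forwards to. Values are placeholders.
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.LogEntry;
import org.apache.hadoop.hbase.client.ServerType;

public class SlowLogSketch {
  static void printSlowLogs(Admin admin, Set<ServerName> servers) throws Exception {
    Map<String, Object> filterParams = Collections.singletonMap("tableName", "usertable");
    List<LogEntry> entries =
        admin.getLogEntries(servers, "SLOW_LOG", ServerType.REGION_SERVER, 100, filterParams);
    entries.forEach(entry -> System.out.println(entry));
  }
}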
* @param servers set of servers to remove * @throws IOException if a remote or network exception occurs */ @@ -2581,7 +2477,7 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St /** * Move given set of servers to the specified target RegionServer group - * @param servers set of servers to move + * @param servers set of servers to move * @param targetGroup the group to move servers to * @throws IOException if a remote or network exception occurs */ @@ -2589,7 +2485,7 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St /** * Set the RegionServer group for tables - * @param tables tables to set group for + * @param tables tables to set group for * @param groupName group name for tables * @throws IOException if a remote or network exception occurs */ @@ -2606,9 +2502,8 @@ default BalanceResponse balanceRSGroup(String groupName) throws IOException { } /** - * Balance regions in the given RegionServer group, running based on - * the given {@link BalanceRequest}. - * + * Balance regions in the given RegionServer group, running based on the given + * {@link BalanceRequest}. * @return BalanceResponse details about the balancer run */ BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException; @@ -2623,26 +2518,25 @@ default BalanceResponse balanceRSGroup(String groupName) throws IOException { /** * Update RSGroup configuration - * @param groupName the group name + * @param groupName the group name * @param configuration new configuration of the group name to be set * @throws IOException if a remote or network exception occurs */ void updateRSGroupConfig(String groupName, Map configuration) throws IOException; /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. - * @param logType string representing type of log records - * @param serverType enum for server type: HMaster or RegionServer - * @param limit put a limit to list of records that server should send in response + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of + * servertype=MASTER, logs will only come from the currently active master. 
+ * @param logType string representing type of log records + * @param serverType enum for server type: HMaster or RegionServer + * @param limit put a limit to list of records that server should send in response * @param filterParams additional filter params * @return Log entries representing online records from servers * @throws IOException if a remote or network exception occurs */ - List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) throws IOException; + List getLogEntries(Set serverNames, String logType, ServerType serverType, + int limit, Map filterParams) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 4559e90e4c97..901c86ff3744 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -136,14 +136,13 @@ public List listTableDescriptors() throws IOException { } @Override - public List listTableDescriptors(boolean includeSysTables) - throws IOException { + public List listTableDescriptors(boolean includeSysTables) throws IOException { return get(admin.listTableDescriptors(includeSysTables)); } @Override public List listTableDescriptors(Pattern pattern, boolean includeSysTables) - throws IOException { + throws IOException { return get(admin.listTableDescriptors(pattern, includeSysTables)); } @@ -159,13 +158,13 @@ public TableName[] listTableNames(Pattern pattern, boolean includeSysTables) thr @Override public TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException { + throws TableNotFoundException, IOException { return get(admin.getDescriptor(tableName)); } @Override public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) - throws IOException { + throws IOException { get(admin.createTable(desc, startKey, endKey, numRegions)); } @@ -176,7 +175,7 @@ public Future createTableAsync(TableDescriptor desc) throws IOException { @Override public Future createTableAsync(TableDescriptor desc, byte[][] splitKeys) - throws IOException { + throws IOException { return admin.createTable(desc, splitKeys); } @@ -187,7 +186,7 @@ public Future deleteTableAsync(TableName tableName) throws IOException { @Override public Future truncateTableAsync(TableName tableName, boolean preserveSplits) - throws IOException { + throws IOException { return admin.truncateTable(tableName, preserveSplits); } @@ -218,13 +217,13 @@ public boolean isTableAvailable(TableName tableName) throws IOException { @Override public Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { return admin.addColumnFamily(tableName, columnFamily); } @Override public Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) - throws IOException { + throws IOException { return admin.deleteColumnFamily(tableName, columnFamily); } @@ -292,13 +291,13 @@ public void compactRegion(byte[] regionName, byte[] columnFamily) throws IOExcep @Override public void compact(TableName tableName, CompactType compactType) - throws IOException, 
InterruptedException { + throws IOException, InterruptedException { get(admin.compact(tableName, compactType)); } @Override public void compact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { get(admin.compact(tableName, columnFamily, compactType)); } @@ -324,19 +323,19 @@ public void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IO @Override public void majorCompact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { get(admin.majorCompact(tableName, compactType)); } @Override public void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { get(admin.majorCompact(tableName, columnFamily, compactType)); } @Override public Map compactionSwitch(boolean switchState, - List serverNamesList) throws IOException { + List serverNamesList) throws IOException { return get(admin.compactionSwitch(switchState, serverNamesList)); } @@ -380,7 +379,6 @@ public boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOExc return get(admin.balancerSwitch(onOrOff, synchronous)); } - public BalanceResponse balance(BalanceRequest request) throws IOException { return get(admin.balance(request)); } @@ -452,7 +450,7 @@ public boolean isCleanerChoreEnabled() throws IOException { @Override public Future mergeRegionsAsync(byte[][] nameOfRegionsToMerge, boolean forcible) - throws IOException { + throws IOException { return admin.mergeRegions(Arrays.asList(nameOfRegionsToMerge), forcible); } @@ -519,7 +517,7 @@ public List getRegionMetrics(ServerName serverName) throws IOExce @Override public List getRegionMetrics(ServerName serverName, TableName tableName) - throws IOException { + throws IOException { return get(admin.getRegionMetrics(serverName, tableName)); } @@ -545,7 +543,7 @@ public Future deleteNamespaceAsync(String name) throws IOException { @Override public NamespaceDescriptor getNamespaceDescriptor(String name) - throws NamespaceNotFoundException, IOException { + throws NamespaceNotFoundException, IOException { return get(admin.getNamespaceDescriptor(name)); } @@ -586,7 +584,7 @@ public List listTableDescriptors(List tableNames) th @Override public Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) - throws IOException { + throws IOException { return admin.abortProcedure(procId, mayInterruptIfRunning); } @@ -612,7 +610,7 @@ public CompactionState getCompactionState(TableName tableName) throws IOExceptio @Override public CompactionState getCompactionState(TableName tableName, CompactType compactType) - throws IOException { + throws IOException { return get(admin.getCompactionState(tableName, compactType)); } @@ -633,19 +631,19 @@ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws I @Override public void snapshot(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { get(admin.snapshot(snapshot)); } @Override public Future snapshotAsync(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException { + throws IOException, SnapshotCreationException { return admin.snapshot(snapshot); } @Override public boolean isSnapshotFinished(SnapshotDescription snapshot) - throws IOException, 
HBaseSnapshotException, UnknownSnapshotException { + throws IOException, HBaseSnapshotException, UnknownSnapshotException { return get(admin.isSnapshotFinished(snapshot)); } @@ -669,19 +667,19 @@ public Future cloneSnapshotAsync(String snapshotName, TableName tableName, @Override public void execProcedure(String signature, String instance, Map props) - throws IOException { + throws IOException { get(admin.execProcedure(signature, instance, props)); } @Override public byte[] execProcedureWithReturn(String signature, String instance, - Map props) throws IOException { + Map props) throws IOException { return get(admin.execProcedureWithReturn(signature, instance, props)); } @Override public boolean isProcedureFinished(String signature, String instance, Map props) - throws IOException { + throws IOException { return get(admin.isProcedureFinished(signature, instance, props)); } @@ -697,7 +695,7 @@ public List listSnapshots(Pattern pattern) throws IOExcepti @Override public List listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) throws IOException { + Pattern snapshotNamePattern) throws IOException { return get(admin.listTableSnapshots(tableNamePattern, snapshotNamePattern)); } @@ -713,7 +711,7 @@ public void deleteSnapshots(Pattern pattern) throws IOException { @Override public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) - throws IOException { + throws IOException { get(admin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern)); } @@ -738,7 +736,7 @@ public SyncCoprocessorRpcChannelOverAsync(RpcChannel delegate) { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { ClientCoprocessorRpcController c = new ClientCoprocessorRpcController(); CoprocessorBlockingRpcCallback callback = new CoprocessorBlockingRpcCallback<>(); delegate.callMethod(method, c, request, responsePrototype, callback); @@ -757,7 +755,7 @@ public void callMethod(MethodDescriptor method, RpcController controller, Messag @Override public Message callBlockingMethod(MethodDescriptor method, RpcController controller, - Message request, Message responsePrototype) throws ServiceException { + Message request, Message responsePrototype) throws ServiceException { ClientCoprocessorRpcController c = new ClientCoprocessorRpcController(); CoprocessorBlockingRpcCallback done = new CoprocessorBlockingRpcCallback<>(); callMethod(method, c, request, responsePrototype, done); @@ -831,7 +829,7 @@ public boolean isMergeEnabled() throws IOException { @Override public Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, - boolean enabled) throws IOException { + boolean enabled) throws IOException { return admin.addReplicationPeer(peerId, peerConfig, enabled); } @@ -857,7 +855,7 @@ public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOEx @Override public Future updateReplicationPeerConfigAsync(String peerId, - ReplicationPeerConfig peerConfig) throws IOException { + ReplicationPeerConfig peerConfig) throws IOException { return admin.updateReplicationPeerConfig(peerId, peerConfig); } @@ -873,13 +871,13 @@ public List listReplicationPeers(Pattern pattern) th @Override public Future transitReplicationPeerSyncReplicationStateAsync(String peerId, - SyncReplicationState state) throws IOException { + SyncReplicationState state) throws IOException { return 
admin.transitReplicationPeerSyncReplicationState(peerId, state); } @Override public void decommissionRegionServers(List servers, boolean offload) - throws IOException { + throws IOException { get(admin.decommissionRegionServers(servers, offload)); } @@ -890,7 +888,7 @@ public List listDecommissionedRegionServers() throws IOException { @Override public void recommissionRegionServer(ServerName server, List encodedRegionNames) - throws IOException { + throws IOException { get(admin.recommissionRegionServer(server, encodedRegionNames)); } @@ -911,7 +909,7 @@ public void disableTableReplication(TableName tableName) throws IOException { @Override public void clearCompactionQueues(ServerName serverName, Set queues) - throws IOException, InterruptedException { + throws IOException, InterruptedException { get(admin.clearCompactionQueues(serverName, queues)); } @@ -922,7 +920,7 @@ public List clearDeadServers(List servers) throws IOExce @Override public void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits) - throws IOException { + throws IOException { get(admin.cloneTableSchema(tableName, newTableName, preserveSplits)); } @@ -947,8 +945,8 @@ public Map getSpaceQuotaTableSizes() throws IOException { } @Override - public Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException { + public Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException { return get(admin.getRegionServerSpaceQuotaSnapshots(serverName)); } @@ -959,13 +957,13 @@ public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) thr @Override public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName) - throws IOException { + throws IOException { return get(admin.getCurrentSpaceQuotaSnapshot(tableName)); } @Override public void grant(UserPermission userPermission, boolean mergeExistingPermissions) - throws IOException { + throws IOException { get(admin.grant(userPermission, mergeExistingPermissions)); } @@ -975,20 +973,20 @@ public void revoke(UserPermission userPermission) throws IOException { } @Override - public List getUserPermissions( - GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { + public List + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { return get(admin.getUserPermissions(getUserPermissionsRequest)); } @Override public List hasUserPermissions(String userName, List permissions) - throws IOException { + throws IOException { return get(admin.hasUserPermissions(userName, permissions)); } @Override public boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) - throws IOException { + throws IOException { return get(admin.snapshotCleanupSwitch(on, synchronous)); } @@ -998,8 +996,7 @@ public boolean isSnapshotCleanupEnabled() throws IOException { } @Override - public List clearSlowLogResponses(final Set serverNames) - throws IOException { + public List clearSlowLogResponses(final Set serverNames) throws IOException { return get(admin.clearSlowLogResponses(serverNames)); } @@ -1024,7 +1021,8 @@ public void removeRSGroup(String groupName) throws IOException { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { return get(admin.balanceRSGroup(groupName, request)); } @@ -1071,14 +1069,13 @@ public void renameRSGroup(String oldName, String newName) throws 
IOException { @Override public void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { + throws IOException { get(admin.updateRSGroupConfig(groupName, configuration)); } @Override public List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) - throws IOException { + ServerType serverType, int limit, Map filterParams) throws IOException { return get(admin.getLogEntries(serverNames, logType, serverType, limit, filterParams)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java index 10933abf3cf2..091024105a34 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Optional; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -93,10 +92,10 @@ interface ScanController { /** * Indicate that we have receive some data. - * @param results the data fetched from HBase service. + * @param results the data fetched from HBase service. * @param controller used to suspend or terminate the scan. Notice that the {@code controller} - * instance is only valid within scope of onNext method. You can only call its method in - * onNext, do NOT store it and call it later outside onNext. + * instance is only valid within scope of onNext method. You can only call its + * method in onNext, do NOT store it and call it later outside onNext. */ void onNext(Result[] results, ScanController controller); @@ -113,8 +112,9 @@ interface ScanController { *

    * This method give you a chance to terminate a slow scan operation. * @param controller used to suspend or terminate the scan. Notice that the {@code controller} - * instance is only valid within the scope of onHeartbeat method. You can only call its - * method in onHeartbeat, do NOT store it and call it later outside onHeartbeat. + * instance is only valid within the scope of onHeartbeat method. You can only + * call its method in onHeartbeat, do NOT store it and call it later outside + * onHeartbeat. */ default void onHeartbeat(ScanController controller) { } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java index 8d21994c23e0..3ef28308f1c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 41b3845fc784..59c90382ab2b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -38,12 +38,12 @@ /** * Performs Append operations on a single row. *

    - * This operation ensures atomicty to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicty to readers. Appends are done under a single row lock, so write + * operations to a row are synchronized, and readers are guaranteed to see this operation fully + * completed. *

    - * To append to a set of columns of a row, instantiate an Append object with the - * row to append to. At least one column to append must be specified using the + * To append to a set of columns of a row, instantiate an Append object with the row to append to. + * At least one column to append must be specified using the * {@link #addColumn(byte[], byte[], byte[])} method. */ @InterfaceAudience.Public @@ -55,17 +55,15 @@ public class Append extends Mutation { /** * Sets the TimeRange to be used on the Get for this append. *

    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this append, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this append, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *

    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @return this + * @param maxStamp maximum timestamp value, exclusive n */ public Append setTimeRange(long minStamp, long maxStamp) { tr = TimeRange.between(minStamp, maxStamp); @@ -73,23 +71,20 @@ public Append setTimeRange(long minStamp, long maxStamp) { } /** - * Gets the TimeRange used for this append. - * @return TimeRange + * Gets the TimeRange used for this append. n */ public TimeRange getTimeRange() { return this.tr; } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } /** - * @param returnResults - * True (default) if the append operation should return the results. - * A client that is not interested in the result can save network - * bandwidth setting this to false. + * n * True (default) if the append operation should return the results. A client that is not + * interested in the result can save network bandwidth setting this to false. */ @Override public Append setReturnResults(boolean returnResults) { @@ -115,6 +110,7 @@ public boolean isReturnResults() { public Append(byte[] row) { this(row, 0, row.length); } + /** * Copy constructor * @param appendToCopy append to copy @@ -124,36 +120,33 @@ public Append(Append appendToCopy) { this.tr = appendToCopy.getTimeRange(); } - /** Create a Append operation for the specified row. + /** + * Create a Append operation for the specified row. *
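A short sketch of the basic Append flow supported by the constructors and setters in this hunk; the row, family and qualifier bytes are placeholders and the Table handle is assumed to come from an open Connection.

// Sketch: append bytes to a column atomically. The row/family/qualifier values
// are placeholders, and the Table is assumed to be obtained from a Connection.
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  static void appendSuffix(Table table) throws Exception {
    Append append = new Append(Bytes.toBytes("row-1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",more"));
    append.setReturnResults(false);   // skip shipping the new value back to save bandwidth
    table.append(append);
  }
}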

    * At least one column must be appended to. - * @param rowArray Makes a copy out of this buffer. - * @param rowOffset - * @param rowLength + * @param rowArray Makes a copy out of this buffer. nn */ - public Append(final byte [] rowArray, final int rowOffset, final int rowLength) { + public Append(final byte[] rowArray, final int rowOffset, final int rowLength) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); } /** - * Construct the Append with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Append with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Append(byte[] row, long ts, NavigableMap> familyMap) { + public Append(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add the specified column and value to this Append operation. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param value value to append to specified column - * @return this + * @param value value to append to specified column n */ public Append addColumn(byte[] family, byte[] qualifier, byte[] value) { KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index b0dc0c16d9e6..47b89c96a4b3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -89,12 +89,12 @@ default CompletableFuture> listTableDescriptors() { /** * List all the tables matching the given pattern. - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}. */ CompletableFuture> listTableDescriptors(Pattern pattern, - boolean includeSysTables); + boolean includeSysTables); /** * List specific tables including system tables. @@ -128,7 +128,7 @@ default CompletableFuture> listTableNames() { /** * List all of the names of userspace tables. - * @param pattern The regular expression to match against + * @param pattern The regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a list of table names wrapped by a {@link CompletableFuture}. */ @@ -160,19 +160,19 @@ default CompletableFuture> listTableNames() { * key of the last region of the table (the first region has a null start key and the last region * has a null end key). BigInteger math will be used to divide the key range specified into enough * segments to make the required number of total regions. 
- * @param desc table descriptor for table - * @param startKey beginning of key range - * @param endKey end of key range + * @param desc table descriptor for table + * @param startKey beginning of key range + * @param endKey end of key range * @param numRegions the total number of regions to create */ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, - int numRegions); + int numRegions); /** * Creates a new table with an initial set of empty regions defined by the specified split keys. - * The total number of regions created will be the number of split keys plus one. - * Note : Avoid passing empty split key. - * @param desc table descriptor for table + * The total number of regions created will be the number of split keys plus one. Note : Avoid + * passing empty split key. + * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table */ CompletableFuture createTable(TableDescriptor desc, byte[][] splitKeys); @@ -186,7 +186,7 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ /** * Change the store file tracker of the given table. * @param tableName the table you want to change - * @param dstSFT the destination store file tracker + * @param dstSFT the destination store file tracker */ CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT); @@ -198,7 +198,7 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ /** * Truncate a table. - * @param tableName name of table to truncate + * @param tableName name of table to truncate * @param preserveSplits True if the splits should be preserved */ CompletableFuture truncateTable(TableName tableName, boolean preserveSplits); @@ -210,8 +210,7 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ CompletableFuture enableTable(TableName tableName); /** - * Disable a table. The table has to be in enabled state for it to be disabled. - * @param tableName + * Disable a table. The table has to be in enabled state for it to be disabled. n */ CompletableFuture disableTable(TableName tableName); @@ -238,32 +237,31 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ /** * Add a column family to an existing table. - * @param tableName name of the table to add column family to + * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added */ - CompletableFuture addColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily); + CompletableFuture addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily); /** * Delete a column family from a table. - * @param tableName name of table + * @param tableName name of table * @param columnFamily name of column family to be deleted */ CompletableFuture deleteColumnFamily(TableName tableName, byte[] columnFamily); /** * Modify an existing column family on a table. - * @param tableName name of table + * @param tableName name of table * @param columnFamily new column family descriptor to use */ CompletableFuture modifyColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily); + ColumnFamilyDescriptor columnFamily); /** * Change the store file tracker of the given table's given family. 
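For the createTable overloads whose javadoc is reflowed earlier in this hunk, a sketch of creating a pre-split table through AsyncAdmin; the descriptor contents and split points are illustrative only.

// Sketch: create a pre-split table via AsyncAdmin. The column family, table name
// and split keys are placeholders; join() is used here only to keep the sketch short.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createPreSplit(AsyncAdmin admin) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    byte[][] splitKeys = { Bytes.toBytes("m"), Bytes.toBytes("t") };  // avoid empty split keys
    admin.createTable(desc, splitKeys).join();
  }
}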
* @param tableName the table you want to change - * @param family the family you want to change - * @param dstSFT the destination store file tracker + * @param family the family you want to change + * @param dstSFT the destination store file tracker */ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT); @@ -322,9 +320,9 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, CompletableFuture flush(TableName tableName); /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. - * @param tableName table to flush + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. + * @param tableName table to flush * @param columnFamily column family within a table */ CompletableFuture flush(TableName tableName, byte[] columnFamily); @@ -337,9 +335,9 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, /** * Flush a column family within a region. - * @param regionName region to flush + * @param regionName region to flush * @param columnFamily column family within a region. If not present, flush the region's all - * column families. + * column families. */ CompletableFuture flushRegion(byte[] regionName, byte[] columnFamily); @@ -351,8 +349,8 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact */ default CompletableFuture compact(TableName tableName) { @@ -362,11 +360,10 @@ default CompletableFuture compact(TableName tableName) { /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. - * @param tableName table to compact + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * @param tableName table to compact * @param columnFamily column family within a table. If not present, compact the table's all - * column families. + * column families. */ default CompletableFuture compact(TableName tableName, byte[] columnFamily) { return compact(tableName, columnFamily, CompactType.NORMAL); @@ -374,10 +371,10 @@ default CompletableFuture compact(TableName tableName, byte[] columnFamily /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. - * @param tableName table to compact + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. 
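// Sketch of the flush and compact calls above, under the same assumptions as the createTable sketch.
// The compact future completes once the request is accepted, not when the compaction itself finishes.
admin.flush(TableName.valueOf("demo"), Bytes.toBytes("cf")).join();
admin.compact(TableName.valueOf("demo"), Bytes.toBytes("cf"), CompactType.NORMAL).join();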
+ * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture compact(TableName tableName, CompactType compactType); @@ -385,15 +382,14 @@ default CompletableFuture compact(TableName tableName, byte[] columnFamily /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction type. - * @param tableName table to compact + * @param tableName table to compact * @param columnFamily column family within a table - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture compact(TableName tableName, byte[] columnFamily, - CompactType compactType); + CompactType compactType); /** * Compact an individual region. When the returned CompletableFuture is done, it only means the @@ -406,16 +402,16 @@ CompletableFuture compact(TableName tableName, byte[] columnFamily, * Compact a column family within a region. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact * operation. - * @param regionName region to compact + * @param regionName region to compact * @param columnFamily column family within a region. If not present, compact the region's all - * column families. + * column families. */ CompletableFuture compactRegion(byte[] regionName, byte[] columnFamily); /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to major compact */ default CompletableFuture majorCompact(TableName tableName) { @@ -425,12 +421,11 @@ default CompletableFuture majorCompact(TableName tableName) { /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction. type. - * @param tableName table to major compact + * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all - * column families. + * column families. */ default CompletableFuture majorCompact(TableName tableName, byte[] columnFamily) { return majorCompact(tableName, columnFamily, CompactType.NORMAL); @@ -438,10 +433,10 @@ default CompletableFuture majorCompact(TableName tableName, byte[] columnF /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. 
- * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. - * @param tableName table to major compact + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. + * @param tableName table to major compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture majorCompact(TableName tableName, CompactType compactType); @@ -449,15 +444,14 @@ default CompletableFuture majorCompact(TableName tableName, byte[] columnF /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. - * @param tableName table to major compact + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all - * column families. - * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} + * column families. + * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ CompletableFuture majorCompact(TableName tableName, byte[] columnFamily, - CompactType compactType); + CompactType compactType); /** * Major compact a region. When the returned CompletableFuture is done, it only means the compact @@ -470,9 +464,9 @@ CompletableFuture majorCompact(TableName tableName, byte[] columnFamily, * Major compact a column family within region. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact * operation. - * @param regionName region to major compact + * @param regionName region to major compact * @param columnFamily column family within a region. If not present, major compact the region's - * all column families. + * all column families. */ CompletableFuture majorCompactRegion(byte[] regionName, byte[] columnFamily); @@ -503,9 +497,9 @@ default CompletableFuture mergeSwitch(boolean enabled) { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code drainMerges} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * @param enabled enabled or not + * @param enabled enabled or not * @param drainMerges If true, it waits until current merge() call, if outstanding, - * to return. + * to return. * @return Previous switch value wrapped by a {@link CompletableFuture} */ CompletableFuture mergeSwitch(boolean enabled, boolean drainMerges); @@ -532,9 +526,9 @@ default CompletableFuture splitSwitch(boolean enabled) { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code drainSplits} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * @param enabled enabled or not + * @param enabled enabled or not * @param drainSplits If true, it waits until current split() call, if outstanding, - * to return. + * to return. 
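// Sketch of the drain semantics described above (same assumptions): the call returns immediately,
// but with drainSplits=true the future completes only after any outstanding split() call has returned.
boolean splitsWereEnabled = admin.splitSwitch(false, true).join();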
* @return Previous switch value wrapped by a {@link CompletableFuture} */ CompletableFuture splitSwitch(boolean enabled, boolean drainSplits); @@ -550,22 +544,22 @@ default CompletableFuture splitSwitch(boolean enabled) { * Merge two regions. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent - * regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent + * regions * @deprecated since 2.3.0 and will be removed in 4.0.0.Use {@link #mergeRegions(List, boolean)} * instead. */ @Deprecated default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, - boolean forcible) { + boolean forcible) { return mergeRegions(Arrays.asList(nameOfRegionA, nameOfRegionB), forcible); } /** * Merge multiple regions (>=2). * @param nameOfRegionsToMerge encoded or full name of daughter regions - * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent - * regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two + * adjacent regions */ CompletableFuture mergeRegions(List nameOfRegionsToMerge, boolean forcible); @@ -583,7 +577,7 @@ default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOf /** * Split a table. - * @param tableName table to split + * @param tableName table to split * @param splitPoint the explicit position to split on */ CompletableFuture split(TableName tableName, byte[] splitPoint); @@ -592,7 +586,7 @@ default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOf * Split an individual region. * @param regionName region to split * @param splitPoint the explicit position to split on. If not present, it will decide by region - * server. + * server. */ CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint); @@ -611,12 +605,11 @@ default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOf * regionserver chosen at random. Region could be reassigned back to the same server. Use * {@link #move(byte[], ServerName)} if you want to control the region movement. * @param regionName Encoded or full name of region to unassign. Will clear any existing - * RegionPlan if one found. - * @param forcible If true, force unassign (Will remove region from regions-in-transition too if - * present. If results in double assignment use hbck -fix to resolve. To be used by - * experts). - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * RegionPlan if one found. + * @param forcible If true, force unassign (Will remove region from regions-in-transition too if + * present. If results in double assignment use hbck -fix to resolve. To be used + * by experts). + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -642,10 +635,11 @@ default CompletableFuture unassign(byte[] regionName, boolean forcible) { /** * Move the region r to dest. - * @param regionName Encoded or full name of region to move. + * @param regionName Encoded or full name of region to move. * @param destServerName The servername of the destination regionserver. If not present, we'll - * assign to a random server. A server name is made of host, port and startcode. Here is - * an example: host187.example.com,60020,1289493121758 + * assign to a random server. 
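// Sketch of the merge/split calls above (same assumptions); the two encoded region names are
// placeholders that would normally come from the table's region locator.
admin.mergeRegions(Arrays.asList(encodedRegionA, encodedRegionB), false).join(); // adjacent only unless forcible
admin.split(TableName.valueOf("demo"), Bytes.toBytes("p")).join(); // split at an explicit point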
A server name is made of host, port and + * startcode. Here is an example: + * host187.example.com,60020,1289493121758 */ CompletableFuture move(byte[] regionName, ServerName destServerName); @@ -664,22 +658,22 @@ default CompletableFuture unassign(byte[] regionName, boolean forcible) { /** * Add a new replication peer for replicating data to slave cluster - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication slave cluster */ default CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig) { + ReplicationPeerConfig peerConfig) { return addReplicationPeer(peerId, peerConfig, true); } /** * Add a new replication peer for replicating data to slave cluster - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication slave cluster - * @param enabled peer state, true if ENABLED and false if DISABLED + * @param enabled peer state, true if ENABLED and false if DISABLED */ - CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled); + CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled); /** * Remove a peer and stop the replication @@ -708,27 +702,27 @@ CompletableFuture addReplicationPeer(String peerId, /** * Update the peerConfig for the specified peer - * @param peerId a short name that identifies the peer + * @param peerId a short name that identifies the peer * @param peerConfig new config for the peer */ CompletableFuture updateReplicationPeerConfig(String peerId, - ReplicationPeerConfig peerConfig); + ReplicationPeerConfig peerConfig); /** * Transit current cluster to a new state in a synchronous replication peer. * @param peerId a short name that identifies the peer - * @param state a new state of current cluster + * @param state a new state of current cluster */ CompletableFuture transitReplicationPeerSyncReplicationState(String peerId, - SyncReplicationState state); + SyncReplicationState state); /** * Get the current cluster state in a synchronous replication peer. * @param peerId a short name that identifies the peer * @return the current cluster state wrapped by a {@link CompletableFuture}. */ - default CompletableFuture getReplicationPeerSyncReplicationState( - String peerId) { + default CompletableFuture + getReplicationPeerSyncReplicationState(String peerId) { CompletableFuture future = new CompletableFuture<>(); addListener(listReplicationPeers(Pattern.compile(peerId)), (peers, error) -> { if (error != null) { @@ -745,19 +739,19 @@ default CompletableFuture getReplicationPeerSyncReplicatio /** * Append the replicable table-cf config of the specified peer - * @param peerId a short that identifies the cluster + * @param peerId a short that identifies the cluster * @param tableCfs A map from tableName to column family names */ CompletableFuture appendReplicationPeerTableCFs(String peerId, - Map> tableCfs); + Map> tableCfs); /** * Remove some table-cfs from config of the specified peer - * @param peerId a short name that identifies the cluster + * @param peerId a short name that identifies the cluster * @param tableCfs A map from tableName to column family names */ CompletableFuture removeReplicationPeerTableCFs(String peerId, - Map> tableCfs); + Map> tableCfs); /** * Return a list of replication peers. 
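// Sketch of the replication peer calls above (same assumptions); the peer id and cluster key are
// made-up values built with the client's ReplicationPeerConfig builder.
ReplicationPeerConfig peerConfig =
  ReplicationPeerConfig.newBuilder().setClusterKey("zk1,zk2,zk3:2181:/hbase").build();
admin.addReplicationPeer("peer_1", peerConfig, true).join(); // added in the ENABLED state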
@@ -795,15 +789,15 @@ CompletableFuture removeReplicationPeerTableCFs(String peerId, /** * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be - * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken - * sequentially even when requested concurrently, across all tables. Snapshots are considered - * unique based on the name of the snapshot. Attempts to take a snapshot with the same - * name (even a different type or with different parameters) will fail with a + * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken sequentially + * even when requested concurrently, across all tables. Snapshots are considered unique based on + * the name of the snapshot. Attempts to take a snapshot with the same name (even a + * different type or with different parameters) will fail with a * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate * naming. Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name of the snapshot to be created - * @param tableName name of the table for which snapshot is created + * @param tableName name of the table for which snapshot is created */ default CompletableFuture snapshot(String snapshotName, TableName tableName) { return snapshot(snapshotName, tableName, SnapshotType.FLUSH); @@ -812,31 +806,30 @@ default CompletableFuture snapshot(String snapshotName, TableName tableNam /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the * snapshot. Snapshots are taken sequentially even when requested concurrently, across all - * tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a - * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate - * naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} + * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in + * HBase. See {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot - * @param type type of snapshot to take + * snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take */ default CompletableFuture snapshot(String snapshotName, TableName tableName, - SnapshotType type) { + SnapshotType type) { return snapshot(new SnapshotDescription(snapshotName, tableName, type)); } /** - * Take a snapshot and wait for the server to complete that snapshot asynchronously. Snapshots - * are taken sequentially even when requested concurrently, across all tables. Snapshots are - * considered unique based on the name of the snapshot. 
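// Sketch of the snapshot calls above (same assumptions); snapshot and table names are illustrative
// and must be unique across the cluster, following the usual table-name constraints.
admin.snapshot("demo_snap", TableName.valueOf("demo"), SnapshotType.FLUSH).join();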
- * Attempts to take a snapshot with the same name (even a different type or with different - * parameters) will fail with a {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} - * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in - * HBase. See {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * You should probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you - * are sure about the type of snapshot that you want to take. + * Take a snapshot and wait for the server to complete that snapshot asynchronously. Snapshots are + * taken sequentially even when requested concurrently, across all tables. Snapshots are + * considered unique based on the name of the snapshot. Attempts to take a snapshot with + * the same name (even a different type or with different parameters) will fail with a + * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate + * naming. Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should + * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you are sure + * about the type of snapshot that you want to take. * @param snapshot snapshot to take */ CompletableFuture snapshot(SnapshotDescription snapshot); @@ -874,11 +867,11 @@ default CompletableFuture snapshot(String snapshotName, TableName tableNam * restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken */ default CompletableFuture restoreSnapshot(String snapshotName, - boolean takeFailSafeSnapshot) { + boolean takeFailSafeSnapshot) { return restoreSnapshot(snapshotName, takeFailSafeSnapshot, false); } @@ -889,17 +882,17 @@ default CompletableFuture restoreSnapshot(String snapshotName, * restored. If the restore completes without problem the failsafe snapshot is deleted. The * failsafe snapshot name is configurable by using the property * "hbase.snapshot.restore.failsafe.name". - * @param snapshotName name of the snapshot to restore + * @param snapshotName name of the snapshot to restore * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken - * @param restoreAcl true to restore acl of snapshot + * @param restoreAcl true to restore acl of snapshot */ CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl); + boolean restoreAcl); /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored + * @param tableName name of the table where the snapshot will be restored */ default CompletableFuture cloneSnapshot(String snapshotName, TableName tableName) { return cloneSnapshot(snapshotName, tableName, false); @@ -908,23 +901,23 @@ default CompletableFuture cloneSnapshot(String snapshotName, TableName tab /** * Create a new table by cloning the snapshot content. 
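// Sketch of the restore/clone calls above (same assumptions; names are illustrative). Passing
// takeFailSafeSnapshot=true keeps a failsafe snapshot in case the restore fails part way through.
admin.restoreSnapshot("demo_snap", true, false).join();
admin.cloneSnapshot("demo_snap", TableName.valueOf("demo_clone"), false).join();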
* @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to restore acl of snapshot + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to restore acl of snapshot */ default CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl) { + boolean restoreAcl) { return cloneSnapshot(snapshotName, tableName, restoreAcl, null); } /** * Create a new table by cloning the snapshot content. * @param snapshotName name of the snapshot to be cloned - * @param tableName name of the table where the snapshot will be restored - * @param restoreAcl true to restore acl of snapshot - * @param customSFT specify the StroreFileTracker used for the table + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to restore acl of snapshot + * @param customSFT specify the StroreFileTracker used for the table */ CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT); + boolean restoreAcl, String customSFT); /** * List completed snapshots. @@ -951,13 +944,13 @@ CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, /** * List all the completed snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against * @return - returns a List of completed SnapshotDescription wrapped by a * {@link CompletableFuture} */ CompletableFuture> listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern); + Pattern snapshotNamePattern); /** * Delete an existing snapshot. @@ -985,34 +978,34 @@ CompletableFuture> listTableSnapshots(Pattern tableNam /** * Delete all existing snapshots matching the given table name regular expression and snapshot * name regular expression. - * @param tableNamePattern The compiled table name regular expression to match against + * @param tableNamePattern The compiled table name regular expression to match against * @param snapshotNamePattern The compiled snapshot name regular expression to match against */ CompletableFuture deleteTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern); + Pattern snapshotNamePattern); /** * Execute a distributed procedure on a cluster. * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure */ CompletableFuture execProcedure(String signature, String instance, - Map props); + Map props); /** * Execute a distributed procedure on a cluster. * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). - * @param instance The instance name of the procedure. 
For some procedures, this parameter is - * optional. - * @param props Property/Value pairs of properties passing to the procedure + * root ZK node name of the procedure). + * @param instance The instance name of the procedure. For some procedures, this parameter is + * optional. + * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. null if no return data. */ CompletableFuture execProcedureWithReturn(String signature, String instance, - Map props); + Map props); /** * Check the current state of the specified procedure. There are three possible states: @@ -1022,18 +1015,18 @@ CompletableFuture execProcedureWithReturn(String signature, String insta *
  • running - returns false
  • finished - returns true
  • finished with error - throws the exception that caused the procedure to fail
  • * * @param signature The signature that uniquely identifies a procedure - * @param instance The instance name of the procedure - * @param props Property/Value pairs of properties passing to the procedure + * @param instance The instance name of the procedure + * @param props Property/Value pairs of properties passing to the procedure * @return true if the specified procedure is finished successfully, false if it is still running. * The value is wrapped by {@link CompletableFuture} */ CompletableFuture isProcedureFinished(String signature, String instance, - Map props); + Map props); /** - * Abort a procedure - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * @param procId ID of the procedure to abort + * Abort a procedure Do not use. Usually it is ignored but if not, it can do more damage than + * good. See hbck2. + * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if aborted, false if procedure already completed or does not exist. the value is * wrapped by {@link CompletableFuture} @@ -1056,10 +1049,10 @@ CompletableFuture isProcedureFinished(String signature, String instance CompletableFuture getLocks(); /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. + * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. * @param offload True to offload the regions from the decommissioned servers */ @@ -1074,11 +1067,11 @@ CompletableFuture isProcedureFinished(String signature, String instance /** * Remove decommission marker from a region server to allow regions assignments. Load regions onto * the server if a list of regions is given. Region loading is asynchronous. - * @param server The server to recommission. + * @param server The server to recommission. * @param encodedRegionNames Regions to load onto the server. 
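// Sketch of the decommission/recommission calls above (same assumptions); the server name reuses the
// illustrative host,port,startcode format from the move() javadoc.
ServerName rs = ServerName.valueOf("host187.example.com,60020,1289493121758");
admin.decommissionRegionServers(Collections.singletonList(rs), true).join(); // also offload its regions
admin.recommissionRegionServer(rs, Collections.emptyList()).join();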
*/ CompletableFuture recommissionRegionServer(ServerName server, - List encodedRegionNames); + List encodedRegionNames); /** * @return cluster status wrapped by {@link CompletableFuture} @@ -1110,11 +1103,11 @@ default CompletableFuture> getBackupMasters() { */ default CompletableFuture> getRegionServers() { return getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)) - .thenApply(ClusterMetrics::getServersName); + .thenApply(ClusterMetrics::getServersName); } - default CompletableFuture> getRegionServers( - boolean excludeDecommissionedRS) { + default CompletableFuture> + getRegionServers(boolean excludeDecommissionedRS) { CompletableFuture> future = new CompletableFuture<>(); addListener( getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).thenApply(ClusterMetrics::getServersName), @@ -1144,7 +1137,7 @@ default CompletableFuture> getRegionServers( */ default CompletableFuture> getMasterCoprocessorNames() { return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) - .thenApply(ClusterMetrics::getMasterCoprocessorNames); + .thenApply(ClusterMetrics::getMasterCoprocessorNames); } /** @@ -1152,8 +1145,8 @@ default CompletableFuture> getMasterCoprocessorNames() { * @return master info port */ default CompletableFuture getMasterInfoPort() { - return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply( - ClusterMetrics::getMasterInfoPort); + return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)) + .thenApply(ClusterMetrics::getMasterInfoPort); } /** @@ -1167,8 +1160,7 @@ default CompletableFuture getMasterInfoPort() { CompletableFuture stopMaster(); /** - * Stop the designated regionserver. - * @param serverName + * Stop the designated regionserver. n */ CompletableFuture stopRegionServer(ServerName serverName); @@ -1185,8 +1177,8 @@ default CompletableFuture getMasterInfoPort() { CompletableFuture updateConfiguration(); /** - * Update the configuration and trigger an online config change on all the regionservers in - * the RSGroup. + * Update the configuration and trigger an online config change on all the regionservers in the + * RSGroup. * @param groupName the group name */ CompletableFuture updateConfiguration(String groupName); @@ -1203,24 +1195,19 @@ default CompletableFuture getMasterInfoPort() { CompletableFuture rollWALWriter(ServerName serverName); /** - * Clear compacting queues on a region server. - * @param serverName - * @param queues the set of queue name + * Clear compacting queues on a region server. n * @param queues the set of queue name */ CompletableFuture clearCompactionQueues(ServerName serverName, Set queues); /** - * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver. - * @param serverName - * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} + * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver. n * @return a + * list of {@link RegionMetrics} wrapped by {@link CompletableFuture} */ CompletableFuture> getRegionMetrics(ServerName serverName); /** - * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table. - * @param serverName - * @param tableName - * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} + * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table. 
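// Sketch of the getRegionMetrics(serverName, tableName) call above (same assumptions); prints the
// store file size of each region of the table hosted on the placeholder server.
admin.getRegionMetrics(rs, TableName.valueOf("demo")).join().forEach(
  rm -> System.out.println(Bytes.toStringBinary(rm.getRegionName()) + " " + rm.getStoreFileSize()));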
nn + * * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} */ CompletableFuture> getRegionMetrics(ServerName serverName, TableName tableName); @@ -1245,12 +1232,12 @@ default CompletableFuture getCompactionState(TableName tableNam /** * Get the current compaction state of a table. It could be in a major compaction, a minor * compaction, both, or none. - * @param tableName table to examine + * @param tableName table to examine * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @return the current compaction state wrapped by a {@link CompletableFuture} */ CompletableFuture getCompactionState(TableName tableName, - CompactType compactType); + CompactType compactType); /** * Get the current compaction state of region. It could be in a major compaction, a minor @@ -1301,9 +1288,9 @@ default CompletableFuture balancerSwitch(boolean on) { * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code drainRITs} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * @param on Set to true to enable, false to disable. + * @param on Set to true to enable, false to disable. * @param drainRITs If true, it waits until current balance() call, if outstanding, - * to return. + * to return. * @return Previous balancer value wrapped by a {@link CompletableFuture}. */ CompletableFuture balancerSwitch(boolean on, boolean drainRITs); @@ -1315,8 +1302,7 @@ default CompletableFuture balancerSwitch(boolean on) { * {@link CompletableFuture}. */ default CompletableFuture balance() { - return balance(BalanceRequest.defaultInstance()) - .thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.defaultInstance()).thenApply(BalanceResponse::isBalancerRan); } /** @@ -1326,21 +1312,17 @@ default CompletableFuture balance() { * @param forcible whether we should force balance even if there is region in transition. * @return True if balancer ran, false otherwise. The return value will be wrapped by a * {@link CompletableFuture}. - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ default CompletableFuture balance(boolean forcible) { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(forcible) - .build() - ).thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(forcible).build()) + .thenApply(BalanceResponse::isBalancerRan); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. */ @@ -1348,8 +1330,8 @@ default CompletableFuture balance(boolean forcible) { /** * Query the current state of the balancer. - * @return true if the balance switch is on, false otherwise. The return value will be wrapped by a - * {@link CompletableFuture}. + * @return true if the balance switch is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture}. 
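// Sketch of balance(BalanceRequest) as described above (same assumptions), mirroring the builder
// usage shown in the deprecated balance(boolean) default method.
BalanceResponse resp =
  admin.balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build()).join();
System.out.println("balancer ran: " + resp.isBalancerRan());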
*/ CompletableFuture isBalancerEnabled(); @@ -1385,16 +1367,15 @@ default CompletableFuture normalize() { CompletableFuture normalize(NormalizeTableFilterParams ntfp); /** - * Turn the cleaner chore on/off. - * @param on - * @return Previous cleaner state wrapped by a {@link CompletableFuture} + * Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a + * {@link CompletableFuture} */ CompletableFuture cleanerChoreSwitch(boolean on); /** * Query the current state of the cleaner chore. - * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by - * a {@link CompletableFuture} + * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by a + * {@link CompletableFuture} */ CompletableFuture isCleanerChoreEnabled(); @@ -1406,16 +1387,15 @@ default CompletableFuture normalize() { CompletableFuture runCleanerChore(); /** - * Turn the catalog janitor on/off. - * @param on - * @return the previous state wrapped by a {@link CompletableFuture} + * Turn the catalog janitor on/off. n * @return the previous state wrapped by a + * {@link CompletableFuture} */ CompletableFuture catalogJanitorSwitch(boolean on); /** * Query on the catalog janitor state. - * @return true if the catalog janitor is on, false otherwise. The return value will be - * wrapped by a {@link CompletableFuture} + * @return true if the catalog janitor is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture} */ CompletableFuture isCatalogJanitorEnabled(); @@ -1437,16 +1417,17 @@ default CompletableFuture normalize() { * channel -> xxxService.newStub(channel) * *
    + * * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. - * @param the type of the asynchronous stub - * @param the type of the return value + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param the type of the asynchronous stub + * @param the type of the return value * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}. * @see ServiceCaller */ CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable); + ServiceCaller callable); /** * Execute the given coprocessor call on the given region server. @@ -1459,12 +1440,13 @@ CompletableFuture coprocessorService(Function stubMaker * channel -> xxxService.newStub(channel) * * - * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. + * + * @param stubMaker a delegation to the actual {@code newStub} call. + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. * @param serverName the given region server - * @param the type of the asynchronous stub - * @param the type of the return value + * @param the type of the asynchronous stub + * @param the type of the return value * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}. * @see ServiceCaller */ @@ -1476,7 +1458,7 @@ CompletableFuture coprocessorService(Function stubMaker */ default CompletableFuture> listDeadServers() { return this.getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS)) - .thenApply(ClusterMetrics::getDeadServerNames); + .thenApply(ClusterMetrics::getDeadServerNames); } /** @@ -1498,26 +1480,24 @@ default CompletableFuture> listDeadServers() { /** * Create a new table by cloning the existent table schema. - * - * @param tableName name of the table to be cloned - * @param newTableName name of the new table where the table will be created + * @param tableName name of the table to be cloned + * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved */ - CompletableFuture cloneTableSchema(final TableName tableName, - final TableName newTableName, final boolean preserveSplits); + CompletableFuture cloneTableSchema(final TableName tableName, final TableName newTableName, + final boolean preserveSplits); /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers */ CompletableFuture> compactionSwitch(boolean switchState, - List serverNamesList); + List serverNamesList); /** * Switch the rpc throttle enabled state. 
@@ -1533,8 +1513,8 @@ CompletableFuture> compactionSwitch(boolean switchState CompletableFuture isRpcThrottleEnabled(); /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. * @return Previous exceed throttle enabled value */ @@ -1549,28 +1529,28 @@ CompletableFuture> compactionSwitch(boolean switchState * Fetches the observed {@link SpaceQuotaSnapshotView}s observed by a RegionServer. */ CompletableFuture> - getRegionServerSpaceQuotaSnapshots(ServerName serverName); + getRegionServerSpaceQuotaSnapshots(ServerName serverName); /** * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has * no quota information on that namespace. */ CompletableFuture - getCurrentSpaceQuotaSnapshot(String namespace); + getCurrentSpaceQuotaSnapshot(String namespace); /** * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has * no quota information on that table. */ - CompletableFuture getCurrentSpaceQuotaSnapshot( - TableName tableName); + CompletableFuture + getCurrentSpaceQuotaSnapshot(TableName tableName); /** * Grants user specific permissions - * @param userPermission user name and the specific permission + * @param userPermission user name and the specific permission * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. + * previous granted permissions. otherwise, it'll merge with + * previous granted permissions. */ CompletableFuture grant(UserPermission userPermission, boolean mergeExistingPermissions); @@ -1583,20 +1563,20 @@ CompletableFuture getCurrentSpaceQuotaSnapshot /** * Get the global/namespace/table permissions for user * @param getUserPermissionsRequest A request contains which user, global, namespace or table - * permissions needed + * permissions needed * @return The user and permission list */ CompletableFuture> - getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest); + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest); /** * Check if the user has specific permissions - * @param userName the user name + * @param userName the user name * @param permissions the specific permission list * @return True if user has the specific permissions */ CompletableFuture> hasUserPermissions(String userName, - List permissions); + List permissions); /** * Check if call user has specific permissions @@ -1613,35 +1593,31 @@ default CompletableFuture> hasUserPermissions(List per * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code sync} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * - * @param on Set to true to enable, false to disable. - * @param sync If true, it waits until current snapshot cleanup is completed, - * if outstanding. + * @param on Set to true to enable, false to disable. + * @param sync If true, it waits until current snapshot cleanup is completed, if + * outstanding. * @return Previous auto snapshot cleanup value wrapped by a {@link CompletableFuture}. 
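// Sketch of the snapshot cleanup switch above (same assumptions): the call itself is non-blocking,
// but with sync=true the future completes only after any in-flight cleanup has finished.
boolean cleanupWasEnabled = admin.snapshotCleanupSwitch(true, true).join();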
*/ CompletableFuture snapshotCleanupSwitch(boolean on, boolean sync); /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, false otherwise. - * The return value will be wrapped by a {@link CompletableFuture}. + * @return true if the auto snapshot cleanup is enabled, false otherwise. The return value will be + * wrapped by a {@link CompletableFuture}. */ CompletableFuture isSnapshotCleanupEnabled(); /** - * Retrieves online slow RPC logs from the provided list of - * RegionServers - * - * @param serverNames Server names to get slowlog responses from + * Retrieves online slow RPC logs from the provided list of RegionServers + * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided * @return Online slowlog response list. The return value wrapped by a {@link CompletableFuture} - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. */ @Deprecated - default CompletableFuture> getSlowLogResponses( - final Set serverNames, final LogQueryFilter logQueryFilter) { + default CompletableFuture> + getSlowLogResponses(final Set serverNames, final LogQueryFilter logQueryFilter) { String logType; if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) { logType = "LARGE_LOG"; @@ -1654,21 +1630,17 @@ default CompletableFuture> getSlowLogResponses( filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - CompletableFuture> logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); - return logEntries.thenApply( - logEntryList -> logEntryList.stream().map(logEntry -> (OnlineLogRecord) logEntry) - .collect(Collectors.toList())); + CompletableFuture> logEntries = getLogEntries(serverNames, logType, + ServerType.REGION_SERVER, logQueryFilter.getLimit(), filterParams); + return logEntries.thenApply(logEntryList -> logEntryList.stream() + .map(logEntry -> (OnlineLogRecord) logEntry).collect(Collectors.toList())); } /** - * Clears online slow RPC logs from the provided list of - * RegionServers - * + * Clears online slow RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer. The return value wrapped by a {@link CompletableFuture} + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer. The return value wrapped by a {@link CompletableFuture} */ CompletableFuture> clearSlowLogResponses(final Set serverNames); @@ -1739,11 +1711,10 @@ default CompletableFuture> getSlowLogResponses( CompletableFuture removeRSGroup(String groupName); /** - * Remove decommissioned servers from group - * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline - * the server for repairing. Or we need to move some servers to join other clusters. - * So we need to remove these servers from the group. - * 2. Dead/recovering/live servers will be disallowed. 
+ * Remove decommissioned servers from group 1. Sometimes we may find the server aborted due to + * some hardware failure and we must offline the server for repairing. Or we need to move some + * servers to join other clusters. So we need to remove these servers from the group. 2. + * Dead/recovering/live servers will be disallowed. * @param servers set of servers to remove * @throws IOException if a remote or network exception occurs */ @@ -1751,7 +1722,7 @@ default CompletableFuture> getSlowLogResponses( /** * Move given set of servers to the specified target RegionServer group - * @param servers set of servers to move + * @param servers set of servers to move * @param groupName the group to move servers to * @throws IOException if a remote or network exception occurs */ @@ -1759,7 +1730,7 @@ default CompletableFuture> getSlowLogResponses( /** * Set the RegionServer group for tables - * @param tables tables to set group for + * @param tables tables to set group for * @param groupName group name for tables * @throws IOException if a remote or network exception occurs */ @@ -1778,7 +1749,7 @@ default CompletableFuture balanceRSGroup(String groupName) { /** * Balance regions in the given RegionServer group * @param groupName the group name - * @param request options to define how the balancer should run + * @param request options to define how the balancer should run * @return BalanceResponse details about the balancer run * @throws IOException if a remote or network exception occurs */ @@ -1794,22 +1765,21 @@ default CompletableFuture balanceRSGroup(String groupName) { /** * Update RSGroup configuration - * @param groupName the group name + * @param groupName the group name * @param configuration new configuration of the group name to be set * @throws IOException if a remote or network exception occurs */ CompletableFuture updateRSGroupConfig(String groupName, Map configuration); /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. - * @param logType string representing type of log records - * @param serverType enum for server type: HMaster or RegionServer - * @param limit put a limit to list of records that server should send in response + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of + * servertype=MASTER, logs will only come from the currently active master. 
+ * @param logType string representing type of log records + * @param serverType enum for server type: HMaster or RegionServer + * @param limit put a limit to list of records that server should send in response * @param filterParams additional filter params * @return Log entries representing online records from servers */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java index c55977dba5e9..798bd7b46644 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseServerException; import org.apache.yetus.audience.InterfaceAudience; @@ -56,18 +55,17 @@ public interface AsyncAdminBuilder { AsyncAdminBuilder setRetryPause(long timeout, TimeUnit unit); /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time from this base when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time from this base when retrying. *
    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) * @deprecated Since 2.5.0, will be removed in 4.0.0. Please use - * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. + * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. */ @Deprecated default AsyncAdminBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) { @@ -75,15 +73,14 @@ default AsyncAdminBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) { } /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time when retrying. *
    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) */ AsyncAdminBuilder setRetryPauseForServerOverloaded(long pause, TimeUnit unit); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java index cd023d8134d8..60dccffd4511 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java index f03e8b5cacb3..de6e967f21c4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,8 @@ public interface Callable { private ServerName serverName; public AsyncAdminRequestRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int priority, - long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, - long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { + long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, + long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index 6e4ed552931f..0798915c08de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
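// Sketch of tuning retry pauses through the AsyncAdminBuilder described above (same assumptions,
// plus a java.util.concurrent.TimeUnit import); the overloaded-server pause is kept larger than the
// normal retry pause, as the javadoc recommends.
AsyncAdmin tunedAdmin = conn.getAdminBuilder()
  .setRetryPause(100, TimeUnit.MILLISECONDS)
  .setRetryPauseForServerOverloaded(5, TimeUnit.SECONDS)
  .build();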
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 6e4ed552931f..0798915c08de 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -63,7 +63,9 @@
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.io.netty.util.Timer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
@@ -148,9 +150,8 @@ public int getPriority() {
   }
 
   public AsyncBatchRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn,
-      TableName tableName, List<? extends Row> actions, long pauseNs,
-      long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs,
-      long rpcTimeoutNs, int startLogErrorsCnt) {
+    TableName tableName, List<? extends Row> actions, long pauseNs, long pauseNsForServerOverloaded,
+    int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.conn = conn;
     this.tableName = tableName;
@@ -214,13 +215,13 @@ private List removeErrors(Action action) {
   }
 
   private void logException(int tries, Supplier<Stream<RegionRequest>> regionsSupplier,
-      Throwable error, ServerName serverName) {
+    Throwable error, ServerName serverName) {
     if (tries > startLogErrorsCnt) {
       String regions =
         regionsSupplier.get().map(r -> "'" + r.loc.getRegion().getRegionNameAsString() + "'")
           .collect(Collectors.joining(",", "[", "]"));
-      LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName +
-        " failed, tries=" + tries, error);
+      LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName
+        + " failed, tries=" + tries, error);
     }
   }
 
@@ -275,7 +276,7 @@ private void failAll(Stream<Action> actions, int tries) {
   }
 
   private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
-      List<CellScannable> cells, Map<Integer, Integer> indexMap) throws IOException {
+    List<CellScannable> cells, Map<Integer, Integer> indexMap) throws IOException {
     ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
     ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder();
     ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
@@ -289,21 +290,21 @@
         entry.getValue().actions.stream()
           .sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex()))
           .collect(Collectors.toList()),
-        cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder,
-        nonceGroup, indexMap);
+        cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, nonceGroup,
+        indexMap);
     }
     return multiRequestBuilder.build();
   }
 
   @SuppressWarnings("unchecked")
   private void onComplete(Action action, RegionRequest regionReq, int tries, ServerName serverName,
-      RegionResult regionResult, List<Action> failedActions, Throwable regionException,
-      MutableBoolean retryImmediately) {
+    RegionResult regionResult, List<Action> failedActions, Throwable regionException,
+    MutableBoolean retryImmediately) {
     Object result = regionResult.result.getOrDefault(action.getOriginalIndex(), regionException);
     if (result == null) {
-      LOG.error("Server " + serverName + " sent us neither result nor exception for row '" +
-        Bytes.toStringBinary(action.getAction().getRow()) + "' of " +
-        regionReq.loc.getRegion().getRegionNameAsString());
+      LOG.error("Server " + serverName + " sent us neither result nor exception for row '"
+        + Bytes.toStringBinary(action.getAction().getRow()) + "' of "
+        + regionReq.loc.getRegion().getRegionNameAsString());
       addError(action, new RuntimeException("Invalid response"), serverName);
       failedActions.add(action);
     } else if (result instanceof Throwable) {
@@ -325,7 +326,7 @@ private void onComplete(Action action, RegionRequest regionReq, int tries, Serve
   }
 
   private void onComplete(Map<byte[], RegionRequest> actionsByRegion, int tries,
-      ServerName serverName, MultiResponse resp) {
+    ServerName serverName, MultiResponse resp) {
     ConnectionUtils.updateStats(conn.getStatisticsTracker(), conn.getConnectionMetrics(),
       serverName, resp);
     List<Action> failedActions = new ArrayList<>();
@@ -405,8 +406,8 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr
         onError(serverReq.actionsByRegion, tries, controller.getFailed(), serverName);
       } else {
         try {
-          onComplete(serverReq.actionsByRegion, tries, serverName, ResponseConverter.getResults(req,
-            indexMap, resp, controller.cellScanner()));
+          onComplete(serverReq.actionsByRegion, tries, serverName,
+            ResponseConverter.getResults(req, indexMap, resp, controller.cellScanner()));
         } catch (Exception e) {
           onError(serverReq.actionsByRegion, tries, e, serverName);
           return;
@@ -451,7 +452,7 @@ private void sendOrDelay(Map<ServerName, ServerRequest> actionsByServer, int tri
   }
 
   private void onError(Map<byte[], RegionRequest> actionsByRegion, int tries, Throwable t,
-      ServerName serverName) {
+    ServerName serverName) {
     Throwable error = translateException(t);
     logException(tries, () -> actionsByRegion.values().stream(), error, serverName);
     actionsByRegion.forEach(
@@ -469,7 +470,7 @@ private void onError(Map<byte[], RegionRequest> actionsByRegion, int tries, Thro
   }
 
   private void tryResubmit(Stream<Action> actions, int tries, boolean immediately,
-      boolean isServerOverloaded) {
+    boolean isServerOverloaded) {
     if (immediately) {
       groupAndSend(actions, tries);
       return;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
index 7b21eb5fa13a..e5f28d2e0602 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
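The batch caller's tryResubmit path picks the overloaded-server pause when isServerOverloaded is set and then applies exponential backoff per attempt. A rough, standalone sketch of that pacing; the multiplier table and the 1% jitter mirror the client's usual RETRY_BACKOFF convention but this is not the exact HBase implementation:

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.TimeUnit;

    public class BackoffSketch {
      // per-attempt multipliers applied to the base pause
      static final long[] RETRY_BACKOFF = { 1, 2, 3, 5, 10, 20, 40, 100, 100 };

      static long pauseNs(long basePauseNs, int tries) {
        int index = Math.min(tries, RETRY_BACKOFF.length - 1);
        long normal = basePauseNs * RETRY_BACKOFF[index];
        // small random jitter so retries from many clients do not align
        long jitter = (long) (normal * ThreadLocalRandom.current().nextFloat() * 0.01f);
        return normal + jitter;
      }

      public static void main(String[] args) {
        long base = TimeUnit.MILLISECONDS.toNanos(100);
        for (int tries = 0; tries < 5; tries++) {
          System.out.printf("try %d -> %d ms%n", tries,
            TimeUnit.NANOSECONDS.toMillis(pauseNs(base, tries)));
        }
      }
    }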
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
index ea2528d5152c..ed21fb8e23ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -20,7 +20,6 @@
 import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
 
 import java.util.concurrent.TimeUnit;
-
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java
index cd0496377bc4..ede5b359e833 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -40,7 +40,7 @@ class AsyncBufferedMutatorBuilderImpl implements AsyncBufferedMutatorBuilder {
   private int maxKeyValueSize;
 
   public AsyncBufferedMutatorBuilderImpl(AsyncConnectionConfiguration connConf,
-      AsyncTableBuilder<?> tableBuilder, HashedWheelTimer periodicalFlushTimer) {
+    AsyncTableBuilder<?> tableBuilder, HashedWheelTimer periodicalFlushTimer) {
     this.tableBuilder = tableBuilder;
     this.writeBufferSize = connConf.getWriteBufferSize();
     this.periodicFlushTimeoutNs = connConf.getWriteBufferPeriodicFlushTimeoutNs();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java
index fcd1724d10e5..ce4193d91382 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -62,7 +62,7 @@ class AsyncBufferedMutatorImpl implements AsyncBufferedMutator {
   Timeout periodicFlushTask;
 
   AsyncBufferedMutatorImpl(HashedWheelTimer periodicalFlushTimer, AsyncTable<?> table,
-      long writeBufferSize, long periodicFlushTimeoutNs, int maxKeyValueSize) {
+    long writeBufferSize, long periodicFlushTimeoutNs, int maxKeyValueSize) {
     this.periodicalFlushTimer = periodicalFlushTimer;
     this.table = table;
     this.writeBufferSize = writeBufferSize;
@@ -117,7 +117,7 @@ Stream.<CompletableFuture<Void>> generate(CompletableFuture::new).limit(mutation
     for (Mutation mutation : mutations) {
       heapSize += mutation.heapSize();
       if (mutation instanceof Put) {
-        validatePut((Put)mutation, maxKeyValueSize);
+        validatePut((Put) mutation, maxKeyValueSize);
       }
     }
     synchronized (this) {
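The three buffered-mutator files above cover the builder, its defaults from AsyncConnectionConfiguration, and the size/periodic flush triggers. A small usage sketch, assuming the builder's setWriteBufferSize and setWriteBufferPeriodicFlush setters; table, family, and sizes are placeholders:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncBufferedMutator;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedMutatorExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
          ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncBufferedMutator mutator = conn.getBufferedMutatorBuilder(TableName.valueOf("t1"))
            .setWriteBufferSize(4 * 1024 * 1024)              // flush once 4 MB is buffered
            .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS) // or after 1s, whichever first
            .build();
          Put put = new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // the per-mutation future completes when its buffer is flushed
          java.util.concurrent.CompletableFuture<Void> done = mutator.mutate(put);
          mutator.flush();
          done.get();
          mutator.close();
        }
      }
    }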
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index 9ea26b4afb3c..ed381df7e0da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -27,6 +27,7 @@
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isRemote;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
 import io.opentelemetry.api.trace.Span;
 import io.opentelemetry.api.trace.StatusCode;
 import io.opentelemetry.context.Scope;
@@ -41,7 +42,9 @@
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hbase.thirdparty.io.netty.util.Timer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface;
@@ -90,8 +93,8 @@ class AsyncClientScanner {
   private final Span span;
 
   public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableName tableName,
-      AsyncConnectionImpl conn, Timer retryTimer, long pauseNs, long pauseNsForServerOverloaded,
-      int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
+    AsyncConnectionImpl conn, Timer retryTimer, long pauseNs, long pauseNsForServerOverloaded,
+    int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
     if (scan.getStartRow() == null) {
       scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow());
     }
@@ -123,10 +126,7 @@ public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableN
      * `start()` method. The cost of doing so would be making access to the `span` safe for
      * concurrent threads.
      */
-    span = new TableOperationSpanBuilder(conn)
-      .setTableName(tableName)
-      .setOperation(scan)
-      .build();
+    span = new TableOperationSpanBuilder(conn).setTableName(tableName).setOperation(scan).build();
     if (consumer instanceof AsyncTableResultScanner) {
       AsyncTableResultScanner scanner = (AsyncTableResultScanner) consumer;
       scanner.setSpan(span);
@@ -146,7 +146,7 @@ private static final class OpenScannerResponse {
     public final ScanResponse resp;
 
     public OpenScannerResponse(HRegionLocation loc, boolean isRegionServerRemote, Interface stub,
-        HBaseRpcController controller, ScanResponse resp) {
+      HBaseRpcController controller, ScanResponse resp) {
       this.loc = loc;
       this.isRegionServerRemote = isRegionServerRemote;
       this.stub = stub;
@@ -158,7 +158,7 @@ public OpenScannerResponse(HRegionLocation loc, boolean isRegionServerRemote, In
   private final AtomicInteger openScannerTries = new AtomicInteger();
 
   private CompletableFuture<OpenScannerResponse> callOpenScanner(HBaseRpcController controller,
-      HRegionLocation loc, ClientService.Interface stub) {
+    HRegionLocation loc, ClientService.Interface stub) {
     try (Scope ignored = span.makeCurrent()) {
       boolean isRegionServerRemote = isRemote(loc.getHostname());
       incRPCCallsMetrics(scanMetrics, isRegionServerRemote);
@@ -167,8 +167,8 @@ private CompletableFuture<OpenScannerResponse> callOpenScanner(HBaseRpcControlle
       }
       CompletableFuture<OpenScannerResponse> future = new CompletableFuture<>();
       try {
-        ScanRequest request = RequestConverter.buildScanRequest(
-          loc.getRegion().getRegionName(), scan, scan.getCaching(), false);
+        ScanRequest request = RequestConverter.buildScanRequest(loc.getRegion().getRegionName(),
+          scan, scan.getCaching(), false);
         stub.scan(controller, request, resp -> {
           try (Scope ignored1 = span.makeCurrent()) {
             if (controller.failed()) {
@@ -178,8 +178,8 @@ private CompletableFuture<OpenScannerResponse> callOpenScanner(HBaseRpcControlle
               span.end();
               return;
             }
-            future.complete(new OpenScannerResponse(
-              loc, isRegionServerRemote, stub, controller, resp));
+            future
+              .complete(new OpenScannerResponse(loc, isRegionServerRemote, stub, controller, resp));
           }
         });
       } catch (IOException e) {
@@ -191,17 +191,15 @@ private CompletableFuture<OpenScannerResponse> callOpenScanner(HBaseRpcControlle
   }
 
   private void startScan(OpenScannerResponse resp) {
-    addListener(
-      conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()).location(resp.loc)
-        .remote(resp.isRegionServerRemote)
-        .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub)
-        .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache)
-        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
-        .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS)
-        .maxAttempts(maxAttempts)
-        .startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp),
-      (hasMore, error) -> {
+    addListener(conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId())
+      .location(resp.loc).remote(resp.isRegionServerRemote)
+      .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub)
+      .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache)
+      .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+      .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
+      .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS)
+      .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt)
+      .start(resp.controller, resp.resp), (hasMore, error) -> {
         try (Scope ignored = span.makeCurrent()) {
           if (error != null) {
             try {
@@ -230,17 +228,17 @@ private CompletableFuture<OpenScannerResponse> openScanner(int replicaId) {
     try (Scope ignored = span.makeCurrent()) {
       return conn.callerFactory.<OpenScannerResponse> single().table(tableName)
         .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan))
-        .priority(scan.getPriority())
-        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
         .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
         .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS)
-        .maxAttempts(maxAttempts)
-        .startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner).call();
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner)
+        .call();
     }
   }
 
   private long getPrimaryTimeoutNs() {
-    return TableName.isMetaTableName(tableName) ? conn.connConf.getPrimaryMetaScanTimeoutNs()
+    return TableName.isMetaTableName(tableName)
+      ? conn.connConf.getPrimaryMetaScanTimeoutNs()
       : conn.connConf.getPrimaryScanTimeoutNs();
   }
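AsyncClientScanner is internal; from user code the chain above is driven through AsyncTable. A brief sketch using scanAll, which is the simplest entry point; the table name, caching, and limit are placeholders:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanAllExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
          ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          Scan scan = new Scan().setCaching(100).setLimit(10);
          // scanAll collects every matching Result; retries inside use the
          // pause/pauseForServerOverloaded settings plumbed through above
          conn.getTable(TableName.valueOf("t1")).scanAll(scan).get()
            .forEach(r -> System.out.println(Bytes.toStringBinary(r.getRow())));
        }
      }
    }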
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
index 8839eda802a5..6e96918d1d9a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -97,7 +97,7 @@ default AsyncTable<AdvancedScanResultConsumer> getTable(TableName tableName) {
    * This method no longer checks table existence. An exception will be thrown if the table does not
    * exist only when the first operation is attempted.
    * @param tableName the name of the table
-   * @param pool the thread pool to use for executing callback
+   * @param pool      the thread pool to use for executing callback
    * @return an AsyncTable to use for interactions with this table
    */
   default AsyncTable<ScanResultConsumer> getTable(TableName tableName, ExecutorService pool) {
@@ -110,7 +110,7 @@ default AsyncTable<ScanResultConsumer> getTable(TableName tableName, ExecutorSer
    * This method no longer checks table existence. An exception will be thrown if the table does not
    * exist only when the first operation is attempted.
    * @param tableName the name of the table
-   * @param pool the thread pool to use for executing callback
+   * @param pool      the thread pool to use for executing callback
    */
   AsyncTableBuilder<ScanResultConsumer> getTableBuilder(TableName tableName, ExecutorService pool);
 
@@ -181,7 +181,7 @@ default AsyncBufferedMutator getBufferedMutator(TableName tableName) {
    * {@link #getBufferedMutatorBuilder(TableName, ExecutorService)} if you want to customize some
    * configs.
    * @param tableName the name of the table
-   * @param pool the thread pool to use for executing callback
+   * @param pool      the thread pool to use for executing callback
    * @return an {@link AsyncBufferedMutator} for the supplied tableName.
    */
   default AsyncBufferedMutator getBufferedMutator(TableName tableName, ExecutorService pool) {
@@ -191,7 +191,7 @@ default AsyncBufferedMutator getBufferedMutator(TableName tableName, ExecutorSer
   /**
    * Returns an {@link AsyncBufferedMutatorBuilder} for creating {@link AsyncBufferedMutator}.
    * @param tableName the name of the table
-   * @param pool the thread pool to use for executing callback
+   * @param pool      the thread pool to use for executing callback
    */
   AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName, ExecutorService pool);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
index 2dd5e4a4e2b0..c4a47280a38b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -65,11 +65,11 @@ class AsyncConnectionConfiguration {
   private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionConfiguration.class);
 
   /**
-    * Parameter name for client pause when server is overloaded, denoted by
-    * {@link org.apache.hadoop.hbase.HBaseServerException#isServerOverloaded()}
-    */
+   * Parameter name for client pause when server is overloaded, denoted by
+   * {@link org.apache.hadoop.hbase.HBaseServerException#isServerOverloaded()}
+   */
   public static final String HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED =
-      "hbase.client.pause.server.overloaded";
+    "hbase.client.pause.server.overloaded";
 
   static {
     // This is added where the configs are referenced. It may be too late to happen before
@@ -77,18 +77,18 @@ class AsyncConnectionConfiguration {
     // to handle checking both properties in parsing below. The benefit of calling this is
     // that it should still cause Configuration to log a warning if we do end up falling
     // through to the old deprecated config.
-    Configuration.addDeprecation(
-      HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED);
+    Configuration.addDeprecation(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE,
+      HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED);
   }
 
   /**
-   * Configure the number of failures after which the client will start logging. A few failures
-   * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-   * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at
-   * this stage.
+   * Configure the number of failures after which the client will start logging. A few failures is
+   * fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
+   * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at this
+   * stage.
    */
   public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-      "hbase.client.start.log.errors.counter";
+    "hbase.client.start.log.errors.counter";
   public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 5;
 
   private final long metaOperationTimeoutNs;
@@ -159,8 +159,8 @@ class AsyncConnectionConfiguration {
     if (pauseMsForServerOverloaded < pauseMs) {
       LOG.warn(
         "The {} setting: {} ms is less than the {} setting: {} ms, use the greater one instead",
-        HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED, pauseMsForServerOverloaded,
-        HBASE_CLIENT_PAUSE, pauseMs);
+        HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED, pauseMsForServerOverloaded, HBASE_CLIENT_PAUSE,
+        pauseMs);
       pauseMsForServerOverloaded = pauseMs;
     }
     this.pauseNs = TimeUnit.MILLISECONDS.toNanos(pauseMs);
@@ -168,9 +168,8 @@ class AsyncConnectionConfiguration {
     this.maxRetries = conf.getInt(HBASE_CLIENT_RETRIES_NUMBER, DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
     this.startLogErrorsCnt =
       conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
-    this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
-      conf.getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
-        DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
+    this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf
+      .getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
     this.scannerCaching =
       conf.getInt(HBASE_CLIENT_SCANNER_CACHING, DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
     this.metaScannerCaching =
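The renamed pause key and its floor behavior can be exercised directly through plain Hadoop Configuration; the values below are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PauseConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.client.pause", 100);
        // new key; reads of the old hbase.client.pause.cqtbe still resolve here
        // via the Configuration.addDeprecation call in the static block above
        conf.setLong("hbase.client.pause.server.overloaded", 1000);
        // had this been set below hbase.client.pause, the constructor hunk above
        // logs a warning and substitutes the greater value
        System.out.println(conf.getLong("hbase.client.pause.server.overloaded", 100));
      }
    }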
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 12a792655457..6198086d503f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -27,6 +27,7 @@
 import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY;
 import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
 import io.opentelemetry.api.trace.Span;
 import java.io.IOException;
 import java.net.SocketAddress;
@@ -59,8 +60,10 @@
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
@@ -101,7 +104,7 @@ public class AsyncConnectionImpl implements AsyncConnection {
   private final ConcurrentMap<String, ClientService.Interface> rsStubs = new ConcurrentHashMap<>();
   private final ConcurrentMap<String, AdminService.Interface> adminStubs =
-      new ConcurrentHashMap<>();
+    new ConcurrentHashMap<>();
 
   private final AtomicReference<MasterService.Interface> masterStub = new AtomicReference<>();
 
@@ -182,8 +185,7 @@ private void spawnRenewalChore(final UserGroupInformation user) {
   }
 
   /**
-   * If choreService has not been created yet, create the ChoreService.
-   * @return ChoreService
+   * If choreService has not been created yet, create the ChoreService. n
    */
   synchronized ChoreService getChoreService() {
     if (isClosed()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index fc7ee5c94554..7b44c2d341d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -90,7 +90,7 @@ public CompletableFuture<List<TableDescriptor>> listTableDescriptors(boolean inc
 
   @Override
   public CompletableFuture<List<TableDescriptor>> listTableDescriptors(Pattern pattern,
-      boolean includeSysTables) {
+    boolean includeSysTables) {
     return wrap(rawAdmin.listTableDescriptors(pattern, includeSysTables));
   }
 
@@ -111,7 +111,7 @@ public CompletableFuture<List<TableName>> listTableNames(boolean includeSysTable
 
   @Override
   public CompletableFuture<List<TableName>> listTableNames(Pattern pattern,
-      boolean includeSysTables) {
+    boolean includeSysTables) {
     return wrap(rawAdmin.listTableNames(pattern, includeSysTables));
   }
 
@@ -132,7 +132,7 @@ public CompletableFuture<Void> createTable(TableDescriptor desc) {
 
   @Override
   public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
-      int numRegions) {
+    int numRegions) {
     return wrap(rawAdmin.createTable(desc, startKey, endKey, numRegions));
   }
 
@@ -188,7 +188,7 @@ public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
 
   @Override
   public CompletableFuture<Void> addColumnFamily(TableName tableName,
-      ColumnFamilyDescriptor columnFamily) {
+    ColumnFamilyDescriptor columnFamily) {
     return wrap(rawAdmin.addColumnFamily(tableName, columnFamily));
   }
 
@@ -199,7 +199,7 @@ public CompletableFuture<Void> deleteColumnFamily(TableName tableName, byte[] co
 
   @Override
   public CompletableFuture<Void> modifyColumnFamily(TableName tableName,
-      ColumnFamilyDescriptor columnFamily) {
+    ColumnFamilyDescriptor columnFamily) {
     return wrap(rawAdmin.modifyColumnFamily(tableName, columnFamily));
   }
 
@@ -275,14 +275,13 @@ public CompletableFuture<Void> flushRegionServer(ServerName sn) {
   }
 
   @Override
-  public CompletableFuture<Void> compact(TableName tableName,
-      CompactType compactType) {
+  public CompletableFuture<Void> compact(TableName tableName, CompactType compactType) {
     return wrap(rawAdmin.compact(tableName, compactType));
   }
 
   @Override
-  public CompletableFuture<Void> compact(TableName tableName,
-      byte[] columnFamily, CompactType compactType) {
+  public CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily,
+    CompactType compactType) {
     return wrap(rawAdmin.compact(tableName, columnFamily, compactType));
   }
 
@@ -303,7 +302,7 @@ public CompletableFuture<Void> majorCompact(TableName tableName, CompactType com
 
   @Override
   public CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily,
-      CompactType compactType) {
+    CompactType compactType) {
     return wrap(rawAdmin.majorCompact(tableName, columnFamily, compactType));
   }
 
@@ -408,8 +407,8 @@ public CompletableFuture<List<QuotaSettings>> getQuota(QuotaFilter filter) {
   }
 
   @Override
-  public CompletableFuture<Void> addReplicationPeer(String peerId,
-      ReplicationPeerConfig peerConfig, boolean enabled) {
+  public CompletableFuture<Void> addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig,
+    boolean enabled) {
     return wrap(rawAdmin.addReplicationPeer(peerId, peerConfig, enabled));
   }
 
@@ -435,25 +434,25 @@ public CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(String
 
   @Override
   public CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
-      ReplicationPeerConfig peerConfig) {
+    ReplicationPeerConfig peerConfig) {
     return wrap(rawAdmin.updateReplicationPeerConfig(peerId, peerConfig));
   }
 
   @Override
   public CompletableFuture<Void> transitReplicationPeerSyncReplicationState(String peerId,
-      SyncReplicationState clusterState) {
+    SyncReplicationState clusterState) {
     return wrap(rawAdmin.transitReplicationPeerSyncReplicationState(peerId, clusterState));
   }
 
   @Override
   public CompletableFuture<Void> appendReplicationPeerTableCFs(String peerId,
-      Map<TableName, List<String>> tableCfs) {
+    Map<TableName, List<String>> tableCfs) {
     return wrap(rawAdmin.appendReplicationPeerTableCFs(peerId, tableCfs));
   }
 
   @Override
   public CompletableFuture<Void> removeReplicationPeerTableCFs(String peerId,
-      Map<TableName, List<String>> tableCfs) {
+    Map<TableName, List<String>> tableCfs) {
    return wrap(rawAdmin.removeReplicationPeerTableCFs(peerId, tableCfs));
   }
 
@@ -505,7 +504,7 @@ public CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean take
 
   @Override
   public CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName,
-      boolean restoreAcl, String customSFT) {
+    boolean restoreAcl, String customSFT) {
     return wrap(rawAdmin.cloneSnapshot(snapshotName, tableName, restoreAcl, customSFT));
   }
 
@@ -526,7 +525,7 @@ public CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern t
 
   @Override
   public CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern,
-      Pattern snapshotNamePattern) {
+    Pattern snapshotNamePattern) {
     return wrap(rawAdmin.listTableSnapshots(tableNamePattern, snapshotNamePattern));
   }
 
@@ -552,25 +551,25 @@ public CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern) {
 
   @Override
   public CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern,
-      Pattern snapshotNamePattern) {
+    Pattern snapshotNamePattern) {
     return wrap(rawAdmin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern));
   }
 
   @Override
   public CompletableFuture<Void> execProcedure(String signature, String instance,
-      Map<String, String> props) {
+    Map<String, String> props) {
     return wrap(rawAdmin.execProcedure(signature, instance, props));
   }
 
   @Override
   public CompletableFuture<byte[]> execProcedureWithReturn(String signature, String instance,
-      Map<String, String> props) {
+    Map<String, String> props) {
     return wrap(rawAdmin.execProcedureWithReturn(signature, instance, props));
   }
 
   @Override
   public CompletableFuture<Boolean> isProcedureFinished(String signature, String instance,
-      Map<String, String> props) {
+    Map<String, String> props) {
     return wrap(rawAdmin.isProcedureFinished(signature, instance, props));
   }
 
@@ -591,7 +590,7 @@ public CompletableFuture<List<LockInfo>> getLocks() {
 
   @Override
   public CompletableFuture<Void> decommissionRegionServers(List<ServerName> servers,
-      boolean offload) {
+    boolean offload) {
     return wrap(rawAdmin.decommissionRegionServers(servers, offload));
   }
 
@@ -602,7 +601,7 @@ public CompletableFuture<List<ServerName>> listDecommissionedRegionServers() {
 
   @Override
   public CompletableFuture<Void> recommissionRegionServer(ServerName server,
-      List<byte[]> encodedRegionNames) {
+    List<byte[]> encodedRegionNames) {
     return wrap(rawAdmin.recommissionRegionServer(server, encodedRegionNames));
   }
 
@@ -668,7 +667,7 @@ public CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName server
 
   @Override
   public CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName,
-      TableName tableName) {
+    TableName tableName) {
     return wrap(rawAdmin.getRegionMetrics(serverName, tableName));
   }
 
@@ -678,8 +677,8 @@ public CompletableFuture<Boolean> isMasterInMaintenanceMode() {
   }
 
   @Override
-  public CompletableFuture<CompactionState> getCompactionState(
-      TableName tableName, CompactType compactType) {
+  public CompletableFuture<CompactionState> getCompactionState(TableName tableName,
+    CompactType compactType) {
     return wrap(rawAdmin.getCompactionState(tableName, compactType));
   }
 
@@ -694,8 +693,8 @@ public CompletableFuture<Optional<Long>> getLastMajorCompactionTimestamp(TableNa
   }
 
   @Override
-  public CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(
-      byte[] regionName) {
+  public CompletableFuture<Optional<Long>>
+    getLastMajorCompactionTimestampForRegion(byte[] regionName) {
     return wrap(rawAdmin.getLastMajorCompactionTimestampForRegion(regionName));
   }
 
@@ -761,13 +760,13 @@ public CompletableFuture<Void> runCatalogJanitor() {
 
   @Override
   public <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
-      ServiceCaller<S, R> callable) {
+    ServiceCaller<S, R> callable) {
     return wrap(rawAdmin.coprocessorService(stubMaker, callable));
   }
 
   @Override
   public <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
-      ServiceCaller<S, R> callable, ServerName serverName) {
+    ServiceCaller<S, R> callable, ServerName serverName) {
     return wrap(rawAdmin.coprocessorService(stubMaker, callable, serverName));
   }
 
@@ -788,13 +787,13 @@ public CompletableFuture<Void> clearBlockCache(TableName tableName) {
 
   @Override
   public CompletableFuture<Void> cloneTableSchema(TableName tableName, TableName newTableName,
-      boolean preserveSplits) {
+    boolean preserveSplits) {
     return wrap(rawAdmin.cloneTableSchema(tableName, newTableName, preserveSplits));
   }
 
   @Override
   public CompletableFuture<Map<ServerName, Boolean>> compactionSwitch(boolean switchState,
-      List<String> serverNamesList) {
+    List<String> serverNamesList) {
     return wrap(rawAdmin.compactionSwitch(switchState, serverNamesList));
   }
 
@@ -819,8 +818,8 @@ public CompletableFuture<Map<TableName, Long>> getSpaceQuotaTableSizes() {
   }
 
   @Override
-  public CompletableFuture<Map<TableName, SpaceQuotaSnapshot>> getRegionServerSpaceQuotaSnapshots(
-      ServerName serverName) {
+  public CompletableFuture<Map<TableName, SpaceQuotaSnapshot>>
+    getRegionServerSpaceQuotaSnapshots(ServerName serverName) {
     return wrap(rawAdmin.getRegionServerSpaceQuotaSnapshots(serverName));
   }
 
@@ -836,7 +835,7 @@ public CompletableFuture<SpaceQuotaSnapshot> getCurrentSpaceQuotaSnapshot(TableN
 
   @Override
   public CompletableFuture<Void> grant(UserPermission userPermission,
-      boolean mergeExistingPermissions) {
+    boolean mergeExistingPermissions) {
     return wrap(rawAdmin.grant(userPermission, mergeExistingPermissions));
   }
 
@@ -847,19 +846,18 @@ public CompletableFuture<Void> revoke(UserPermission userPermission) {
 
   @Override
   public CompletableFuture<List<UserPermission>>
-      getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) {
+    getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) {
     return wrap(rawAdmin.getUserPermissions(getUserPermissionsRequest));
   }
 
   @Override
   public CompletableFuture<List<Boolean>> hasUserPermissions(String userName,
-      List<Permission> permissions) {
+    List<Permission> permissions) {
     return wrap(rawAdmin.hasUserPermissions(userName, permissions));
   }
 
   @Override
-  public CompletableFuture<Boolean> snapshotCleanupSwitch(final boolean on,
-      final boolean sync) {
+  public CompletableFuture<Boolean> snapshotCleanupSwitch(final boolean on, final boolean sync) {
     return wrap(rawAdmin.snapshotCleanupSwitch(on, sync));
   }
 
@@ -894,7 +892,8 @@ public CompletableFuture<Void> removeRSGroup(String groupName) {
   }
 
   @Override
-  public CompletableFuture<BalanceResponse> balanceRSGroup(String groupName, BalanceRequest request) {
+  public CompletableFuture<BalanceResponse> balanceRSGroup(String groupName,
+    BalanceRequest request) {
     return wrap(rawAdmin.balanceRSGroup(groupName, request));
   }
 
@@ -940,15 +939,14 @@ public CompletableFuture<Void> renameRSGroup(String oldName, String newName) {
   }
 
   @Override
-  public CompletableFuture<Void>
-      updateRSGroupConfig(String groupName, Map<String, String> configuration) {
+  public CompletableFuture<Void> updateRSGroupConfig(String groupName,
+    Map<String, String> configuration) {
     return wrap(rawAdmin.updateRSGroupConfig(groupName, configuration));
   }
 
   @Override
   public CompletableFuture<List<LogEntry>> getLogEntries(Set<ServerName> serverNames,
-      String logType, ServerType serverType, int limit,
-      Map<String, Object> filterParams) {
+    String logType, ServerType serverType, int limit, Map<String, Object> filterParams) {
     return wrap(rawAdmin.getLogEntries(serverNames, logType, serverType, limit, filterParams));
   }
 }
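Nearly every method above is a one-line delegation through a wrap(...) helper, whose job is to hand the raw future's outcome to a separate pool so user callbacks never run on RPC threads. An illustrative, self-contained analogue of that pattern; this is not the class's actual helper:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class WrapSketch {
      // complete a mirror future on the given pool instead of the caller's thread
      static <T> CompletableFuture<T> wrap(CompletableFuture<T> raw, ExecutorService pool) {
        CompletableFuture<T> wrapped = new CompletableFuture<>();
        raw.whenComplete((value, error) -> pool.execute(() -> {
          if (error != null) {
            wrapped.completeExceptionally(error);
          } else {
            wrapped.complete(value);
          }
        }));
        return wrapped;
      }

      public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        wrap(CompletableFuture.completedFuture("done"), pool)
          .thenAccept(System.out::println)
          .whenComplete((v, e) -> pool.shutdown());
      }
    }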
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
index 976e9e78477c..c02b80c666ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -44,8 +44,8 @@ public interface Callable {
   private final Callable<T> callable;
 
   public AsyncMasterRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn,
-      Callable<T> callable, int priority, long pauseNs, long pauseNsForServerOverloaded,
-      int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
+    Callable<T> callable, int priority, long pauseNs, long pauseNsForServerOverloaded,
+    int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
     super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxRetries,
       operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
     this.callable = callable;
@@ -53,8 +53,10 @@ public AsyncMasterRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl
 
   private void clearMasterStubCacheOnError(MasterService.Interface stub, Throwable error) {
     // ServerNotRunningYetException may because it is the backup master.
-    if (ClientExceptionsUtil.isConnectionException(error) ||
-      error instanceof ServerNotRunningYetException) {
+    if (
+      ClientExceptionsUtil.isConnectionException(error)
+        || error instanceof ServerNotRunningYetException
+    ) {
       conn.clearMasterStubCache(stub);
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
index 5ae9de6c476d..161160e63599 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -77,8 +77,10 @@ private void addLocationToCache(HRegionLocation loc) {
       }
     }
     HRegionLocation oldLoc = oldLocs.getRegionLocation(replicaId);
-    if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() ||
-      oldLoc.getServerName().equals(loc.getServerName()))) {
+    if (
+      oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum()
+        || oldLoc.getServerName().equals(loc.getServerName()))
+    ) {
       return;
     }
     RegionLocations newLocs = replaceRegionLocation(oldLocs, loc);
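The addLocationToCache guard above skips cache updates that are stale (lower seqNum) or redundant (same server). A standalone restatement of just that predicate, with placeholder types standing in for HRegionLocation:

    public class LocationCacheGuard {
      // stand-ins for HRegionLocation's seqNum and server name
      static boolean skipUpdate(long oldSeqNum, String oldServer, long newSeqNum,
          String newServer) {
        // keep the cached entry when it is newer, or already points at the same server
        return oldSeqNum > newSeqNum || oldServer.equals(newServer);
      }

      public static void main(String[] args) {
        System.out.println(skipUpdate(5, "rs1,16020,1", 4, "rs2,16020,2")); // true: stale update
        System.out.println(skipUpdate(5, "rs1,16020,1", 6, "rs2,16020,2")); // false: accept newer
      }
    }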
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 877b47acd104..2e3b7f566916 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -158,7 +158,7 @@ public void clearCompletedRequests(RegionLocations locations) {
   }
 
   private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations> future,
-      RegionLocations locations) {
+    RegionLocations locations) {
     if (future.isDone()) {
       return true;
     }
@@ -178,8 +178,8 @@ private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations
       byte[] endKey = loc.getRegion().getEndKey();
       int c = Bytes.compareTo(endKey, req.row);
-      completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) &&
-        Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0);
+      completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey))
+        && Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0);
     } else {
       completed = loc.getRegion().containsRow(req.row);
     }
@@ -200,21 +200,21 @@ private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations
-      this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory.createSelector(
-        replicaSelectorClass, META_TABLE_NAME, conn, () -> {
+      this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory
+        .createSelector(replicaSelectorClass, META_TABLE_NAME, conn, () -> {
           int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
           try {
-            RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get(
-              conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
+            RegionLocations metaLocations = conn.registry.getMetaRegionLocations()
+              .get(conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
             numOfReplicas = metaLocations.size();
           } catch (Exception e) {
             LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
@@ -224,8 +224,8 @@ private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations
   private CompletableFuture<RegionLocations> getRegionLocationsInternal(TableName tableName,
-      byte[] row, int replicaId, RegionLocateType locateType, boolean reload) {
+    byte[] row, int replicaId, RegionLocateType locateType, boolean reload) {
     // AFTER should be convert to CURRENT before calling this method
     assert !locateType.equals(RegionLocateType.AFTER);
     TableCache tableCache = getTableCache(tableName);
@@ -598,7 +600,7 @@ private CompletableFuture<RegionLocations> getRegionLocationsInternal(TableName
   }
 
   CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
-      int replicaId, RegionLocateType locateType, boolean reload) {
+    int replicaId, RegionLocateType locateType, boolean reload) {
     // as we know the exact row after us, so we can just create the new row, and use the same
     // algorithm to locate it.
     if (locateType.equals(RegionLocateType.AFTER)) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 39c5b040443c..da58dd8e1e53 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -99,11 +99,8 @@ private boolean isMeta(TableName tableName) {
     return TableName.isMetaTableName(tableName);
   }
 
-  private <T> CompletableFuture<T> tracedLocationFuture(
-    Supplier<CompletableFuture<T>> action,
-    Function<T, List<String>> getRegionNames,
-    Supplier<Span> spanSupplier
-  ) {
+  private <T> CompletableFuture<T> tracedLocationFuture(Supplier<CompletableFuture<T>> action,
+    Function<T, List<String>> getRegionNames, Supplier<Span> spanSupplier) {
     final Span span = spanSupplier.get();
     try (Scope scope = span.makeCurrent()) {
       CompletableFuture<T> future = action.get();
@@ -127,50 +124,44 @@ private static List<String> getRegionNames(RegionLocations locs) {
     if (locs == null || locs.getRegionLocations() == null) {
       return Collections.emptyList();
     }
-    return Arrays.stream(locs.getRegionLocations())
-      .filter(Objects::nonNull)
-      .map(HRegionLocation::getRegion)
-      .map(RegionInfo::getRegionNameAsString)
+    return Arrays.stream(locs.getRegionLocations()).filter(Objects::nonNull)
+      .map(HRegionLocation::getRegion).map(RegionInfo::getRegionNameAsString)
       .collect(Collectors.toList());
   }
 
   private static List<String> getRegionNames(HRegionLocation location) {
-    return Optional.ofNullable(location)
-      .map(HRegionLocation::getRegion)
-      .map(RegionInfo::getRegionNameAsString)
-      .map(Collections::singletonList)
+    return Optional.ofNullable(location).map(HRegionLocation::getRegion)
+      .map(RegionInfo::getRegionNameAsString).map(Collections::singletonList)
       .orElseGet(Collections::emptyList);
   }
 
   CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
     RegionLocateType type, boolean reload, long timeoutNs) {
     final Supplier<Span> supplier = new TableSpanBuilder(conn)
-      .setName("AsyncRegionLocator.getRegionLocations")
-      .setTableName(tableName);
+      .setName("AsyncRegionLocator.getRegionLocations").setTableName(tableName);
     return tracedLocationFuture(() -> {
-      CompletableFuture<RegionLocations> future = isMeta(tableName) ?
-        metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) :
-        nonMetaRegionLocator.getRegionLocations(tableName, row,
+      CompletableFuture<RegionLocations> future = isMeta(tableName)
+        ? metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload)
+        : nonMetaRegionLocator.getRegionLocations(tableName, row,
           RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload);
       return withTimeout(future, timeoutNs,
-        () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) +
-          "ms) waiting for region locations for " + tableName + ", row='" +
-          Bytes.toStringBinary(row) + "'");
+        () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs)
+          + "ms) waiting for region locations for " + tableName + ", row='"
+          + Bytes.toStringBinary(row) + "'");
     }, AsyncRegionLocator::getRegionNames, supplier);
   }
 
   CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
     int replicaId, RegionLocateType type, boolean reload, long timeoutNs) {
     final Supplier<Span> supplier = new TableSpanBuilder(conn)
-      .setName("AsyncRegionLocator.getRegionLocation")
-      .setTableName(tableName);
+      .setName("AsyncRegionLocator.getRegionLocation").setTableName(tableName);
     return tracedLocationFuture(() -> {
       // meta region can not be split right now so we always call the same method.
       // Change it later if the meta table can have more than one regions.
       CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
-      CompletableFuture<RegionLocations> locsFuture =
-        isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) :
-          nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload);
+      CompletableFuture<RegionLocations> locsFuture = isMeta(tableName)
+        ? metaRegionLocator.getRegionLocations(replicaId, reload)
+        : nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload);
       addListener(locsFuture, (locs, error) -> {
         if (error != null) {
           future.completeExceptionally(error);
@@ -179,21 +170,21 @@ CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[]
         HRegionLocation loc = locs.getRegionLocation(replicaId);
         if (loc == null) {
           future.completeExceptionally(
-            new RegionOfflineException("No location for " + tableName + ", row='" +
-              Bytes.toStringBinary(row) + "', locateType=" + type + ", replicaId=" + replicaId));
+            new RegionOfflineException("No location for " + tableName + ", row='"
+              + Bytes.toStringBinary(row) + "', locateType=" + type + ", replicaId=" + replicaId));
         } else if (loc.getServerName() == null) {
           future.completeExceptionally(
-            new RegionOfflineException("No server address listed for region '" +
-              loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) +
-              "', locateType=" + type + ", replicaId=" + replicaId));
+            new RegionOfflineException("No server address listed for region '"
+              + loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row)
+              + "', locateType=" + type + ", replicaId=" + replicaId));
         } else {
           future.complete(loc);
         }
       });
       return withTimeout(future, timeoutNs,
-        () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) +
-          "ms) waiting for region location for " + tableName + ", row='" +
-          Bytes.toStringBinary(row) + "', replicaId=" + replicaId);
+        () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs)
+          + "ms) waiting for region location for " + tableName + ", row='"
+          + Bytes.toStringBinary(row) + "', replicaId=" + replicaId);
     }, AsyncRegionLocator::getRegionNames, supplier);
   }
 
@@ -222,9 +213,8 @@ void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
   }
 
   void clearCache(TableName tableName) {
-    Supplier<Span> supplier = new TableSpanBuilder(conn)
-      .setName("AsyncRegionLocator.clearCache")
-      .setTableName(tableName);
+    Supplier<Span> supplier =
+      new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName);
     TraceUtil.trace(() -> {
       LOG.debug("Clear meta cache for {}", tableName);
       if (tableName.equals(META_TABLE_NAME)) {
@@ -236,9 +226,9 @@ void clearCache(TableName tableName) {
   }
 
   void clearCache(ServerName serverName) {
-    Supplier<Span> supplier = new ConnectionSpanBuilder(conn)
-      .setName("AsyncRegionLocator.clearCache")
-      .addAttribute(SERVER_NAME_KEY, serverName.getServerName());
+    Supplier<Span> supplier =
+      new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache")
+        .addAttribute(SERVER_NAME_KEY, serverName.getServerName());
     TraceUtil.trace(() -> {
       LOG.debug("Clear meta cache for {}", serverName);
       metaRegionLocator.clearCache(serverName);
@@ -248,8 +238,8 @@ void clearCache(ServerName serverName) {
   }
 
   void clearCache() {
-    Supplier<Span> supplier = new ConnectionSpanBuilder(conn)
-      .setName("AsyncRegionLocator.clearCache");
+    Supplier<Span> supplier =
+      new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache");
     TraceUtil.trace(() -> {
       metaRegionLocator.clearCache();
       nonMetaRegionLocator.clearCache();
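Both locator methods above cap their futures with withTimeout(...). A simplified, self-contained stand-in showing the contract; this is not ConnectionUtils' actual implementation:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    public class TimeoutSketch {
      static final ScheduledExecutorService TIMER = Executors.newSingleThreadScheduledExecutor();

      // fail the future with TimeoutException if it is not done within timeoutNs
      static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeoutNs,
          Supplier<String> timeoutMsg) {
        if (timeoutNs > 0) {
          TIMER.schedule(() -> {
            if (!future.isDone()) {
              future.completeExceptionally(new TimeoutException(timeoutMsg.get()));
            }
          }, timeoutNs, TimeUnit.NANOSECONDS);
        }
        return future;
      }

      public static void main(String[] args) throws Exception {
        CompletableFuture<String> f = withTimeout(new CompletableFuture<>(),
          TimeUnit.MILLISECONDS.toNanos(50), () -> "Timeout(50ms) waiting for region locations");
        f.whenComplete((v, e) -> { System.out.println(e.getMessage()); TIMER.shutdown(); });
        Thread.sleep(200);
      }
    }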
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
index 4c6cd5a01172..cc0eccca6e29 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
@@ -19,6 +19,7 @@
 import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException;
 import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException;
+
 import java.util.Arrays;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -50,14 +51,14 @@ static boolean canUpdateOnError(HRegionLocation loc, HRegionLocation oldLoc) {
     if (oldLoc == null || oldLoc.getServerName() == null) {
       return false;
     }
-    return oldLoc.getSeqNum() <= loc.getSeqNum() &&
-      oldLoc.getServerName().equals(loc.getServerName());
+    return oldLoc.getSeqNum() <= loc.getSeqNum()
+      && oldLoc.getServerName().equals(loc.getServerName());
   }
 
   static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception,
-      Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
-      Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache,
-      MetricsConnection metrics) {
+    Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
+    Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache,
+    MetricsConnection metrics) {
     HRegionLocation oldLoc = cachedLocationSupplier.apply(loc);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Try updating {} , the old value is {}, error={}", loc, oldLoc,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java
index 65fbbd53f4a1..a19d3b039f18 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -16,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
@@ -44,6 +42,7 @@
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.io.netty.util.Timer;
 
 @InterfaceAudience.Private
@@ -80,8 +79,8 @@ public abstract class AsyncRpcRetryingCaller<T> {
   protected final HBaseRpcController controller;
 
   public AsyncRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int priority,
-      long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs,
-      long rpcTimeoutNs, int startLogErrorsCnt) {
+    long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs,
+    long rpcTimeoutNs, int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.conn = conn;
     this.priority = priority;
@@ -126,8 +125,8 @@ protected final void resetCallTimeout() {
   }
 
   private void tryScheduleRetry(Throwable error) {
-    long pauseNsToUse = HBaseServerException.isServerOverloaded(error) ?
-      pauseNsForServerOverloaded : pauseNs;
+    long pauseNsToUse =
+      HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs;
     long delayNs;
     if (operationTimeoutNs > 0) {
       long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
@@ -158,7 +157,7 @@ protected Throwable preProcessError(Throwable error) {
   }
 
   protected final void onError(Throwable t, Supplier<String> errMsg,
-      Consumer<Throwable> updateCachedLocation) {
+    Consumer<Throwable> updateCachedLocation) {
     if (future.isDone()) {
       // Give up if the future is already done, this is possible if user has already canceled the
       // future. And for timeline consistent read, we will also cancel some requests if we have
@@ -178,9 +177,9 @@ protected final void onError(Throwable t, Supplier<String> errMsg,
       return;
     }
     if (tries > startLogErrorsCnt) {
-      LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts +
-        ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) +
-        " ms, time elapsed = " + elapsedMs() + " ms", error);
+      LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts
+        + ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs)
+        + " ms, time elapsed = " + elapsedMs() + " ms", error);
     }
     updateCachedLocation.accept(error);
     RetriesExhaustedException.ThrowableWithExtraContext qt =
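tryScheduleRetry above folds three inputs together: which base pause applies, the per-attempt exponential multiplier, and the operation deadline. A compact restatement under those assumptions; the multiplier table and delta constant are illustrative, not the class's actual values:

    import java.util.concurrent.TimeUnit;

    public class RetryDelaySketch {
      static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1); // assumed slack
      static final long[] BACKOFF = { 1, 2, 3, 5, 10, 20, 40, 100 };

      /** Returns the delay before the next attempt, or -1 to give up. */
      static long delayNs(boolean serverOverloaded, long pauseNs, long pauseNsForServerOverloaded,
          int tries, long remainingTimeNs) {
        long base = serverOverloaded ? pauseNsForServerOverloaded : pauseNs;
        long wanted = base * BACKOFF[Math.min(tries, BACKOFF.length - 1)];
        long maxDelayNs = remainingTimeNs - SLEEP_DELTA_NS;
        if (maxDelayNs <= 0) {
          return -1; // deadline exhausted: surface RetriesExhaustedException instead
        }
        return Math.min(maxDelayNs, wanted);
      }

      public static void main(String[] args) {
        System.out.println(delayNs(true, 100, 1000, 2, TimeUnit.SECONDS.toNanos(1)));
      }
    }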
*/ public CompletableFuture start(HBaseRpcController controller, - ScanResponse respWhenOpen) { + ScanResponse respWhenOpen) { return build().start(controller, respWhenOpen); } } @@ -386,8 +386,8 @@ public class MasterRequestCallerBuilder extends BuilderBase { private int priority = PRIORITY_UNSET; - public MasterRequestCallerBuilder action( - AsyncMasterRequestRpcRetryingCaller.Callable callable) { + public MasterRequestCallerBuilder + action(AsyncMasterRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -468,8 +468,8 @@ public class AdminRequestCallerBuilder extends BuilderBase { private int priority; - public AdminRequestCallerBuilder action( - AsyncAdminRequestRetryingCaller.Callable callable) { + public AdminRequestCallerBuilder + action(AsyncAdminRequestRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -540,8 +540,8 @@ public class ServerRequestCallerBuilder extends BuilderBase { private ServerName serverName; - public ServerRequestCallerBuilder action( - AsyncServerRequestRpcRetryingCaller.Callable callable) { + public ServerRequestCallerBuilder + action(AsyncServerRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -583,9 +583,9 @@ public ServerRequestCallerBuilder serverName(ServerName serverName) { public AsyncServerRequestRpcRetryingCaller build() { return new AsyncServerRequestRpcRetryingCaller(retryTimer, conn, pauseNs, - pauseNsForServerOverloaded, - maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, - checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); + pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, + startLogErrorsCnt, checkNotNull(serverName, "serverName is null"), + checkNotNull(callable, "action is null")); } public CompletableFuture call() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index 84d14aefcebe..dbaae5c26e2e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException; import static org.apache.hadoop.hbase.client.ConnectionUtils.updateResultsMetrics; import static org.apache.hadoop.hbase.client.ConnectionUtils.updateServerSideMetrics; + import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; import java.io.IOException; @@ -51,9 +52,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hbase.thirdparty.io.netty.util.Timer; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; @@ -72,7 +75,7 @@ class AsyncScanSingleRegionRpcRetryingCaller { private static final Logger LOG = - LoggerFactory.getLogger(AsyncScanSingleRegionRpcRetryingCaller.class); + LoggerFactory.getLogger(AsyncScanSingleRegionRpcRetryingCaller.class); private final Timer retryTimer; @@ -127,7 +130,10 @@ class AsyncScanSingleRegionRpcRetryingCaller { private long nextCallSeq = -1L; private enum ScanControllerState { - INITIALIZED, SUSPENDED, TERMINATED, DESTROYED + INITIALIZED, + SUSPENDED, + TERMINATED, + DESTROYED } // Since suspend and terminate should only be called within onNext or onHeartbeat(see the comments @@ -169,8 +175,8 @@ public ScanControllerImpl(Optional cursor) { private void preCheck() { Preconditions.checkState(Thread.currentThread() == callerThread, - "The current thread is %s, expected thread is %s, " + - "you should not call this method outside onNext or onHeartbeat", + "The current thread is %s, expected thread is %s, " + + "you should not call this method outside onNext or onHeartbeat", Thread.currentThread(), callerThread); Preconditions.checkState(state.equals(ScanControllerState.INITIALIZED), "Invalid Stopper state %s", state); @@ -200,12 +206,14 @@ ScanControllerState destroy() { @Override public Optional cursor() { - return cursor; + return cursor; } } private enum ScanResumerState { - INITIALIZED, SUSPENDED, RESUMED + INITIALIZED, + SUSPENDED, + RESUMED } // The resume method is allowed to be called in another thread so here we also use the @@ -304,11 +312,11 @@ synchronized boolean prepare(ScanResponse resp, int numberOfCompleteRows) { } public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - Scan scan, ScanMetrics scanMetrics, long scannerId, ScanResultCache resultCache, - AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc, - boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs, - long pauseNsForServerOverloaded, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, - int startLogErrorsCnt) { + Scan scan, ScanMetrics scanMetrics, long scannerId, ScanResultCache resultCache, + AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc, + boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs, + long pauseNsForServerOverloaded, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, + int startLogErrorsCnt) { this.retryTimer = retryTimer; this.scan = scan; this.scanMetrics = scanMetrics; @@ -351,10 +359,9 @@ private void closeScanner() { ScanRequest req = 
RequestConverter.buildScanRequest(this.scannerId, 0, true, false); stub.scan(controller, req, resp -> { if (controller.failed()) { - LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId + - " for " + loc.getRegion().getEncodedName() + " of " + - loc.getRegion().getTable() + " failed, ignore, probably already closed", - controller.getFailed()); + LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId + + " for " + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, ignore, probably already closed", controller.getFailed()); } }); } @@ -391,16 +398,15 @@ private void completeWhenError(boolean closeScanner) { private void onError(Throwable error) { error = translateException(error); if (tries > startLogErrorsCnt) { - LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " + - loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + - " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + - TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + - " ms", - error); + LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " + + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + + " ms", error); } boolean scannerClosed = - error instanceof UnknownScannerException || error instanceof NotServingRegionException || - error instanceof RegionServerStoppedException || error instanceof ScannerResetException; + error instanceof UnknownScannerException || error instanceof NotServingRegionException + || error instanceof RegionServerStoppedException || error instanceof ScannerResetException; RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(error, EnvironmentEdgeManager.currentTime(), ""); @@ -410,8 +416,8 @@ private void onError(Throwable error) { return; } long delayNs; - long pauseNsToUse = HBaseServerException.isServerOverloaded(error) ? - pauseNsForServerOverloaded : pauseNs; + long pauseNsToUse = + HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs; if (scanTimeoutNs > 0) { long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS; if (maxDelayNs <= 0) { @@ -508,8 +514,7 @@ private void onComplete(HBaseRpcController controller, ScanResponse resp) { ScanControllerImpl scanController; if (results.length > 0) { scanController = new ScanControllerImpl( - resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor())) - : Optional.empty()); + resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor())) : Optional.empty()); updateNextStartRowWhenError(results[results.length - 1]); consumer.onNext(results, scanController); } else { @@ -594,7 +599,7 @@ private void renewLease() { nextCallSeq++; resetController(controller, rpcTimeoutNs, priority); ScanRequest req = - RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq, false, true, -1); + RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq, false, true, -1); stub.scan(controller, req, resp -> { }); } @@ -607,7 +612,7 @@ private void renewLease() { * @return {@code true} if we should continue, otherwise {@code false}.
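The onError logic reflowed above encodes the client retry policy: use the larger pauseNsForServerOverloaded when HBaseServerException.isServerOverloaded(error) reports pushback, grow the delay with the attempt count, and cap the sleep by the time remaining before the scan timeout. A paraphrased sketch of that policy, not the actual class (SLEEP_DELTA_NS here and the plain power-of-two backoff are simplifying assumptions; the real callers use ConnectionUtils.getPauseTime and HBase's backoff table):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseServerException;

final class RetryPauseSketch {
  // assumed safety margin so we never sleep right up to the deadline
  private static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);

  // tries = attempts made so far; remainingTimeNs = time left before the operation times out
  static long chooseDelayNs(Throwable error, long pauseNs, long pauseNsForServerOverloaded,
      long remainingTimeNs, int tries) {
    long pauseNsToUse =
      HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs;
    // roughly exponential backoff; the shift is capped to avoid overflow
    long delayNs = pauseNsToUse * (1L << Math.min(Math.max(tries - 1, 0), 30));
    // never sleep past the deadline
    return Math.min(delayNs, remainingTimeNs - SLEEP_DELTA_NS);
  }
}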
*/ public CompletableFuture start(HBaseRpcController controller, - ScanResponse respWhenOpen) { + ScanResponse respWhenOpen) { onComplete(controller, respWhenOpen); return future; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java index 8c6cf81f4c71..40cd3b87e928 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; /** - * Retry caller for a request call to region server. - * Now only used for coprocessor call to region server. + * Retry caller for a request call to region server. Now only used for coprocessor call to region + * server. */ @InterfaceAudience.Private public class AsyncServerRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { @@ -46,10 +46,10 @@ public interface Callable { private ServerName serverName; public AsyncServerRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, - long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { - super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseNsForServerOverloaded, - maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, + long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { + super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseNsForServerOverloaded, maxAttempts, + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java index 31fa1834bb70..9c115af97b5b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ class AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { @FunctionalInterface public interface Callable { CompletableFuture call(HBaseRpcController controller, HRegionLocation loc, - ClientService.Interface stub); + ClientService.Interface stub); } private final TableName tableName; @@ -55,9 +55,9 @@ CompletableFuture call(HBaseRpcController controller, HRegionLocation loc, private final Callable callable; public AsyncSingleRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, - TableName tableName, byte[] row, int replicaId, RegionLocateType locateType, - Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, - int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + TableName tableName, byte[] row, int replicaId, RegionLocateType locateType, + Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, + int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.tableName = tableName; @@ -73,8 +73,8 @@ private void call(HRegionLocation loc) { stub = conn.getRegionServerStub(loc.getServerName()); } catch (IOException e) { onError(e, - () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + - "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + + "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } @@ -82,8 +82,8 @@ private void call(HRegionLocation loc) { addListener(callable.call(controller, loc, stub), (result, error) -> { if (error != null) { onError(error, - () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + - loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index df25351e1017..5497b4a0b723 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -154,7 +154,7 @@ default CompletableFuture exists(Get get) { * write operations to a row are synchronized, but readers do not take row locks so get and scan * operations can see this operation partially completed. * @param append object that specifies the columns and amounts to be used for the append - * operations + * operations * @return values of columns after the append operation (may be null). The return value will be * wrapped by a {@link CompletableFuture}.
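Since the append javadoc above is terse, a minimal usage sketch may help; it assumes an open AsyncConnection named conn, and the table, row and column names are invented:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

class AppendSketch {
  static void demo(AsyncConnection conn) {
    AsyncTable<?> table = conn.getTable(TableName.valueOf("demo_table"));
    Append append = new Append(Bytes.toBytes("row1"));
    append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("events"), Bytes.toBytes("|login"));
    CompletableFuture<Result> future = table.append(append);
    // completes with the cell values as they look after the append was applied
    future.thenAccept(result -> System.out.println(
      Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("events")))));
  }
}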
*/ @@ -167,7 +167,7 @@ default CompletableFuture exists(Get get) { * so write operations to a row are synchronized, but readers do not take row locks so get and * scan operations can see this operation partially completed. * @param increment object that specifies the columns and amounts to be used for the increment - * operations + * operations * @return values of columns after the increment. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -177,10 +177,11 @@ default CompletableFuture exists(Get get) { * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} *
<p>
    * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the amount is negative). + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @return The new value, post increment. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -196,10 +197,11 @@ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, *
<p>
    * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose * any increments that have not been flushed. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. - * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the amount is negative). + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @param durability The persistence guarantee for this increment. * @return The new value, post increment. The return value will be wrapped by a * {@link CompletableFuture}. @@ -272,7 +274,7 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { /** * @param compareOp comparison operator to use - * @param value the expected value + * @param value the expected value */ CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value); @@ -397,7 +399,7 @@ interface CheckAndMutateWithFilterBuilder { /** * The scan API uses the observer pattern. - * @param scan A configured {@link Scan} object. + * @param scan A configured {@link Scan} object. * @param consumer the consumer used to receive results. * @see ScanResultConsumer * @see AdvancedScanResultConsumer @@ -415,7 +417,7 @@ default ResultScanner getScanner(byte[] family) { /** * Gets a scanner on the current table for the given family and qualifier. - * @param family The column family to scan. + * @param family The column family to scan. * @param qualifier The column qualifier to scan. * @return A scanner. */ @@ -461,7 +463,7 @@ default ResultScanner getScanner(byte[] family, byte[] qualifier) { * a {@code ResultScanner} or let you pass in a {@code ScanResultConsumer}. There is no * performance difference between these scan methods so do not worry. * @param scan A configured {@link Scan} object. So if you use this method to fetch a really large - * result set, it is likely to cause OOM. + * result set, it is likely to cause OOM. * @return The results of this small scan operation. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -578,11 +580,11 @@ default CompletableFuture> batchAll(List actions) { * * * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. - * @param row The row key used to identify the remote region location - * @param the type of the asynchronous stub - * @param the type of the return value + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param row The row key used to identify the remote region location + * @param the type of the asynchronous stub + * @param the type of the return value * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}. 
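To make the Durability trade-off described above concrete, here is a hedged sketch of both incrementColumnValue overloads (an open AsyncConnection conn is assumed; names are invented, and SKIP_WAL really can lose unflushed increments, as the javadoc warns):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.util.Bytes;

class IncrementSketch {
  static void demo(AsyncConnection conn) {
    AsyncTable<?> table = conn.getTable(TableName.valueOf("counters"));
    // defaulted Durability#SYNC_WAL: the durable choice
    table.incrementColumnValue(Bytes.toBytes("page1"), Bytes.toBytes("cf"),
      Bytes.toBytes("hits"), 1L).thenAccept(v -> System.out.println("hits = " + v));
    // Durability#SKIP_WAL: faster, but unflushed increments are lost if the server fails
    table.incrementColumnValue(Bytes.toBytes("page1"), Bytes.toBytes("cf"),
      Bytes.toBytes("approxViews"), 1L, Durability.SKIP_WAL);
  }
}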
* @see ServiceCaller */ @@ -638,13 +640,13 @@ interface CoprocessorCallback { /** * @param region the region that the response belongs to - * @param resp the response of the coprocessor call + * @param resp the response of the coprocessor call */ void onRegionComplete(RegionInfo region, R resp); /** * @param region the region that the error belongs to - * @param error the response error of the coprocessor call + * @param error the response error of the coprocessor call */ void onRegionError(RegionInfo region, Throwable error); @@ -680,7 +682,7 @@ default CoprocessorServiceBuilder fromRow(byte[] startKey) { } /** - * @param startKey start region selection with region containing this row + * @param startKey start region selection with region containing this row * @param inclusive whether to include the startKey */ CoprocessorServiceBuilder fromRow(byte[] startKey, boolean inclusive); @@ -693,7 +695,7 @@ default CoprocessorServiceBuilder toRow(byte[] endKey) { } /** - * @param endKey select regions up to and including the region containing this row + * @param endKey select regions up to and including the region containing this row * @param inclusive whether to include the endKey */ CoprocessorServiceBuilder toRow(byte[] endKey, boolean inclusive); @@ -720,10 +722,10 @@ default CoprocessorServiceBuilder toRow(byte[] endKey) { * * * @param stubMaker a delegation to the actual {@code newStub} call. - * @param callable a delegation to the actual protobuf rpc call. See the comment of - * {@link ServiceCaller} for more details. - * @param callback callback to get the response. See the comment of {@link CoprocessorCallback} - * for more details. + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param callback callback to get the response. See the comment of {@link CoprocessorCallback} + * for more details. */ CoprocessorServiceBuilder coprocessorService(Function stubMaker, ServiceCaller callable, CoprocessorCallback callback); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java index ebf98f98bc3e..f6db89f82bf5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseServerException; import org.apache.yetus.audience.InterfaceAudience; @@ -82,18 +81,17 @@ public interface AsyncTableBuilder { AsyncTableBuilder setRetryPause(long pause, TimeUnit unit); /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time when retrying. *
<p>
    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) * @deprecated Since 2.5.0, will be removed in 4.0.0. Please use - * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. + * {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead. */ @Deprecated default AsyncTableBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) { @@ -101,15 +99,14 @@ default AsyncTableBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) { } /** - * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. - * We use an exponential policy to generate sleep time when retrying. + * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We + * use an exponential policy to generate sleep time when retrying. *
<p>
    * This value should be greater than the normal pause value which could be set with the above * {@link #setRetryPause(long, TimeUnit)} method, as usually * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you * specify a smaller value. - * * @see #setRetryPause(long, TimeUnit) */ AsyncTableBuilder setRetryPauseForServerOverloaded(long pause, TimeUnit unit); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java index bec9f1236907..7c58e8c672f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +28,7 @@ */ @InterfaceAudience.Private abstract class AsyncTableBuilderBase - implements AsyncTableBuilder { + implements AsyncTableBuilder { protected TableName tableName; @@ -53,7 +52,8 @@ abstract class AsyncTableBuilderBase AsyncTableBuilderBase(TableName tableName, AsyncConnectionConfiguration connConf) { this.tableName = tableName; - this.operationTimeoutNs = tableName.isSystemTable() ? connConf.getMetaOperationTimeoutNs() + this.operationTimeoutNs = tableName.isSystemTable() + ? 
connConf.getMetaOperationTimeoutNs() : connConf.getOperationTimeoutNs(); this.scanTimeoutNs = connConf.getScanTimeoutNs(); this.rpcTimeoutNs = connConf.getRpcTimeoutNs(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index 0bf3179673db..8207a7104b34 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static java.util.stream.Collectors.toList; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; /** @@ -179,8 +181,7 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) public CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) { return new CheckAndMutateWithFilterBuilder() { - private final CheckAndMutateWithFilterBuilder builder = - rawTable.checkAndMutate(row, filter); + private final CheckAndMutateWithFilterBuilder builder = rawTable.checkAndMutate(row, filter); @Override public CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange) { @@ -211,10 +212,9 @@ public CompletableFuture checkAndMutate(CheckAndMutate che } @Override - public List> checkAndMutate( - List checkAndMutates) { - return rawTable.checkAndMutate(checkAndMutates).stream() - .map(this::wrap).collect(toList()); + public List> + checkAndMutate(List checkAndMutates) { + return rawTable.checkAndMutate(checkAndMutates).stream().map(this::wrap).collect(toList()); } @Override @@ -238,7 +238,7 @@ private void scan0(Scan scan, ScanResultConsumer consumer) { span = scanner.getSpan(); try (Scope ignored = span.makeCurrent()) { consumer.onScanMetricsCreated(scanner.getScanMetrics()); - for (Result result; (result = scanner.next()) != null; ) { + for (Result result; (result = scanner.next()) != null;) { if (!consumer.onNext(result)) { break; } @@ -280,14 +280,14 @@ public List> batch(List actions) { @Override public CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, byte[] row) { + ServiceCaller callable, byte[] row) { return wrap(rawTable.coprocessorService(stubMaker, callable, row)); } @Override public CoprocessorServiceBuilder coprocessorService( - Function stubMaker, ServiceCaller callable, - CoprocessorCallback callback) { + Function stubMaker, ServiceCaller callable, + CoprocessorCallback callback) { final Context context = Context.current(); CoprocessorCallback wrappedCallback = new CoprocessorCallback() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 96e3ec4173a9..fffca5c2dccd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
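The scan0 wrapper above pulls from an AsyncTableResultScanner and pushes into a ScanResultConsumer; callers can also drive the consumer API directly. A hedged sketch (the pool, table and family names are invented):

import java.util.concurrent.ExecutorService;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ScanResultConsumer;
import org.apache.hadoop.hbase.util.Bytes;

class ScanConsumerSketch {
  static void demo(AsyncConnection conn, ExecutorService pool) {
    AsyncTable<ScanResultConsumer> table = conn.getTable(TableName.valueOf("demo"), pool);
    table.scan(new Scan().addFamily(Bytes.toBytes("cf")), new ScanResultConsumer() {
      @Override
      public boolean onNext(Result result) {
        System.out.println(Bytes.toString(result.getRow()));
        return true; // returning false stops the scan early
      }

      @Override
      public void onError(Throwable error) {
        error.printStackTrace();
      }

      @Override
      public void onComplete() {
        System.out.println("scan finished");
      }
    });
  }
}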
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ default CompletableFuture getRegionLocation(byte[] row) { * Finds the region on which the given row is being served. *
<p>
    * Returns the location of the region to which the row belongs. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information */ default CompletableFuture getRegionLocation(byte[] row, boolean reload) { @@ -66,7 +66,7 @@ default CompletableFuture getRegionLocation(byte[] row, boolean *
<p>
    * Returns the location of the region with the given replicaId to which the row * belongs. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id of the region */ default CompletableFuture getRegionLocation(byte[] row, int replicaId) { @@ -78,9 +78,9 @@ default CompletableFuture getRegionLocation(byte[] row, int rep *
<p>
    * Returns the location of the region with the given replicaId to which the row * belongs. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id of the region - * @param reload true to reload information or false to use cached information + * @param reload true to reload information or false to use cached information */ CompletableFuture getRegionLocation(byte[] row, int replicaId, boolean reload); @@ -95,7 +95,7 @@ default CompletableFuture> getRegionLocations(byte[] row) /** * Find all the replicas for the region on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java index d20e1faed0cc..fd04e662dd7f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ public CompletableFuture> getAllRegionLocations() { } CompletableFuture> future = ClientMetaTableAccessor .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); - addListener(future, (locs, error) -> locs.forEach(loc -> conn.getLocator() - .getNonMetaRegionLocator().addLocationToCache(loc))); + addListener(future, (locs, error) -> locs + .forEach(loc -> conn.getLocator().getNonMetaRegionLocator().addLocationToCache(loc))); return future; }, getClass().getSimpleName() + ".getAllRegionLocations"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index 6462cd093f85..1f9d7497ee0f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
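A short hedged sketch of the locator calls documented above (conn is an assumed open AsyncConnection; the table and row are invented; reload = true bypasses the client-side location cache):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTableRegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

class LocatorSketch {
  static void demo(AsyncConnection conn) {
    AsyncTableRegionLocator locator = conn.getRegionLocator(TableName.valueOf("demo"));
    locator.getRegionLocation(Bytes.toBytes("row1"), true)
      .thenAccept(loc -> System.out.println("row1 -> " + loc.getServerName()));
  }
}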
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize; + import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.io.InterruptedIOException; @@ -74,10 +75,10 @@ private void addToCache(Result result) { private void stopPrefetch(ScanController controller) { if (LOG.isDebugEnabled()) { - LOG.debug("{} stop prefetching when scanning {} as the cache size {}" + - " is greater than the maxCacheSize {}", - String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, - maxCacheSize); + LOG.debug( + "{} stop prefetching when scanning {} as the cache size {}" + + " is greater than the maxCacheSize {}", + String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, maxCacheSize); } resumer = controller.suspend(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java index d693cb329b30..259807866d80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface Attributes { /** - * Sets an attribute. - * In case value = null attribute is removed from the attributes map. - * Attribute names starting with _ indicate system attributes. - * @param name attribute name + * Sets an attribute. In case value = null attribute is removed from the attributes map. Attribute + * names starting with _ indicate system attributes. + * @param name attribute name * @param value attribute value */ Attributes setAttribute(String name, byte[] value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java index 4e67bcedbd84..70a809da05bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,19 +34,15 @@ public final static class Builder { private boolean dryRun = false; private boolean ignoreRegionsInTransition = false; - private Builder() {} + private Builder() { + } /** - * Updates BalancerRequest to run the balancer in dryRun mode. - * In this mode, the balancer will try to find a plan but WILL NOT - * execute any region moves or call any coprocessors. - * - * You can run in dryRun mode regardless of whether the balancer switch - * is enabled or disabled, but dryRun mode will not run over an existing - * request or chore. - * - * Dry run is useful for testing out new balance configs. See the logs - * on the active HMaster for the results of the dry run. 
+ * Updates BalancerRequest to run the balancer in dryRun mode. In this mode, the balancer will + * try to find a plan but WILL NOT execute any region moves or call any coprocessors. You can + * run in dryRun mode regardless of whether the balancer switch is enabled or disabled, but + * dryRun mode will not run over an existing request or chore. Dry run is useful for testing out + * new balance configs. See the logs on the active HMaster for the results of the dry run. */ public Builder setDryRun(boolean dryRun) { this.dryRun = dryRun; @@ -55,10 +50,8 @@ public Builder setDryRun(boolean dryRun) { } /** - * Updates BalancerRequest to run the balancer even if there are regions - * in transition. - * - * WARNING: Advanced usage only, this could cause more issues than it fixes. + * Updates BalancerRequest to run the balancer even if there are regions in transition. WARNING: + * Advanced usage only, this could cause more issues than it fixes. */ public Builder setIgnoreRegionsInTransition(boolean ignoreRegionsInTransition) { this.ignoreRegionsInTransition = ignoreRegionsInTransition; @@ -81,8 +74,8 @@ public static Builder newBuilder() { } /** - * Get a BalanceRequest for a default run of the balancer. The default mode executes - * any moves calculated and will not run if regions are already in transition. + * Get a BalanceRequest for a default run of the balancer. The default mode executes any moves + * calculated and will not run if regions are already in transition. */ public static BalanceRequest defaultInstance() { return DEFAULT; @@ -97,16 +90,16 @@ private BalanceRequest(boolean dryRun, boolean ignoreRegionsInTransition) { } /** - * Returns true if the balancer should run in dry run mode, otherwise false. In - * dry run mode, moves will be calculated but not executed. + * Returns true if the balancer should run in dry run mode, otherwise false. In dry run mode, + * moves will be calculated but not executed. */ public boolean isDryRun() { return dryRun; } /** - * Returns true if the balancer should execute even if regions are in transition, otherwise - * false. This is an advanced usage feature, as it can cause more issues than it fixes. + * Returns true if the balancer should execute even if regions are in transition, otherwise false. + * This is an advanced usage feature, as it can cause more issues than it fixes. */ public boolean isIgnoreRegionsInTransition() { return ignoreRegionsInTransition; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java index 143878209d11..c7914f150de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,8 @@ public final class BalanceResponse { /** - * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance invocation to callers + * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance + * invocation to callers */ @InterfaceAudience.Private public final static class Builder { @@ -35,13 +35,13 @@ public final static class Builder { private int movesCalculated; private int movesExecuted; - private Builder() {} + private Builder() { + } /** * Set true if the balancer ran, otherwise false. The balancer may not run in some - * circumstances, such as if a balance is already running or there are regions already - * in transition. - * + * circumstances, such as if a balance is already running or there are regions already in + * transition. * @param balancerRan true if balancer ran, false otherwise */ public Builder setBalancerRan(boolean balancerRan) { @@ -52,7 +52,6 @@ public Builder setBalancerRan(boolean balancerRan) { /** * Set how many moves were calculated by the balancer. This will be zero if the cluster is * already balanced. - * * @param movesCalculated moves calculated by the balance run */ public Builder setMovesCalculated(int movesCalculated) { @@ -64,7 +63,6 @@ public Builder setMovesCalculated(int movesCalculated) { * Set how many of the calculated moves were actually executed by the balancer. This should be * zero if the balancer is run with {@link BalanceRequest#isDryRun()}. It may also not equal * movesCalculated if the balancer ran out of time while executing the moves. - * * @param movesExecuted moves executed by the balance run */ public Builder setMovesExecuted(int movesExecuted) { @@ -98,9 +96,9 @@ private BalanceResponse(boolean balancerRan, int movesCalculated, int movesExecu } /** - * Returns true if the balancer ran, otherwise false. The balancer may not run for a - * variety of reasons, such as: another balance is running, there are regions in - * transition, the cluster is in maintenance mode, etc. + * Returns true if the balancer ran, otherwise false. The balancer may not run for a variety of + * reasons, such as: another balance is running, there are regions in transition, the cluster is + * in maintenance mode, etc. */ public boolean isBalancerRan() { return balancerRan; @@ -115,10 +113,10 @@ public int getMovesCalculated() { } /** - * The number of moves actually executed by the balancer if it ran. This will be - * zero if {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} - * was true. It may also not be equal to {@link #getMovesCalculated()} if the balancer - * was interrupted midway through executing the moves due to max run time. + * The number of moves actually executed by the balancer if it ran. This will be zero if + * {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} was true. It may + * also not be equal to {@link #getMovesCalculated()} if the balancer was interrupted midway + * through executing the moves due to max run time. 
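BalanceRequest and BalanceResponse combine through the Admin API; a hedged sketch, assuming the Admin.balance(BalanceRequest) overload present in recent HBase and an existing admin instance:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;

class DryRunBalanceSketch {
  static void demo(Admin admin) throws IOException {
    BalanceRequest request = BalanceRequest.newBuilder()
      .setDryRun(true) // compute a plan but move nothing
      .build();
    BalanceResponse response = admin.balance(request);
    if (response.isBalancerRan()) {
      System.out.println("balancer would execute " + response.getMovesCalculated() + " moves");
    }
  }
}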
*/ public int getMovesExecuted() { return movesExecuted; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java index e2bf2e28e0e7..4d66da19402a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -45,17 +42,15 @@ final public class BalancerDecision extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(BalancerDecision.class, (JsonSerializer) - (balancerDecision, type, jsonSerializationContext) -> { + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(BalancerDecision.class, + (JsonSerializer) (balancerDecision, type, jsonSerializationContext) -> { Gson gson = new Gson(); return gson.toJsonTree(balancerDecision); }).create(); private BalancerDecision(String initialFunctionCosts, String finalFunctionCosts, - double initTotalCost, double computedTotalCost, List regionPlans, - long computedSteps) { + double initTotalCost, double computedTotalCost, List regionPlans, long computedSteps) { this.initialFunctionCosts = initialFunctionCosts; this.finalFunctionCosts = finalFunctionCosts; this.initTotalCost = initTotalCost; @@ -90,14 +85,10 @@ public long getComputedSteps() { @Override public String toString() { - return new ToStringBuilder(this) - .append("initialFunctionCosts", initialFunctionCosts) - .append("finalFunctionCosts", finalFunctionCosts) - .append("initTotalCost", initTotalCost) - .append("computedTotalCost", computedTotalCost) - .append("computedSteps", computedSteps) - .append("regionPlans", regionPlans) - .toString(); + return new ToStringBuilder(this).append("initialFunctionCosts", initialFunctionCosts) + .append("finalFunctionCosts", finalFunctionCosts).append("initTotalCost", initTotalCost) + .append("computedTotalCost", computedTotalCost).append("computedSteps", computedSteps) + .append("regionPlans", regionPlans).toString(); } @Override @@ -144,8 +135,8 @@ public Builder setComputedSteps(long computedSteps) { } public BalancerDecision build() { - return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, - initTotalCost, computedTotalCost, regionPlans, computedSteps); + return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, initTotalCost, + computedTotalCost, regionPlans, computedSteps); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java index d6e6cee20fc8..eb5f7ff7ad2d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java 
@@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -37,27 +34,25 @@ @InterfaceAudience.Public @InterfaceStability.Evolving final public class BalancerRejection extends LogEntry { - //The reason why balancer was rejected + // The reason why balancer was rejected private final String reason; private final List costFuncInfoList; // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .disableHtmlEscaping() - .registerTypeAdapter(BalancerRejection.class, (JsonSerializer) - (balancerRejection, type, jsonSerializationContext) -> { + private static final Gson GSON = GsonUtil.createGson().setPrettyPrinting().disableHtmlEscaping() + .registerTypeAdapter(BalancerRejection.class, + (JsonSerializer) (balancerRejection, type, jsonSerializationContext) -> { Gson gson = new Gson(); return gson.toJsonTree(balancerRejection); - }).create(); + }) + .create(); private BalancerRejection(String reason, List costFuncInfoList) { this.reason = reason; - if(costFuncInfoList == null){ + if (costFuncInfoList == null) { this.costFuncInfoList = Collections.emptyList(); - } - else { + } else { this.costFuncInfoList = costFuncInfoList; } } @@ -72,10 +67,8 @@ public List getCostFuncInfoList() { @Override public String toString() { - return new ToStringBuilder(this) - .append("reason", reason) - .append("costFuncInfoList", costFuncInfoList.toString()) - .toString(); + return new ToStringBuilder(this).append("reason", reason) + .append("costFuncInfoList", costFuncInfoList.toString()).toString(); } @Override @@ -92,19 +85,15 @@ public Builder setReason(String reason) { return this; } - public void addCostFuncInfo(String funcName, double cost, float multiplier){ - if(costFuncInfoList == null){ + public void addCostFuncInfo(String funcName, double cost, float multiplier) { + if (costFuncInfoList == null) { costFuncInfoList = new ArrayList<>(); } - costFuncInfoList.add( - new StringBuilder() - .append(funcName) - .append(" cost:").append(cost) - .append(" multiplier:").append(multiplier) - .toString()); + costFuncInfoList.add(new StringBuilder().append(funcName).append(" cost:").append(cost) + .append(" multiplier:").append(multiplier).toString()); } - public Builder setCostFuncInfoList(List costFuncInfoList){ + public Builder setCostFuncInfoList(List costFuncInfoList) { this.costFuncInfoList = costFuncInfoList; return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java index 3b27298585e9..b0423c6c5cea 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
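The registerTypeAdapter construction reflowed in these two classes delegates to a plain Gson inside the serializer so the adapter never recurses into itself. A standalone sketch of the same pattern, using unshaded Gson and an invented POJO rather than HBase's GsonUtil:

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonSerializer;

class GsonAdapterSketch {
  static final class Decision { // invented stand-in for a log-entry POJO
    String reason = "cost not improved";
  }

  public static void main(String[] args) {
    Gson gson = new GsonBuilder().setPrettyPrinting()
      // the inner new Gson() has no adapter registered, so serialization terminates
      .registerTypeAdapter(Decision.class,
        (JsonSerializer<Decision>) (d, type, ctx) -> new Gson().toJsonTree(d))
      .create();
    System.out.println(gson.toJson(new Decision()));
  }
}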
See the NOTICE file * distributed with this work for additional information @@ -24,11 +24,10 @@ import java.util.ArrayList; import java.util.Deque; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache for batched scan, i.e., @@ -142,8 +141,9 @@ public Result[] addAndGet(Result[] results, boolean isHeartbeatMessage) throws I numberOfCompleteRows++; } // check if we have a row change - if (!partialResults.isEmpty() && - !Bytes.equals(partialResults.peek().getRow(), result.getRow())) { + if ( + !partialResults.isEmpty() && !Bytes.equals(partialResults.peek().getRow(), result.getRow()) + ) { regroupedResults.add(createCompletedResult()); } Result regroupedResult = regroupResults(result); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index 8ad6a7922303..d755926a697c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,34 +25,38 @@ import org.apache.yetus.audience.InterfaceAudience; /** - *
<p>
    Used to communicate with a single HBase table similar to {@link Table} but meant for - * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call - * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via - * the {@link BufferedMutatorParams}. + *
<p>
    + * Used to communicate with a single HBase table similar to {@link Table} but meant for batched, + * asynchronous puts. Obtain an instance from a {@link Connection} and call {@link #close()} + * afterwards. Customizations can be applied to the {@code BufferedMutator} via the + * {@link BufferedMutatorParams}. *
</p>
    - * - *
<p>
    Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}. - * The default implementation is to throw the exception upon receipt. This behavior can be - * overridden with a custom implementation, provided as a parameter with - * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.
</p>
    - * - *
<p>
    Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs - * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the - * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size - * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue - * without interruption. + *
<p>
+ * Exception handling is done asynchronously via the {@link BufferedMutator.ExceptionListener}. The + * default implementation is to throw the exception upon receipt. This behavior can be overridden + * with a custom implementation, provided as a parameter with + * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}. *
</p>
    - * - *
<p>
    {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs - * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can - * also be effectively used in high volume online systems to batch puts, with the caveat that - * extreme circumstances, such as JVM or machine failure, may cause some data loss.
</p>
    - * - *
<p>
    NOTE: This class replaces the functionality that used to be available via + *
<p>
    + * Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs benefit + * from batching, but have no natural flush point. {@code BufferedMutator} receives the puts from + * the M/R job and will batch puts based on some heuristic, such as the accumulated size of the + * puts, and submit batches of puts asynchronously so that the M/R logic can continue without + * interruption. + *
</p>
    + *
<p>
+ * {@code BufferedMutator} can also be used in more exotic circumstances. Map/Reduce batch jobs will + * have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can also be + * effectively used in high volume online systems to batch puts, with the caveat that extreme + * circumstances, such as JVM or machine failure, may cause some data loss. + *
</p>
    + *
<p>
    + * NOTE: This class replaces the functionality that used to be available via * HTable#setAutoFlush(boolean) set to {@code false}. *
</p>
    - * - *
<p>
    See also the {@code BufferedMutatorExample} in the hbase-examples module.
</p>
    + *
<p>
    + * See also the {@code BufferedMutatorExample} in the hbase-examples module. + *
</p>
+ * @see ConnectionFactory * @see Connection * @since 1.0.0 @@ -70,8 +73,8 @@ public interface BufferedMutator extends Closeable { String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname"; /** - * Having the timer tick run more often that once every 100ms is needless and will - * probably cause too many timer events firing having a negative impact on performance. + * Having the timer tick run more often than once every 100ms is needless and will probably cause + * too many timer events firing having a negative impact on performance. */ long MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = 100; @@ -83,25 +86,22 @@ public interface BufferedMutator extends Closeable { /** * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance. *
<p>
    - * The reference returned is not a copy, so any change made to it will - * affect this instance. + * The reference returned is not a copy, so any change made to it will affect this instance. */ Configuration getConfiguration(); /** - * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the - * wire as part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. - * + * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the wire as + * part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. * @param mutation The data to send. * @throws IOException if a remote or network exception occurs. */ void mutate(Mutation mutation) throws IOException; /** - * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the - * wire as part of a batch. There is no guarantee of sending entire content of {@code mutations} - * in a single batch; it will be broken up according to the write buffer capacity. - * + * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the wire + * as part of a batch. There is no guarantee of sending entire content of {@code mutations} in a + * single batch; it will be broken up according to the write buffer capacity. * @param mutations The data to send. * @throws IOException if a remote or network exception occurs. */ @@ -109,24 +109,22 @@ public interface BufferedMutator extends Closeable { /** * Performs a {@link #flush()} and releases any resources held. - * * @throws IOException if a remote or network exception occurs. */ @Override void close() throws IOException; /** - * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they - * are done. - * + * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they are + * done. * @throws IOException if a remote or network exception occurs. */ void flush() throws IOException; /** * Sets the maximum time before the buffer is automatically flushed checking once per second. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. */ default void setWriteBufferPeriodicFlush(long timeoutMs) { setWriteBufferPeriodicFlush(timeoutMs, 1000L); @@ -134,16 +132,16 @@ default void setWriteBufferPeriodicFlush(long timeoutMs) { /** * Sets the maximum time before the buffer is automatically flushed. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. - * @param timerTickMs The number of milliseconds between each check if the - * timeout has been exceeded. Must be 100ms (as defined in - * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) - * or larger to avoid performance problems. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. + * @param timerTickMs The number of milliseconds between each check if the timeout has been + * exceeded. Must be 100ms (as defined in + * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) or larger to avoid + * performance problems. 
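A hedged usage sketch of the periodic-flush knobs documented above (connection is an assumed open Connection; the table, row and column names are invented):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

class PeriodicFlushSketch {
  static void demo(Connection connection) throws IOException {
    try (BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("demo"))) {
      // flush anything buffered longer than 60s; the timer tick defaults to 1000 ms
      mutator.setWriteBufferPeriodicFlush(60_000L);
      mutator.mutate(new Put(Bytes.toBytes("row1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    } // close() performs a final flush
  }
}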
*/ default void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) { throw new UnsupportedOperationException( - "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); + "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); } /** @@ -155,22 +153,22 @@ default void disableWriteBufferPeriodicFlush() { /** * Returns the current periodic flush timeout value in milliseconds. - * @return The maximum number of milliseconds how long records may be buffered before they - * are flushed. The value 0 means this is disabled. + * @return The maximum number of milliseconds how long records may be buffered before they are + * flushed. The value 0 means this is disabled. */ default long getWriteBufferPeriodicFlushTimeoutMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); } /** * Returns the current periodic flush timertick interval in milliseconds. - * @return The number of milliseconds between each check if the timeout has been exceeded. - * This value only has a real meaning if the timeout has been set to > 0 + * @return The number of milliseconds between each check if the timeout has been exceeded. This + * value only has a real meaning if the timeout has been set to > 0 */ default long getWriteBufferPeriodicFlushTimerTickMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); } /** @@ -202,7 +200,7 @@ default long getWriteBufferPeriodicFlushTimerTickMs() { */ @InterfaceAudience.Public interface ExceptionListener { - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException; + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java index b8bc55c47c37..72692eac59e5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ class BufferedMutatorOverAsyncBufferedMutator implements BufferedMutator { new ConcurrentLinkedQueue<>(); BufferedMutatorOverAsyncBufferedMutator(AsyncBufferedMutator mutator, - ExceptionListener listener) { + ExceptionListener listener) { this.mutator = mutator; this.listener = listener; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java index 54c133b81bf8..b3efa14fa7ee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.concurrent.ExecutorService; @@ -43,8 +41,7 @@ public class BufferedMutatorParams implements Cloneable { private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() { @Override public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator bufferedMutator) - throws RetriesExhaustedWithDetailsException { + BufferedMutator bufferedMutator) throws RetriesExhaustedWithDetailsException { throw exception; } }; @@ -145,8 +142,8 @@ public BufferedMutatorParams maxKeyValueSize(int maxKeyValueSize) { } /** - * @deprecated Since 3.0.0-alpha-2, will be removed in 4.0.0. You can not set it anymore. - * BufferedMutator will use Connection's ExecutorService. + * @deprecated Since 3.0.0-alpha-2, will be removed in 4.0.0. You can not set it anymore. + * BufferedMutator will use Connection's ExecutorService. */ @Deprecated public ExecutorService getPool() { @@ -154,8 +151,8 @@ public ExecutorService getPool() { } /** - * Override the default executor pool defined by the {@code hbase.htable.threads.*} - * configuration values. + * Override the default executor pool defined by the {@code hbase.htable.threads.*} configuration + * values. * @deprecated Since 3.0.0-alpha-2, will be removed in 4.0.0. You can not set it anymore. * BufferedMutator will use Connection's ExecutorService. 
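And a hedged sketch of wiring a custom ExceptionListener through BufferedMutatorParams, as the listener(...) method below accepts (connection is assumed to exist; the logging is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;

class ListenerSketch {
  static BufferedMutator create(Connection connection) throws IOException {
    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("demo"))
      .writeBufferSize(4L * 1024 * 1024)
      .listener((exception, mutator) -> {
        // inspect each failed mutation instead of rethrowing (the default rethrows)
        for (int i = 0; i < exception.getNumExceptions(); i++) {
          System.err.println("failed row " + exception.getRow(i) + ": " + exception.getCause(i));
        }
      });
    return connection.getBufferedMutator(params);
  }
}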
*/ @@ -200,18 +197,18 @@ public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener return this; } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL", - justification="The clone below is complete") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "CN_IDIOM_NO_SUPER_CALL", + justification = "The clone below is complete") @Override public BufferedMutatorParams clone() { BufferedMutatorParams clone = new BufferedMutatorParams(this.tableName); - clone.writeBufferSize = this.writeBufferSize; - clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; + clone.writeBufferSize = this.writeBufferSize; + clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; clone.writeBufferPeriodicFlushTimerTickMs = this.writeBufferPeriodicFlushTimerTickMs; - clone.maxKeyValueSize = this.maxKeyValueSize; - clone.pool = this.pool; - clone.listener = this.listener; - clone.implementationClassName = this.implementationClassName; + clone.maxKeyValueSize = this.maxKeyValueSize; + clone.pool = this.pool; + clone.listener = this.listener; + clone.implementationClassName = this.implementationClassName; return clone; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def2..c6bf6b5d59e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -39,10 +39,9 @@ interface CatalogReplicaLoadBalanceSelector { /** * Select a catalog replica region where client go to loop up the input row key. - * - * @param tablename table name - * @param row key to look up - * @param locateType locate type + * @param tablename table name + * @param row key to look up + * @param locateType locate type * @return replica id */ int select(TableName tablename, byte[] row, RegionLocateType locateType); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java index fe686f79ab8a..e94102248b59 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java @@ -35,10 +35,10 @@ private CatalogReplicaLoadBalanceSelectorFactory() { /** * Create a CatalogReplicaLoadBalanceReplicaSelector based on input config. - * @param replicaSelectorClass Selector classname. - * @param tableName System table name. - * @param conn {@link AsyncConnectionImpl} - * @return {@link CatalogReplicaLoadBalanceSelector} + * @param replicaSelectorClass Selector classname. + * @param tableName System table name. 
+ * @param conn {@link AsyncConnectionImpl} + * @return {@link CatalogReplicaLoadBalanceSelector} */ public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass, TableName tableName, AsyncConnectionImpl conn, IntSupplier getReplicaCount) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index 4d9e1aa24d6c..2e704c5bdc10 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; + import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -38,32 +39,34 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + /** - *

- * <p>CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load
- * balancing algorithm. It maintains a stale location cache for each table. Whenever client looks
- * up location, it first check if the row is the stale location cache. If yes, the location from
- * catalog replica is stale, it will go to the primary region to look up update-to-date location;
- * otherwise, it will randomly pick up a replica region or primary region for lookup. When clients
- * receive RegionNotServedException from region servers, it will add these region locations to the
- * stale location cache. The stale cache will be cleaned up periodically by a chore.</p>
- *
+ * <p>
+ * CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load balancing
+ * algorithm. It maintains a stale location cache for each table. Whenever a client looks up a
+ * location, it first checks whether the row is in the stale location cache. If it is, the location
+ * from the catalog replica is stale, and the client goes to the primary region to look up the
+ * up-to-date location; otherwise, it randomly picks a replica region or the primary region for the
+ * lookup. When clients receive RegionNotServedException from region servers, they add these region
+ * locations to the stale location cache. The stale cache is cleaned up periodically by a chore.
+ * </p>
  * It follows a simple algorithm to choose a meta replica region (including primary meta) to go:
- *
  * <ol>
- *  <li>If there is no stale location entry for rows it looks up, it will randomly
- *      pick a meta replica region (including primary meta) to do lookup.</li>
- *  <li>If the location from the replica region is stale, client gets RegionNotServedException
- *      from region server, in this case, it will create StaleLocationCacheEntry in
- *      CatalogReplicaLoadBalanceReplicaSimpleSelector.</li>
- *  <li>When client tries to do location lookup, it checks StaleLocationCache first for rows it
- *      tries to lookup, if entry exists, it will go with primary meta region to do lookup;
- *      otherwise, it will follow step 1.</li>
- *  <li>A chore will periodically run to clean up cache entries in the StaleLocationCache.</li>
+ * <li>If there is no stale location entry for the rows it looks up, it randomly picks a meta
+ * replica region (including primary meta) to do the lookup.</li>
+ * <li>If the location from the replica region is stale, the client gets RegionNotServedException
+ * from the region server; in this case, it creates a StaleLocationCacheEntry in
+ * CatalogReplicaLoadBalanceReplicaSimpleSelector.</li>
+ * <li>When the client does a location lookup, it checks the StaleLocationCache first for the rows
+ * it looks up; if an entry exists, it goes to the primary meta region for the lookup, otherwise it
+ * follows step 1.</li>
+ * <li>A chore runs periodically to clean up cache entries in the StaleLocationCache.</li>
  * </ol>
    */ -class CatalogReplicaLoadBalanceSimpleSelector implements - CatalogReplicaLoadBalanceSelector, Stoppable { +class CatalogReplicaLoadBalanceSimpleSelector + implements CatalogReplicaLoadBalanceSelector, Stoppable { private static final Logger LOG = LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); private final long STALE_CACHE_TIMEOUT_IN_MILLISECONDS = 3000; // 3 seconds @@ -94,10 +97,8 @@ public long getTimestamp() { @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("endKey", endKey) - .append("timestamp", timestamp) - .toString(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("endKey", endKey) + .append("timestamp", timestamp).toString(); } } @@ -123,24 +124,22 @@ public String toString() { } /** - * When a client runs into RegionNotServingException, it will call this method to - * update Selector's internal state. + * When a client runs into RegionNotServingException, it will call this method to update + * Selector's internal state. * @param loc the location which causes exception. */ public void onError(HRegionLocation loc) { - ConcurrentNavigableMap tableCache = - computeIfAbsent(staleCache, loc.getRegion().getTable(), - () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); + ConcurrentNavigableMap tableCache = computeIfAbsent(staleCache, + loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); byte[] startKey = loc.getRegion().getStartKey(); - tableCache.putIfAbsent(startKey, - new StaleLocationCacheEntry(loc.getRegion().getEndKey())); + tableCache.putIfAbsent(startKey, new StaleLocationCacheEntry(loc.getRegion().getEndKey())); LOG.debug("Add entry to stale cache for table {} with startKey {}, {}", loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey()); } /** - * Select an random replica id (including the primary replica id). In case there is no replica region configured, return - * the primary replica id. + * Select an random replica id (including the primary replica id). In case there is no replica + * region configured, return the primary replica id. * @return Replica id */ private int getRandomReplicaId() { @@ -157,20 +156,18 @@ private int getRandomReplicaId() { } /** - * When it looks up a location, it will call this method to find a replica region to go. - * For a normal case, > 99% of region locations from catalog/meta replica will be up to date. - * In extreme cases such as region server crashes, it will depends on how fast replication - * catches up. - * - * @param tablename table name it looks up - * @param row key it looks up. + * When it looks up a location, it will call this method to find a replica region to go. For a + * normal case, > 99% of region locations from catalog/meta replica will be up to date. In extreme + * cases such as region server crashes, it will depends on how fast replication catches up. + * @param tablename table name it looks up + * @param row key it looks up. * @param locateType locateType, Only BEFORE and CURRENT will be passed in. 
* @return catalog replica id */ public int select(final TableName tablename, final byte[] row, final RegionLocateType locateType) { - Preconditions.checkArgument(locateType == RegionLocateType.BEFORE || - locateType == RegionLocateType.CURRENT, + Preconditions.checkArgument( + locateType == RegionLocateType.BEFORE || locateType == RegionLocateType.CURRENT, "Expected type BEFORE or CURRENT but got: %s", locateType); ConcurrentNavigableMap tableCache = staleCache.get(tablename); @@ -198,15 +195,17 @@ public int select(final TableName tablename, final byte[] row, // long comparing is faster than comparing byte arrays(in most cases). It could remove // stale entries faster. If the possible match entry does not time out, it will check if // the entry is a match for the row passed in and select the replica id accordingly. - if ((EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if ( + (EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) + >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS + ) { LOG.debug("Entry for table {} with startKey {}, {} times out", tablename, entry.getKey(), entry); tableCache.remove(entry.getKey()); return getRandomReplicaId(); } - byte[] endKey = entry.getValue().getEndKey(); + byte[] endKey = entry.getValue().getEndKey(); // The following logic is borrowed from AsyncNonMetaRegionLocator. if (isEmptyStopRow(endKey)) { @@ -245,12 +244,12 @@ public boolean isStopped() { private void cleanupReplicaReplicaStaleCache() { long curTimeInMills = EnvironmentEdgeManager.currentTime(); for (ConcurrentNavigableMap tableCache : staleCache.values()) { - Iterator> it = - tableCache.entrySet().iterator(); + Iterator> it = tableCache.entrySet().iterator(); while (it.hasNext()) { Map.Entry entry = it.next(); - if (curTimeInMills - entry.getValue().getTimestamp() >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if ( + curTimeInMills - entry.getValue().getTimestamp() >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS + ) { LOG.debug("clean entry {}, {} from stale cache", entry.getKey(), entry.getValue()); it.remove(); } @@ -269,15 +268,17 @@ private int refreshCatalogReplicaCount() { } int cachedNumOfReplicas = this.numOfReplicas; - if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + if ( + (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) + || (cachedNumOfReplicas != newNumOfReplicas) + ) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; } - private ScheduledChore getCacheCleanupChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getCacheCleanupChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("CleanupCatalogReplicaStaleCache", this, STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { @Override @@ -287,8 +288,8 @@ protected void chore() { }; } - private ScheduledChore getRefreshReplicaCountChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getRefreshReplicaCountChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("RefreshReplicaCountChore", this, REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java index 40062e32e83c..647d5dcf38f5 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,18 +20,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - *

- * <p>There are two modes with catalog replica support.</p>
- *
+ * <p>
+ * There are two modes with catalog replica support.
+ * </p>
  * <ol>
- *   <li>HEDGED_READ - Client sends requests to the primary region first, within a
- *                     configured amount of time, if there is no response coming back,
- *                     client sends requests to all replica regions and takes the first
- *                     response.</li>
- *
- *   <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode,
- *                     if results from replica regions are stale, next time, client sends requests for
- *                     these stale locations to the primary region. In this mode, scan
- *                     requests are load balanced across all replica regions.</li>
+ * <li>HEDGED_READ - Client sends requests to the primary region first, within a configured amount
+ * of time, if there is no response coming back, client sends requests to all replica regions and
+ * takes the first response.</li>
+ * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode, if results
+ * from replica regions are stale, next time, client sends requests for these stale locations to the
+ * primary region. In this mode, scan requests are load balanced across all replica regions.</li>
  * </ol>
    */ @InterfaceAudience.Private @@ -54,7 +54,7 @@ public String toString() { }; public static CatalogReplicaMode fromString(final String value) { - for(CatalogReplicaMode mode : values()) { + for (CatalogReplicaMode mode : values()) { if (mode.toString().equalsIgnoreCase(value)) { return mode; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java index b7f17f310fd8..67f30177663f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java @@ -23,13 +23,15 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** * Used to perform CheckAndMutate operations. *

- * Use the builder class to instantiate a CheckAndMutate object.
- * This builder class is fluent style APIs, the code are like:
+ * Use the builder class to instantiate a CheckAndMutate object. This builder class provides fluent
+ * style APIs; the code looks like:
+ *
 * <pre>
 * <code>
      * // A CheckAndMutate operation where do the specified action if the column (specified by the
    @@ -75,8 +77,7 @@ private Builder(byte[] row) {
     
         /**
          * Check for lack of column
    -     *
    -     * @param family family to check
    +     * @param family    family to check
          * @param qualifier qualifier to check
          * @return the CheckAndMutate object
          */
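For orientation, a minimal sketch of the fluent builder usage this javadoc describes; the row, family and qualifier values below are illustrative assumptions, not part of this change:

byte[] row = Bytes.toBytes("row-1");
byte[] cf = Bytes.toBytes("cf");
// Apply the Put only if no cf:lock cell exists yet for this row.
CheckAndMutate putIfAbsent = CheckAndMutate.newBuilder(row)
  .ifNotExists(cf, Bytes.toBytes("lock"))
  .build(new Put(row).addColumn(cf, Bytes.toBytes("holder"), Bytes.toBytes("client-A")));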
    @@ -86,10 +87,9 @@ public Builder ifNotExists(byte[] family, byte[] qualifier) {
     
         /**
          * Check for equality
    -     *
    -     * @param family family to check
    +     * @param family    family to check
          * @param qualifier qualifier to check
    -     * @param value the expected value
    +     * @param value     the expected value
          * @return the CheckAndMutate object
          */
         public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
    @@ -97,10 +97,10 @@ public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
         }
     
         /**
    -     * @param family family to check
    +     * @param family    family to check
          * @param qualifier qualifier to check
          * @param compareOp comparison operator to use
    -     * @param value the expected value
    +     * @param value     the expected value
          * @return the CheckAndMutate object
          */
         public Builder ifMatches(byte[] family, byte[] qualifier, CompareOperator compareOp,
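Likewise, a hedged sketch of the ifMatches variant with a CompareOperator, reusing the row and cf bytes from the sketch above; the column name, threshold and Delete action are illustrative only:

// Apply the Delete only when the comparison on cf:count against the supplied value succeeds.
CheckAndMutate deleteIfMatched = CheckAndMutate.newBuilder(row)
  .ifMatches(cf, Bytes.toBytes("count"), CompareOperator.GREATER, Bytes.toBytes(100L))
  .build(new Delete(row));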
    @@ -133,13 +133,14 @@ public Builder timeRange(TimeRange timeRange) {
         private void preCheck(Row action) {
           Preconditions.checkNotNull(action, "action is null");
           if (!Bytes.equals(row, action.getRow())) {
    -        throw new IllegalArgumentException("The row of the action <" +
    -          Bytes.toStringBinary(action.getRow()) + "> doesn't match the original one <" +
    -          Bytes.toStringBinary(this.row) + ">");
    +        throw new IllegalArgumentException(
    +          "The row of the action <" + Bytes.toStringBinary(action.getRow())
    +            + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">");
           }
    -      Preconditions.checkState(op != null || filter != null, "condition is null. You need to"
    -        + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    -        + " CheckAndMutate object");
    +      Preconditions.checkState(op != null || filter != null,
    +        "condition is null. You need to"
    +          + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    +          + " CheckAndMutate object");
         }
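The preCheck above means both requirements are enforced at build() time; a small sketch of the two failure modes, again with illustrative values:

// Throws IllegalStateException: no ifNotExists/ifEquals/ifMatches condition was set.
CheckAndMutate.newBuilder(row).build(new Put(row));
// Throws IllegalArgumentException: the action's row differs from the builder's row.
CheckAndMutate.newBuilder(row).ifNotExists(cf, Bytes.toBytes("q"))
  .build(new Put(Bytes.toBytes("other-row")));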
     
         /**
    @@ -210,7 +211,6 @@ public CheckAndMutate build(RowMutations mutations) {
     
       /**
        * returns a builder object to build a CheckAndMutate object
    -   *
        * @param row row
        * @return a builder object
        */
    @@ -227,7 +227,7 @@ public static Builder newBuilder(byte[] row) {
       private final TimeRange timeRange;
       private final Row action;
     
    -  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier,final CompareOperator op,
    +  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier, final CompareOperator op,
         byte[] value, TimeRange timeRange, Row action) {
         this.row = row;
         this.family = family;
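Putting the pieces together, a self-contained sketch of submitting a CheckAndMutate through the Table API; the connection setup, table name and column values are assumptions for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(TableName.valueOf("demo"))) {
      // Update the "state" column only if it currently equals "PENDING".
      CheckAndMutate cam = CheckAndMutate.newBuilder(row)
        .ifEquals(cf, Bytes.toBytes("state"), Bytes.toBytes("PENDING"))
        .build(new Put(row).addColumn(cf, Bytes.toBytes("state"), Bytes.toBytes("RUNNING")));
      CheckAndMutateResult result = table.checkAndMutate(cam);
      System.out.println("mutation applied: " + result.isSuccess());
    }
  }
}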
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    index 69aa120b6b99..ed198f3b7fe1 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -17,7 +17,6 @@
      */
     package org.apache.hadoop.hbase.client;
     
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    index 9125132e66c5..758cf508578a 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -20,27 +19,27 @@
     
     import java.io.IOException;
     import java.lang.management.ManagementFactory;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
     import org.apache.hadoop.hbase.util.Addressing;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    +import org.apache.yetus.audience.InterfaceAudience;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
     
     /**
    - * The class that is able to determine some unique strings for the client,
    - * such as an IP address, PID, and composite deterministic ID.
    + * The class that is able to determine some unique strings for the client, such as an IP address,
    + * PID, and composite deterministic ID.
      */
     @InterfaceAudience.Private
     final class ClientIdGenerator {
       private static final Logger LOG = LoggerFactory.getLogger(ClientIdGenerator.class);
     
    -  private ClientIdGenerator() {}
    +  private ClientIdGenerator() {
    +  }
     
       /**
    -   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill...
    -   * Note though that new UUID in java by default is just a random number.
    +   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note
    +   *         though that new UUID in java by default is just a random number.
        */
       public static byte[] generateClientId() {
         byte[] selfBytes = getIpAddressBytes();
    @@ -78,8 +77,8 @@ public static Long getPid() {
       }
     
       /**
    -   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual
    -   *         and not a loopback address. Empty array if none can be found or error occurred.
    +   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual and not
    +   *         a loopback address. Empty array if none can be found or error occurred.
        */
       public static byte[] getIpAddressBytes() {
         try {
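As a rough illustration of the composition the javadoc above describes (IP address, PID, TID and a timer packed into one id), a standalone plain-Java sketch; it is not the HBase implementation, just the general shape:

import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;

public class ClientIdSketch {
  // Pack the local IP, the JVM pid, the current thread id and a timestamp into one byte[].
  public static byte[] composeClientId() {
    byte[] ip;
    try {
      ip = InetAddress.getLocalHost().getAddress();
    } catch (UnknownHostException e) {
      ip = new byte[0]; // best effort, mirroring the "empty array if none can be found" behavior
    }
    // RuntimeMXBean#getName is conventionally "pid@hostname" on common JVMs.
    String jvmName = ManagementFactory.getRuntimeMXBean().getName();
    long pid = Long.parseLong(jvmName.split("@")[0]);
    long tid = Thread.currentThread().getId();
    long now = System.currentTimeMillis();
    ByteBuffer buf = ByteBuffer.allocate(ip.length + 3 * Long.BYTES);
    buf.put(ip).putLong(pid).putLong(tid).putLong(now);
    return buf.array();
  }
}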
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    index ba447d5a81ba..44eef0668f03 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -25,7 +25,6 @@
     @InterfaceAudience.Private
     public class ClientUtil {
     
    -
       public static boolean areScanStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) {
         return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow);
       }
    @@ -35,19 +34,23 @@ public static Cursor createCursor(byte[] row) {
       }
     
       /**
-   * <p>When scanning for a prefix the scan should stop immediately after the the last row that
-   * has the specified prefix. This method calculates the closest next rowKey immediately following
-   * the given rowKeyPrefix.</p>
-   * <p>IMPORTANT: This converts a rowKeyPrefix into a rowKey.</p>
-   * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
-   * simply increment the last byte of the array.
-   * But if your application uses real binary rowids you may run into the scenario that your
-   * prefix is something like:</p>
+   * <p>
+   * When scanning for a prefix the scan should stop immediately after the last row that has the
+   * specified prefix. This method calculates the closest next rowKey immediately following the
+   * given rowKeyPrefix.
+   * </p>
+   * <p>
+   * IMPORTANT: This converts a rowKeyPrefix into a rowKey.
+   * </p>
+   * <p>
+   * If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can simply
+   * increment the last byte of the array. But if your application uses real binary rowids you may
+   * run into the scenario that your prefix is something like:
+   * </p>

    *    { 0x12, 0x23, 0xFF, 0xFF }
    * Then this stopRow needs to be fed into the actual scan
    *    { 0x12, 0x24 } (Notice that it is shorter now)
    * This method calculates the correct stop row value for this usecase. - * * @param rowKeyPrefix the rowKeyPrefix. * @return the closest next rowKey immediately following the given rowKeyPrefix. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index 1370d07c5fb3..cc34d59c7321 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.Closeable; @@ -37,6 +35,10 @@ import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; @@ -48,15 +50,13 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramPacket; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * A class that receives the cluster status, and provide it as a set of service to the client. - * Today, manages only the dead server list. - * The class is abstract to allow multiple implementations, from ZooKeeper to multicast based. + * Today, manages only the dead server list. The class is abstract to allow multiple + * implementations, from ZooKeeper to multicast based. */ @InterfaceAudience.Private class ClusterStatusListener implements Closeable { @@ -70,7 +70,7 @@ class ClusterStatusListener implements Closeable { */ public static final String STATUS_LISTENER_CLASS = "hbase.status.listener.class"; public static final Class DEFAULT_STATUS_LISTENER_CLASS = - MulticastListener.class; + MulticastListener.class; /** * Class to be extended to manage a new dead server. @@ -80,13 +80,11 @@ public interface DeadServerHandler { /** * Called when a server is identified as dead. Called only once even if we receive the * information multiple times. - * * @param sn - the server name */ void newDead(ServerName sn); } - /** * The interface to be implemented by a listener of a cluster status event. */ @@ -99,7 +97,6 @@ interface Listener extends Closeable { /** * Called to connect. - * * @param conf Configuration to use. 
* @throws IOException if failing to connect */ @@ -107,11 +104,11 @@ interface Listener extends Closeable { } public ClusterStatusListener(DeadServerHandler dsh, Configuration conf, - Class listenerClass) throws IOException { + Class listenerClass) throws IOException { this.deadServerHandler = dsh; try { Constructor ctor = - listenerClass.getConstructor(ClusterStatusListener.class); + listenerClass.getConstructor(ClusterStatusListener.class); this.listener = ctor.newInstance(this); } catch (InstantiationException e) { throw new IOException("Can't create listener " + listenerClass.getName(), e); @@ -128,7 +125,6 @@ public ClusterStatusListener(DeadServerHandler dsh, Configuration conf, /** * Acts upon the reception of a new cluster status. - * * @param ncs the cluster status */ public void receive(ClusterMetrics ncs) { @@ -152,7 +148,6 @@ public void close() { /** * Check if we know if a server is dead. - * * @param sn the server name to check. * @return true if we know for sure that the server is dead, false otherwise. */ @@ -162,9 +157,10 @@ public boolean isDeadServer(ServerName sn) { } for (ServerName dead : deadServers) { - if (dead.getStartcode() >= sn.getStartcode() && - dead.getPort() == sn.getPort() && - dead.getHostname().equals(sn.getHostname())) { + if ( + dead.getStartcode() >= sn.getStartcode() && dead.getPort() == sn.getPort() + && dead.getHostname().equals(sn.getHostname()) + ) { return true; } } @@ -172,7 +168,6 @@ public boolean isDeadServer(ServerName sn) { return false; } - /** * An implementation using a multicast message between the master & the client. */ @@ -189,12 +184,12 @@ public MulticastListener() { @Override public void connect(Configuration conf) throws IOException { - String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + String mcAddress = + conf.get(HConstants.STATUS_MULTICAST_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_BIND_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); InetAddress ina; @@ -207,11 +202,9 @@ public void connect(Configuration conf) throws IOException { try { Bootstrap b = new Bootstrap(); - b.group(group) - .channel(NioDatagramChannel.class) - .option(ChannelOption.SO_REUSEADDR, true) + b.group(group).channel(NioDatagramChannel.class).option(ChannelOption.SO_REUSEADDR, true) .handler(new ClusterStatusHandler()); - channel = (DatagramChannel)b.bind(bindAddress, port).sync().channel(); + channel = (DatagramChannel) b.bind(bindAddress, port).sync().channel(); } catch (InterruptedException e) { close(); throw ExceptionUtil.asInterrupt(e); @@ -228,7 +221,6 @@ public void connect(Configuration conf) throws IOException { channel.joinGroup(ina, ni, null, channel.newPromise()); } - @Override public void close() { if (channel != null) { @@ -238,17 +230,13 @@ public void close() { group.shutdownGracefully(); } - - /** * Class, conforming to the Netty framework, that manages the message received. 
*/ private class ClusterStatusHandler extends SimpleChannelInboundHandler { @Override - public void exceptionCaught( - ChannelHandlerContext ctx, Throwable cause) - throws Exception { + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { LOG.error("Unexpected exception, continuing.", cause); } @@ -257,7 +245,6 @@ public boolean acceptInboundMessage(Object msg) throws Exception { return super.acceptInboundMessage(msg); } - @Override protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket dp) throws Exception { ByteBufInputStream bis = new ByteBufInputStream(dp.content()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java index 001d672620ea..5f11f8d258a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,71 +20,68 @@ import java.util.Comparator; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.MemoryCompactionPolicy; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * An ColumnFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. - * - * To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods + * An ColumnFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods * @since 2.0.0 */ @InterfaceAudience.Public public interface ColumnFamilyDescriptor { @InterfaceAudience.Private - static final Comparator COMPARATOR - = (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { - int result = Bytes.compareTo(lhs.getName(), rhs.getName()); - if (result != 0) { - return result; - } - // punt on comparison for ordering, just calculate difference. - result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); - if (result != 0) { - return result; - } - return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); - }; - - static final Bytes REPLICATION_SCOPE_BYTES = new Bytes( - Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); + static final Comparator COMPARATOR = + (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { + int result = Bytes.compareTo(lhs.getName(), rhs.getName()); + if (result != 0) { + return result; + } + // punt on comparison for ordering, just calculate difference. 
+ result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); + if (result != 0) { + return result; + } + return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); + }; + + static final Bytes REPLICATION_SCOPE_BYTES = + new Bytes(Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); @InterfaceAudience.Private - static final Comparator COMPARATOR_IGNORE_REPLICATION = ( - ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { - int result = Bytes.compareTo(lcf.getName(), rcf.getName()); - if (result != 0) { - return result; - } - // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove - // REPLICATION_SCOPE_BYTES - Map lValues = new HashMap<>(); - lValues.putAll(lcf.getValues()); - lValues.remove(REPLICATION_SCOPE_BYTES); - Map rValues = new HashMap<>(); - rValues.putAll(rcf.getValues()); - rValues.remove(REPLICATION_SCOPE_BYTES); - result = lValues.hashCode() - rValues.hashCode(); - if (result != 0) { - return result; - } - return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); - }; + static final Comparator COMPARATOR_IGNORE_REPLICATION = + (ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { + int result = Bytes.compareTo(lcf.getName(), rcf.getName()); + if (result != 0) { + return result; + } + // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove + // REPLICATION_SCOPE_BYTES + Map lValues = new HashMap<>(); + lValues.putAll(lcf.getValues()); + lValues.remove(REPLICATION_SCOPE_BYTES); + Map rValues = new HashMap<>(); + rValues.putAll(rcf.getValues()); + rValues.remove(REPLICATION_SCOPE_BYTES); + result = lValues.hashCode() - rValues.hashCode(); + if (result != 0) { + return result; + } + return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); + }; /** * @return The storefile/hfile blocksize for this column family. */ int getBlocksize(); + /** * @return bloom filter type used for new StoreFiles in ColumnFamily */ @@ -114,20 +111,23 @@ public interface ColumnFamilyDescriptor { * @return an unmodifiable map. */ Map getConfiguration(); + /** * @param key the key whose associated value is to be returned * @return accessing the configuration value by key. */ String getConfigurationValue(String key); + /** * @return replication factor set for this CF */ short getDFSReplication(); + /** - * @return the data block encoding algorithm used in block cache and - * optionally on disk + * @return the data block encoding algorithm used in block cache and optionally on disk */ DataBlockEncoding getDataBlockEncoding(); + /** * @return Return the raw crypto key attribute for the family, or null if not set */ @@ -137,35 +137,40 @@ public interface ColumnFamilyDescriptor { * @return Return the encryption algorithm in use by this family */ String getEncryptionType(); + /** - * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for - * for this column family + * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for for + * this column family */ MemoryCompactionPolicy getInMemoryCompaction(); + /** * @return return the KeepDeletedCells */ KeepDeletedCells getKeepDeletedCells(); + /** * @return maximum number of versions */ int getMaxVersions(); + /** * @return The minimum number of versions to keep. 
*/ int getMinVersions(); + /** - * Get the mob compact partition policy for this family - * @return MobCompactPartitionPolicy + * Get the mob compact partition policy for this family n */ MobCompactPartitionPolicy getMobCompactPartitionPolicy(); + /** - * Gets the mob threshold of the family. - * If the size of a cell value is larger than this threshold, it's regarded as a mob. - * The default threshold is 1024*100(100K)B. + * Gets the mob threshold of the family. If the size of a cell value is larger than this + * threshold, it's regarded as a mob. The default threshold is 1024*100(100K)B. * @return The mob threshold. */ long getMobThreshold(); + /** * @return a copy of Name of this column family */ @@ -176,45 +181,53 @@ public interface ColumnFamilyDescriptor { */ String getNameAsString(); - /** - * @return the scope tag - */ + /** + * @return the scope tag + */ int getScope(); + /** * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details. * @return Return the storage policy in use by this family */ String getStoragePolicy(); - /** + + /** * @return Time-to-live of cell contents, in seconds. */ int getTimeToLive(); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ Bytes getValue(Bytes key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ String getValue(String key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ byte[] getValue(byte[] key); + /** * It clone all bytes of all elements. * @return All values */ Map getValues(); + /** * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX - * and BLOOM type blocks). + * and BLOOM type blocks). */ boolean isBlockCacheEnabled(); + /** * @return true if we should cache bloomfilter blocks on write */ @@ -224,29 +237,35 @@ public interface ColumnFamilyDescriptor { * @return true if we should cache data blocks on write */ boolean isCacheDataOnWrite(); + /** * @return true if we should cache index blocks on write */ boolean isCacheIndexesOnWrite(); + /** * @return Whether KV tags should be compressed along with DataBlockEncoding. When no * DataBlockEncoding is been used, this is having no effect. */ boolean isCompressTags(); + /** * @return true if we should evict cached blocks from the blockcache on close */ boolean isEvictBlocksOnClose(); + /** - * @return True if we are to favor keeping all values for this column family in the - * HRegionServer cache. + * @return True if we are to favor keeping all values for this column family in the HRegionServer + * cache. */ boolean isInMemory(); + /** * Gets whether the mob is enabled for the family. * @return True if the mob is enabled for the family. */ boolean isMobEnabled(); + /** * @return true if we should prefetch blocks into the blockcache on open */ @@ -258,9 +277,9 @@ public interface ColumnFamilyDescriptor { String toStringCustomizedValues(); /** - * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts will + * mask a later Put with lower ts. Set this to true to enable new semantics of versions. 
We will + * also consider mvcc in versions. See HBASE-15968 for details. */ boolean isNewVersionBehavior(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 6d85cb439c3e..80178027b6f2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,20 +49,21 @@ public class ColumnFamilyDescriptorBuilder { // For future backward compatibility - // Version 3 was when column names become byte arrays and when we picked up - // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. - // Version 5 was when bloom filter descriptors were removed. - // Version 6 adds metadata as a map where keys and values are byte[]. - // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) - // Version 8 -- reintroduction of bloom filters, changed from boolean to enum - // Version 9 -- add data block encoding + // Version 3 was when column names become byte arrays and when we picked up + // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. + // Version 5 was when bloom filter descriptors were removed. + // Version 6 adds metadata as a map where keys and values are byte[]. + // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) + // Version 8 -- reintroduction of bloom filters, changed from boolean to enum + // Version 9 -- add data block encoding // Version 10 -- change metadata to standard type. // Version 11 -- add column family level configuration. 
private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11; @InterfaceAudience.Private public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; - private static final Bytes IN_MEMORY_COMPACTION_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); + private static final Bytes IN_MEMORY_COMPACTION_BYTES = + new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); @InterfaceAudience.Private public static final String IN_MEMORY = HConstants.IN_MEMORY; @@ -74,53 +75,59 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes COMPRESSION_BYTES = new Bytes(Bytes.toBytes(COMPRESSION)); @InterfaceAudience.Private public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; - private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); + private static final Bytes COMPRESSION_COMPACT_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); public static final String COMPRESSION_COMPACT_MAJOR = "COMPRESSION_COMPACT_MAJOR"; - private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); + private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); public static final String COMPRESSION_COMPACT_MINOR = "COMPRESSION_COMPACT_MINOR"; - private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); + private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); @InterfaceAudience.Private public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING"; - private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); + private static final Bytes DATA_BLOCK_ENCODING_BYTES = + new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); /** - * Key for the BLOCKCACHE attribute. A more exact name would be - * CACHE_DATA_ON_READ because this flag sets whether or not we cache DATA - * blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot - * be disabled. + * Key for the BLOCKCACHE attribute. A more exact name would be CACHE_DATA_ON_READ because this + * flag sets whether or not we cache DATA blocks. We always cache INDEX and BLOOM blocks; caching + * these blocks cannot be disabled. 
*/ @InterfaceAudience.Private public static final String BLOCKCACHE = "BLOCKCACHE"; private static final Bytes BLOCKCACHE_BYTES = new Bytes(Bytes.toBytes(BLOCKCACHE)); @InterfaceAudience.Private public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; - private static final Bytes CACHE_DATA_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); + private static final Bytes CACHE_DATA_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; - private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); + private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; - private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); + private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); @InterfaceAudience.Private public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; - private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); + private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = + new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); /** - * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, - * and DATA blocks of HFiles belonging to this family will be loaded into the - * cache as soon as the file is opened. These loads will not count as cache - * misses. + * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, and DATA blocks of + * HFiles belonging to this family will be loaded into the cache as soon as the file is opened. + * These loads will not count as cache misses. */ @InterfaceAudience.Private public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN"; - private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); + private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = + new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); /** - * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. - * Use smaller block sizes for faster random-access at expense of larger - * indices (more memory consumption). Note that this is a soft limit and that - * blocks have overhead (metadata, CRCs) so blocks will tend to be the size - * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k - * means hbase data will align with an SSDs 4k page accesses (TODO). + * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. Use smaller block + * sizes for faster random-access at expense of larger indices (more memory consumption). Note + * that this is a soft limit and that blocks have overhead (metadata, CRCs) so blocks will tend to + * be the size specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k means + * hbase data will align with an SSDs 4k page accesses (TODO). 
*/ @InterfaceAudience.Private public static final String BLOCKSIZE = "BLOCKSIZE"; @@ -141,13 +148,14 @@ public class ColumnFamilyDescriptorBuilder { public static final String MIN_VERSIONS = "MIN_VERSIONS"; private static final Bytes MIN_VERSIONS_BYTES = new Bytes(Bytes.toBytes(MIN_VERSIONS)); /** - * Retain all cells across flushes and compactions even if they fall behind a - * delete tombstone. To see all retained cells, do a 'raw' scan; see - * Scan#setRaw or pass RAW => true attribute in the shell. + * Retain all cells across flushes and compactions even if they fall behind a delete tombstone. To + * see all retained cells, do a 'raw' scan; see Scan#setRaw or pass RAW => true attribute in + * the shell. */ @InterfaceAudience.Private public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; - private static final Bytes KEEP_DELETED_CELLS_BYTES = new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); + private static final Bytes KEEP_DELETED_CELLS_BYTES = + new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); @InterfaceAudience.Private public static final String COMPRESS_TAGS = "COMPRESS_TAGS"; private static final Bytes COMPRESS_TAGS_BYTES = new Bytes(Bytes.toBytes(COMPRESS_TAGS)); @@ -168,9 +176,10 @@ public class ColumnFamilyDescriptorBuilder { public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k @InterfaceAudience.Private public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY"; - private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); - public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY - = MobCompactPartitionPolicy.DAILY; + private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = + new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); + public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY = + MobCompactPartitionPolicy.DAILY; @InterfaceAudience.Private public static final String DFS_REPLICATION = "DFS_REPLICATION"; private static final Bytes DFS_REPLICATION_BYTES = new Bytes(Bytes.toBytes(DFS_REPLICATION)); @@ -180,7 +189,8 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes STORAGE_POLICY_BYTES = new Bytes(Bytes.toBytes(STORAGE_POLICY)); public static final String NEW_VERSION_BEHAVIOR = "NEW_VERSION_BEHAVIOR"; - private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); + private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = + new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); public static final boolean DEFAULT_NEW_VERSION_BEHAVIOR = false; /** * Default compression type. @@ -203,8 +213,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_MIN_VERSIONS = 0; /** - * Default setting for whether to try and serve this column family from memory - * or not. + * Default setting for whether to try and serve this column family from memory or not. */ public static final boolean DEFAULT_IN_MEMORY = false; @@ -219,14 +228,12 @@ public class ColumnFamilyDescriptorBuilder { public static final boolean DEFAULT_BLOCKCACHE = true; /** - * Default setting for whether to cache data blocks on write if block caching - * is enabled. + * Default setting for whether to cache data blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; /** - * Default setting for whether to cache index blocks on write if block caching - * is enabled. 
+ * Default setting for whether to cache index blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; @@ -241,8 +248,7 @@ public class ColumnFamilyDescriptorBuilder { public static final BloomType DEFAULT_BLOOMFILTER = BloomType.ROW; /** - * Default setting for whether to cache bloom filter blocks on write if block - * caching is enabled. + * Default setting for whether to cache bloom filter blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; @@ -257,8 +263,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; /** - * Default setting for whether to evict cached blocks from the blockcache on - * close. + * Default setting for whether to evict cached blocks from the blockcache on close. */ public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; @@ -276,7 +281,8 @@ public class ColumnFamilyDescriptorBuilder { private static Map getDefaultValuesBytes() { Map values = new HashMap<>(); - DEFAULT_VALUES.forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); + DEFAULT_VALUES + .forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); return values; } @@ -326,10 +332,11 @@ public static Unit getUnit(String key) { /** * @param b Family name. * @return b - * @throws IllegalArgumentException If not null and not a legitimate family - * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because - * b can be null when deserializing). Cannot start with a '.' - * either. Also Family can not be an empty value or equal "recovered.edits". + * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' + * and ends in a ':' (Null passes are allowed because + * b can be null when deserializing). Cannot start + * with a '.' either. Also Family can not be an empty value or + * equal "recovered.edits". */ public static byte[] isLegalColumnFamilyName(final byte[] b) { if (b == null) { @@ -337,27 +344,28 @@ public static byte[] isLegalColumnFamilyName(final byte[] b) { } Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty"); if (b[0] == '.') { - throw new IllegalArgumentException("Column Family names cannot start with a " - + "period: " + Bytes.toString(b)); + throw new IllegalArgumentException( + "Column Family names cannot start with a " + "period: " + Bytes.toString(b)); } for (int i = 0; i < b.length; i++) { if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { throw new IllegalArgumentException("Illegal character <" + b[i] - + ">. Column Family names cannot contain control characters or colons: " - + Bytes.toString(b)); + + ">. 
Column Family names cannot contain control characters or colons: " + + Bytes.toString(b)); } } byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR); if (Bytes.equals(recoveredEdit, b)) { - throw new IllegalArgumentException("Column Family name cannot be: " - + HConstants.RECOVERED_EDITS_DIR); + throw new IllegalArgumentException( + "Column Family name cannot be: " + HConstants.RECOVERED_EDITS_DIR); } return b; } private final ModifyableColumnFamilyDescriptor desc; - public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) throws DeserializationException { + public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) + throws DeserializationException { return ModifyableColumnFamilyDescriptor.parseFrom(pbBytes); } @@ -453,12 +461,14 @@ public ColumnFamilyDescriptorBuilder setCompactionCompressionType(Compression.Al return this; } - public ColumnFamilyDescriptorBuilder setMajorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMajorCompactionCompressionType(Compression.Algorithm value) { desc.setMajorCompactionCompressionType(value); return this; } - public ColumnFamilyDescriptorBuilder setMinorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMinorCompactionCompressionType(Compression.Algorithm value) { desc.setMinorCompactionCompressionType(value); return this; } @@ -532,7 +542,8 @@ public ColumnFamilyDescriptorBuilder setMinVersions(final int value) { return this; } - public ColumnFamilyDescriptorBuilder setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { + public ColumnFamilyDescriptorBuilder + setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { desc.setMobCompactPartitionPolicy(value); return this; } @@ -593,19 +604,18 @@ public ColumnFamilyDescriptorBuilder setValue(final String key, final String val } public ColumnFamilyDescriptorBuilder setVersionsWithTimeToLive(final int retentionInterval, - final int versionAfterInterval) { + final int versionAfterInterval) { desc.setVersionsWithTimeToLive(retentionInterval, versionAfterInterval); return this; } /** - * An ModifyableFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. + * An ModifyableFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. */ private static final class ModifyableColumnFamilyDescriptor - implements ColumnFamilyDescriptor, Comparable { + implements ColumnFamilyDescriptor, Comparable { // Column family name private final byte[] name; @@ -614,20 +624,17 @@ private static final class ModifyableColumnFamilyDescriptor private final Map values = new HashMap<>(); /** - * A map which holds the configuration specific to the column family. The - * keys of the map have the same names as config keys and override the - * defaults with cf-specific settings. Example usage may be for compactions, - * etc. + * A map which holds the configuration specific to the column family. The keys of the map have + * the same names as config keys and override the defaults with cf-specific settings. Example + * usage may be for compactions, etc. */ private final Map configuration = new HashMap<>(); /** - * Construct a column descriptor specifying only the family name The other - * attributes are defaulted. 
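
A small sketch of the name rules enforced by isLegalColumnFamilyName above (no empty name, no leading '.', no control characters, ':', '\' or '/', and not the recovered.edits directory name); the example names are made up.

  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  class FamilyNameCheckSketch {
    static void check() {
      // Legal: the input byte[] is returned unchanged.
      ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes("cf1"));

      // Each of these would violate a rule above and throw IllegalArgumentException:
      // ".hidden"          -- starts with a period
      // "a:b"              -- contains a colon
      // "recovered.edits"  -- reserved directory name
    }
  }
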
- * - * @param name Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - * TODO: make this private after the HCD is removed. + * Construct a column descriptor specifying only the family name The other attributes are + * defaulted. + * @param name Column family name. Must be 'printable' -- digit or letter -- and may not contain + * a : TODO: make this private after the HCD is removed. */ @InterfaceAudience.Private public ModifyableColumnFamilyDescriptor(final byte[] name) { @@ -635,8 +642,8 @@ public ModifyableColumnFamilyDescriptor(final byte[] name) { } /** - * Constructor. Makes a deep copy of the supplied descriptor. - * TODO: make this private after the HCD is removed. + * Constructor. Makes a deep copy of the supplied descriptor. TODO: make this private after the + * HCD is removed. * @param desc The descriptor. */ @InterfaceAudience.Private @@ -644,7 +651,8 @@ public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) { this(desc.getName(), desc.getValues(), desc.getConfiguration()); } - private ModifyableColumnFamilyDescriptor(byte[] name, Map values, Map config) { + private ModifyableColumnFamilyDescriptor(byte[] name, Map values, + Map config) { this.name = name; this.values.putAll(values); this.configuration.putAll(config); @@ -683,12 +691,13 @@ public Map getValues() { } /** - * @param key The key. + * @param key The key. * @param value The value. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setValue(byte[] key, byte[] value) { - return setValue(toBytesOrNull(key, Function.identity()), toBytesOrNull(value, Function.identity())); + return setValue(toBytesOrNull(key, Function.identity()), + toBytesOrNull(value, Function.identity())); } public ModifyableColumnFamilyDescriptor setValue(String key, String value) { @@ -698,8 +707,9 @@ public ModifyableColumnFamilyDescriptor setValue(String key, String value) { private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } + /** - * @param key The key. + * @param key The key. * @param value The value. * @return this (for chained invocation) */ @@ -749,9 +759,9 @@ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { throw new IllegalArgumentException("Maximum versions must be positive"); } if (maxVersions < this.getMinVersions()) { - throw new IllegalArgumentException("Set MaxVersion to " + maxVersions - + " while minVersion is " + this.getMinVersions() - + ". Maximum versions must be >= minimum versions "); + throw new IllegalArgumentException( + "Set MaxVersion to " + maxVersions + " while minVersion is " + this.getMinVersions() + + ". 
Maximum versions must be >= minimum versions "); } setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions)); return this; @@ -759,7 +769,6 @@ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { /** * Set minimum and maximum versions to keep - * * @param minVersions minimal number of versions * @param maxVersions maximum number of versions * @return this (for chained invocation) @@ -772,24 +781,22 @@ public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVers } if (maxVersions < minVersions) { - throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions - + " and set MinVersion to " + minVersions - + ", as maximum versions must be >= minimum versions."); + throw new IllegalArgumentException( + "Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions + + ", as maximum versions must be >= minimum versions."); } setMinVersions(minVersions); setMaxVersions(maxVersions); return this; } - @Override public int getBlocksize() { return getStringOrDefault(BLOCKSIZE_BYTES, Integer::valueOf, DEFAULT_BLOCKSIZE); } /** - * @param s Blocksize to use when writing out storefiles/hfiles on this - * column family. + * @param s Blocksize to use when writing out storefiles/hfiles on this column family. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { @@ -797,8 +804,8 @@ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { } public ModifyableColumnFamilyDescriptor setBlocksize(String blocksize) throws HBaseException { - return setBlocksize(Integer.parseInt(PrettyPrinter. - valueOf(blocksize, PrettyPrinter.Unit.BYTE))); + return setBlocksize( + Integer.parseInt(PrettyPrinter.valueOf(blocksize, PrettyPrinter.Unit.BYTE))); } @Override @@ -808,11 +815,9 @@ public Compression.Algorithm getCompressionType() { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * See LZO Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See See LZO Compression for + * how to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ @@ -828,20 +833,18 @@ public DataBlockEncoding getDataBlockEncoding() { /** * Set data block encoding algorithm used in block cache. - * * @param type What kind of data block encoding will be used. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDataBlockEncoding(DataBlockEncoding type) { - return setValue(DATA_BLOCK_ENCODING_BYTES, type == null ? DataBlockEncoding.NONE.name() : type.name()); + return setValue(DATA_BLOCK_ENCODING_BYTES, + type == null ? DataBlockEncoding.NONE.name() : type.name()); } /** - * Set whether the tags should be compressed along with DataBlockEncoding. - * When no DataBlockEncoding is been used, this is having no effect. - * - * @param compressTags - * @return this (for chained invocation) + * Set whether the tags should be compressed along with DataBlockEncoding. When no + * DataBlockEncoding is been used, this is having no effect. 
n * @return this (for chained + * invocation) */ public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) { return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags)); @@ -849,8 +852,7 @@ public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) { @Override public boolean isCompressTags() { - return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, - DEFAULT_COMPRESS_TAGS); + return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, DEFAULT_COMPRESS_TAGS); } @Override @@ -872,26 +874,24 @@ public Compression.Algorithm getMinorCompactionCompressionType() { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * See LZO Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See See LZO Compression for + * how to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMajorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMajorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MAJOR_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMinorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMinorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MINOR_BYTES, type.name()); } @@ -901,8 +901,8 @@ public boolean isInMemory() { } /** - * @param inMemory True if we are to favor keeping all values for this - * column family in the HRegionServer cache + * @param inMemory True if we are to favor keeping all values for this column family in the + * HRegionServer cache * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) { @@ -916,23 +916,22 @@ public MemoryCompactionPolicy getInMemoryCompaction() { } /** - * @param inMemoryCompaction the prefered in-memory compaction policy for - * this column family + * @param inMemoryCompaction the prefered in-memory compaction policy for this column family * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { + public ModifyableColumnFamilyDescriptor + setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { return setValue(IN_MEMORY_COMPACTION_BYTES, inMemoryCompaction.name()); } @Override public KeepDeletedCells getKeepDeletedCells() { - return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, - KeepDeletedCells::getValue, DEFAULT_KEEP_DELETED); + return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, KeepDeletedCells::getValue, + DEFAULT_KEEP_DELETED); } /** - * @param keepDeletedCells True if deleted rows should not be collected - * immediately. + * @param keepDeletedCells True if deleted rows should not be collected immediately. 
* @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) { @@ -941,13 +940,13 @@ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells kee /** * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. We + * will also consider mvcc in versions. See HBASE-15968 for details. */ @Override public boolean isNewVersionBehavior() { - return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, - Boolean::parseBoolean, DEFAULT_NEW_VERSION_BEHAVIOR); + return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, Boolean::parseBoolean, + DEFAULT_NEW_VERSION_BEHAVIOR); } public ModifyableColumnFamilyDescriptor setNewVersionBehavior(boolean newVersionBehavior) { @@ -982,8 +981,7 @@ public int getMinVersions() { } /** - * @param minVersions The minimum number of versions to keep. (used when - * timeToLive is set) + * @param minVersions The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { @@ -991,15 +989,14 @@ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { } /** - * Retain all versions for a given TTL(retentionInterval), and then only a specific number - * of versions(versionAfterInterval) after that interval elapses. - * - * @param retentionInterval Retain all versions for this interval + * Retain all versions for a given TTL(retentionInterval), and then only a specific number of + * versions(versionAfterInterval) after that interval elapses. + * @param retentionInterval Retain all versions for this interval * @param versionAfterInterval Retain no of versions to retain after retentionInterval * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive( - final int retentionInterval, final int versionAfterInterval) { + public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(final int retentionInterval, + final int versionAfterInterval) { ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor = setVersions(versionAfterInterval, Integer.MAX_VALUE); modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval); @@ -1013,8 +1010,8 @@ public boolean isBlockCacheEnabled() { } /** - * @param blockCacheEnabled True if hfile DATA type blocks should be cached - * (We always cache INDEX and BLOOM blocks; you cannot turn this off). + * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache + * INDEX and BLOOM blocks; you cannot turn this off). 
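
A hedged sketch tying together the settings documented above: setVersionsWithTimeToLive keeps every version for the retention interval and only versionAfterInterval versions afterwards, KEEP_DELETED_CELLS => TTL keeps deleted cells until the TTL expires, and the new version behavior (HBASE-15968) also considers mvcc. The numbers and family name are illustrative only.

  import org.apache.hadoop.hbase.KeepDeletedCells;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  class VersionsWithTtlSketch {
    static ColumnFamilyDescriptor family() {
      return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        // keep all versions for one day, then at most 3 versions per cell
        .setVersionsWithTimeToLive(86400, 3)
        .setKeepDeletedCells(KeepDeletedCells.TTL)
        // consider mvcc in addition to timestamp when resolving versions
        .setNewVersionBehavior(true)
        .build();
    }
  }
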
* @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { @@ -1033,7 +1030,8 @@ public ModifyableColumnFamilyDescriptor setBloomFilterType(final BloomType bt) { @Override public int getScope() { - return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, DEFAULT_REPLICATION_SCOPE); + return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, + DEFAULT_REPLICATION_SCOPE); } /** @@ -1046,7 +1044,8 @@ public ModifyableColumnFamilyDescriptor setScope(int scope) { @Override public boolean isCacheDataOnWrite() { - return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_ON_WRITE); + return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_DATA_ON_WRITE); } /** @@ -1059,7 +1058,8 @@ public ModifyableColumnFamilyDescriptor setCacheDataOnWrite(boolean value) { @Override public boolean isCacheIndexesOnWrite() { - return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); + return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_INDEX_ON_WRITE); } /** @@ -1072,7 +1072,8 @@ public ModifyableColumnFamilyDescriptor setCacheIndexesOnWrite(boolean value) { @Override public boolean isCacheBloomsOnWrite() { - return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_BLOOMS_ON_WRITE); + return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_BLOOMS_ON_WRITE); } /** @@ -1085,12 +1086,12 @@ public ModifyableColumnFamilyDescriptor setCacheBloomsOnWrite(boolean value) { @Override public boolean isEvictBlocksOnClose() { - return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, DEFAULT_EVICT_BLOCKS_ON_CLOSE); + return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, + DEFAULT_EVICT_BLOCKS_ON_CLOSE); } /** - * @param value true if we should evict cached blocks from the blockcache on - * close + * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @@ -1099,12 +1100,12 @@ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @Override public boolean isPrefetchBlocksOnOpen() { - return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, DEFAULT_PREFETCH_BLOCKS_ON_OPEN); + return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, + DEFAULT_PREFETCH_BLOCKS_ON_OPEN); } /** - * @param value true if we should prefetch blocks into the blockcache on - * open + * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) { @@ -1124,7 +1125,6 @@ public String toString() { return s.toString(); } - @Override public String toStringCustomizedValues() { StringBuilder s = new StringBuilder(); @@ -1151,9 +1151,10 @@ private StringBuilder getValues(boolean printDefaults) { } String key = Bytes.toString(entry.getKey().get()); String value = Bytes.toStringBinary(entry.getValue().get()); - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + if ( + printDefaults || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value) + ) { s.append(", "); s.append(key); s.append(" => "); @@ 
-1197,7 +1198,8 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForConfiguration = true; s.append('\'').append(e.getKey()).append('\''); s.append(" => "); - s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\''); + s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))) + .append('\''); } s.append("}"); } @@ -1210,7 +1212,8 @@ public boolean equals(Object obj) { return true; } if (obj instanceof ModifyableColumnFamilyDescriptor) { - return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0; + return ColumnFamilyDescriptor.COMPARATOR.compare(this, + (ModifyableColumnFamilyDescriptor) obj) == 0; } return false; } @@ -1234,19 +1237,17 @@ public int compareTo(ModifyableColumnFamilyDescriptor other) { * @see #parseFrom(byte[]) */ private byte[] toByteArray() { - return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this) - .toByteArray()); + return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this).toByteArray()); } /** - * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb - * magic prefix - * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from - * bytes - * @throws DeserializationException - * @see #toByteArray() + * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic + * prefix + * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from bytes + * n * @see #toByteArray() */ - private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { + private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) + throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("No magic"); } @@ -1275,9 +1276,7 @@ public Map getConfiguration() { /** * Setter for storing a configuration setting in {@link #configuration} map. - * - * @param key Config key. Same as XML config key e.g. - * hbase.something.or.other. + * @param key Config key. Same as XML config key e.g. hbase.something.or.other. * @param value String value. If null, removes the configuration. * @return this (for chained invocation) */ @@ -1291,11 +1290,8 @@ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String valu } /** - * Remove a configuration setting represented by the key from the - * {@link #configuration} map. - * - * @param key - * @return this (for chained invocation) + * Remove a configuration setting represented by the key from the {@link #configuration} map. 
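
The configuration map documented above lets a single family override ordinary config keys (the javadoc's example is compactions). A minimal sketch; the key and value are only an example of that idea, not part of this change.

  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  class PerFamilyConfigSketch {
    static ColumnFamilyDescriptor family() {
      return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        // per-family override of a normal config key; setConfiguration(key, null) removes it
        .setConfiguration("hbase.hstore.compaction.min", "5")
        .build();
    }
  }
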
n + * * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) { return setConfiguration(key, null); @@ -1307,10 +1303,8 @@ public String getEncryptionType() { } /** - * Set the encryption algorithm for use with this family - * - * @param algorithm - * @return this (for chained invocation) + * Set the encryption algorithm for use with this family n * @return this (for chained + * invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) { return setValue(ENCRYPTION_BYTES, algorithm); @@ -1322,10 +1316,7 @@ public byte[] getEncryptionKey() { } /** - * Set the raw crypto key attribute for the family - * - * @param keyBytes - * @return this (for chained invocation) + * Set the raw crypto key attribute for the family n * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) { return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes)); @@ -1338,7 +1329,6 @@ public long getMobThreshold() { /** * Sets the mob threshold of the family. - * * @param threshold The mob threshold. * @return this (for chained invocation) */ @@ -1353,7 +1343,6 @@ public boolean isMobEnabled() { /** * Enables the mob for the family. - * * @param isMobEnabled Whether to enable the mob for the family. * @return this (for chained invocation) */ @@ -1370,32 +1359,30 @@ public MobCompactPartitionPolicy getMobCompactPartitionPolicy() { /** * Set the mob compact partition policy for the family. - * * @param policy policy type * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { + public ModifyableColumnFamilyDescriptor + setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name()); } @Override public short getDFSReplication() { - return getStringOrDefault(DFS_REPLICATION_BYTES, - Short::valueOf, DEFAULT_DFS_REPLICATION); + return getStringOrDefault(DFS_REPLICATION_BYTES, Short::valueOf, DEFAULT_DFS_REPLICATION); } /** * Set the replication factor to hfile(s) belonging to this family - * - * @param replication number of replicas the blocks(s) belonging to this CF - * should have, or {@link #DEFAULT_DFS_REPLICATION} for the default - * replication factor set in the filesystem + * @param replication number of replicas the blocks(s) belonging to this CF should have, or + * {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in + * the filesystem * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDFSReplication(short replication) { if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) { throw new IllegalArgumentException( - "DFS replication factor cannot be less than 1 if explicitly set."); + "DFS replication factor cannot be less than 1 if explicitly set."); } return setValue(DFS_REPLICATION_BYTES, Short.toString(replication)); } @@ -1407,11 +1394,8 @@ public String getStoragePolicy() { /** * Set the storage policy for use with this family - * - * @param policy the policy to set, valid setting includes: - * "LAZY_PERSIST", - * "ALL_SSD", "ONE_SSD", "HOT", "WARM", - * "COLD" + * @param policy the policy to set, valid setting includes: "LAZY_PERSIST", + * "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD" * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) { diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java index 018cfef02605..225bb072db70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +16,19 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; /** - * Currently, there are only two compact types: - * {@code NORMAL} means do store files compaction; + * Currently, there are only two compact types: {@code NORMAL} means do store files compaction; * {@code MOB} means do mob files compaction. - * */ + */ @InterfaceAudience.Public public enum CompactType { - NORMAL (0), - MOB (1); + NORMAL(0), + MOB(1); - CompactType(int value) {} + CompactType(int value) { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java index 51f7d071e4ac..e1f1dcd7f773 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,5 +24,8 @@ */ @InterfaceAudience.Public public enum CompactionState { - NONE, MINOR, MAJOR, MAJOR_AND_MINOR + NONE, + MINOR, + MAJOR, + MAJOR_AND_MINOR } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java index 08afeb61b558..592a99b0584d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache that only returns complete result. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index b638e72a46db..385007703e2f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,23 +29,22 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A cluster connection encapsulating lower level individual connections to actual servers and - * a connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} - * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} - * the connection to release the resources. - * - *

    The connection object contains logic to find the master, locate regions out on the cluster, - * keeps a cache of locations and then knows how to re-calibrate after they move. The individual - * connections to servers, meta cache, zookeeper connection, etc are all shared by the - * {@link Table} and {@link Admin} instances obtained from this connection. - * - *

    Connection creation is a heavy-weight operation. Connection implementations are thread-safe, - * so that the client can create a connection once, and share it with different threads. - * {@link Table} and {@link Admin} instances, on the other hand, are light-weight and are not - * thread-safe. Typically, a single connection per client application is instantiated and every - * thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} - * is not recommended. - * + * A cluster connection encapsulating lower level individual connections to actual servers and a + * connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} + * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} the + * connection to release the resources. + *

    + * The connection object contains logic to find the master, locate regions out on the cluster, keeps + * a cache of locations and then knows how to re-calibrate after they move. The individual + * connections to servers, meta cache, zookeeper connection, etc are all shared by the {@link Table} + * and {@link Admin} instances obtained from this connection. + *

    + * Connection creation is a heavy-weight operation. Connection implementations are thread-safe, so + * that the client can create a connection once, and share it with different threads. {@link Table} + * and {@link Admin} instances, on the other hand, are light-weight and are not thread-safe. + * Typically, a single connection per client application is instantiated and every thread will + * obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} is not + * recommended. * @see ConnectionFactory * @since 0.99.0 */ @@ -53,13 +52,11 @@ public interface Connection extends Abortable, Closeable { /* - * Implementation notes: - * - Only allow new style of interfaces: - * -- All table names are passed as TableName. No more byte[] and string arguments - * -- Most of the classes with names H is deprecated in favor of non-H versions - * (Table, Connection, etc) - * -- Only real client-facing public methods are allowed - * - Connection should contain only getTable(), getAdmin() kind of general methods. + * Implementation notes: - Only allow new style of interfaces: -- All table names are passed as + * TableName. No more byte[] and string arguments -- Most of the classes with names H is + * deprecated in favor of non-H versions (Table, Connection, etc) -- Only real client-facing + * public methods are allowed - Connection should contain only getTable(), getAdmin() kind of + * general methods. */ /** @@ -68,17 +65,14 @@ public interface Connection extends Abortable, Closeable { Configuration getConfiguration(); /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *

    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *

    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table * @return a Table to use for interactions with this table */ @@ -87,20 +81,16 @@ default Table getTable(TableName tableName) throws IOException { } /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *

    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *

    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. - * + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table - * @param pool The thread pool to use for batch operations, null to use a default pool. + * @param pool The thread pool to use for batch operations, null to use a default pool. * @return a Table to use for interactions with this table */ default Table getTable(TableName tableName, ExecutorService pool) throws IOException { @@ -110,19 +100,17 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep /** *
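
To make the lifecycle described above concrete: one heavy-weight Connection per application, one light-weight Table per using thread, both closed by the caller. A sketch only; the table, family, qualifier and row names are placeholders.

  import java.io.IOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  class ConnectionSketch {
    static byte[] readOne() throws IOException {
      // Connection is heavy-weight and shared; Table is cheap and per-thread.
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = conn.getTable(TableName.valueOf("t1"))) {
        Result r = table.get(new Get(Bytes.toBytes("row1")));
        return r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      }
    }
  }
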

    * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The - * {@link BufferedMutator} returned by this method is thread-safe. This BufferedMutator will - * use the Connection's ExecutorService. This object can be used for long lived operations. + * {@link BufferedMutator} returned by this method is thread-safe. This BufferedMutator will use + * the Connection's ExecutorService. This object can be used for long lived operations. *

    *

    - * The caller is responsible for calling {@link BufferedMutator#close()} on - * the returned {@link BufferedMutator} instance. + * The caller is responsible for calling {@link BufferedMutator#close()} on the returned + * {@link BufferedMutator} instance. *

    *

    - * This accessor will use the connection's ExecutorService and will throw an - * exception in the main thread when an asynchronous exception occurs. - * + * This accessor will use the connection's ExecutorService and will throw an exception in the main + * thread when an asynchronous exception occurs. * @param tableName the name of the table - * * @return a {@link BufferedMutator} for the supplied tableName. */ default BufferedMutator getBufferedMutator(TableName tableName) throws IOException { @@ -134,7 +122,6 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti * {@link BufferedMutator} returned by this method is thread-safe. This object can be used for * long lived table operations. The caller is responsible for calling * {@link BufferedMutator#close()} on the returned {@link BufferedMutator} instance. - * * @param params details on how to instantiate the {@code BufferedMutator}. * @return a {@link BufferedMutator} for the supplied tableName. */ @@ -143,15 +130,10 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti /** * Retrieve a RegionLocator implementation to inspect region information on a table. The returned * RegionLocator is not thread-safe, so a new instance should be created for each using thread. - * - * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither - * required nor desired. - *
    + * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither + * required nor desired.
    * The caller is responsible for calling {@link RegionLocator#close()} on the returned - * RegionLocator instance. - * - * RegionLocator needs to be unmanaged - * + * RegionLocator instance. RegionLocator needs to be unmanaged * @param tableName Name of the table who's region is to be examined * @return A RegionLocator instance */ @@ -168,14 +150,10 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti void clearRegionLocationCache(); /** - * Retrieve an Admin implementation to administer an HBase cluster. - * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for - * each using thread. This is a lightweight operation. Pooling or caching of the returned - * Admin is not recommended. - *
    - * The caller is responsible for calling {@link Admin#close()} on the returned - * Admin instance. - * + * Retrieve an Admin implementation to administer an HBase cluster. The returned Admin is not + * guaranteed to be thread-safe. A new instance should be created for each using thread. This is a + * lightweight operation. Pooling or caching of the returned Admin is not recommended.
    + * The caller is responsible for calling {@link Admin#close()} on the returned Admin instance. * @return an Admin instance for cluster administration */ Admin getAdmin() throws IOException; @@ -192,7 +170,7 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti /** * Returns an {@link TableBuilder} for creating {@link Table}. * @param tableName the name of the table - * @param pool the thread pool to use for requests like batch and scan + * @param pool the thread pool to use for requests like batch and scan */ TableBuilder getTableBuilder(TableName tableName, ExecutorService pool); @@ -210,15 +188,11 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti String getClusterId(); /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
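
A sketch of the BufferedMutator and Admin patterns described above, given an already open Connection conn; table, family and value names are placeholders.

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.BufferedMutator;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  class MutatorAndAdminSketch {
    static void run(Connection conn) throws IOException {
      // Thread-safe, buffers writes client-side; close() flushes whatever is pending.
      try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
        mutator.mutate(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      }
      // Admin is created per use and closed by the caller.
      try (Admin admin = conn.getAdmin()) {
        admin.tableExists(TableName.valueOf("t1"));
      }
    }
  }
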
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    * This will be used mostly by hbck tool. - * * @return an Hbck instance for active master. Active master is fetched from the zookeeper. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) @@ -227,18 +201,13 @@ default Hbck getHbck() throws IOException { } /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    - * This will be used mostly by hbck tool. This may only be used to by pass getting - * registered master from ZK. In situations where ZK is not available or active master is not - * registered with ZK and user can get master address by other means, master can be explicitly - * specified. - * + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    + * This will be used mostly by hbck tool. This may only be used to by pass getting registered + * master from ZK. In situations where ZK is not available or active master is not registered with + * ZK and user can get master address by other means, master can be explicitly specified. * @param masterServer explicit {@link ServerName} for master server * @return an Hbck instance for a specified master server */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java index 232d7cdf0750..e539805475a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java @@ -1,14 +1,20 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; @@ -16,12 +22,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Configuration parameters for the connection. - * Configuration is a heavy weight registry that does a lot of string operations and regex matching. - * Method calls into Configuration account for high CPU usage and have huge performance impact. - * This class caches connection-related configuration values in the ConnectionConfiguration - * object so that expensive conf.getXXX() calls are avoided every time HTable, etc is instantiated. - * see HBASE-12128 + * Configuration parameters for the connection. Configuration is a heavy weight registry that does a + * lot of string operations and regex matching. Method calls into Configuration account for high CPU + * usage and have huge performance impact. This class caches connection-related configuration values + * in the ConnectionConfiguration object so that expensive conf.getXXX() calls are avoided every + * time HTable, etc is instantiated. 
see HBASE-12128 */ @InterfaceAudience.Private public class ConnectionConfiguration { @@ -29,9 +34,9 @@ public class ConnectionConfiguration { public static final String WRITE_BUFFER_SIZE_KEY = "hbase.client.write.buffer"; public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS = - "hbase.client.write.buffer.periodicflush.timeout.ms"; + "hbase.client.write.buffer.periodicflush.timeout.ms"; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = - "hbase.client.write.buffer.periodicflush.timertick.ms"; + "hbase.client.write.buffer.periodicflush.timertick.ms"; public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT = 0; // 0 == Disabled public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT = 1000L; // 1 second public static final String MAX_KEYVALUE_SIZE_KEY = "hbase.client.keyvalue.maxsize"; @@ -69,26 +74,23 @@ public class ConnectionConfiguration { ConnectionConfiguration(Configuration conf) { this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); - this.writeBufferPeriodicFlushTimeoutMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); + this.writeBufferPeriodicFlushTimeoutMs = conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, + WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); this.writeBufferPeriodicFlushTimerTickMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); + WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); this.metaOperationTimeout = conf.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.operationTimeout = conf.getInt( - HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.scannerCaching = conf.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + this.scannerCaching = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - this.scannerMaxResultSize = - conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + this.scannerMaxResultSize = conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.primaryCallTimeoutMicroSecond = conf.getInt(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT); @@ -100,28 +102,27 @@ public class ConnectionConfiguration { conf.getInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT); - this.retries = conf.getInt( - HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - this.clientScannerAsyncPrefetch = conf.getBoolean( - Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); + this.clientScannerAsyncPrefetch = conf.getBoolean(Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, + Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); 
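
Since these values are copied into ConnectionConfiguration once, at connection creation, client-side tuning has to be applied to the Configuration before calling ConnectionFactory.createConnection. A sketch with arbitrary example values:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  class ClientTuningSketch {
    static Connection open() throws IOException {
      Configuration conf = HBaseConfiguration.create();
      conf.setLong("hbase.client.write.buffer", 4L * 1024 * 1024); // WRITE_BUFFER_SIZE_KEY
      conf.setInt("hbase.client.retries.number", 10);
      return ConnectionFactory.createConnection(conf); // values are cached from conf here
    }
  }
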
this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT); this.rpcTimeout = - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); } /** - * Constructor - * This is for internal testing purpose (using the default value). - * In real usage, we should read the configuration from the Configuration object. + * Constructor This is for internal testing purpose (using the default value). In real usage, we + * should read the configuration from the Configuration object. */ protected ConnectionConfiguration() { this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT; @@ -134,7 +135,7 @@ protected ConnectionConfiguration() { this.primaryCallTimeoutMicroSecond = 10000; this.replicaCallTimeoutMicroSecondScan = 1000000; this.metaReplicaCallTimeoutMicroSecondScan = - HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT; + HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT; this.retries = HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER; this.clientScannerAsyncPrefetch = Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH; this.maxKeyValueSize = MAX_KEYVALUE_SIZE_DEFAULT; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index a3cf55715bdf..4d4559f4b7a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,13 +54,15 @@ * Since 2.2.0, Connection created by ConnectionFactory can contain user-specified kerberos * credentials if caller has following two configurations set: *

      - *
    • hbase.client.keytab.file, points to a valid keytab on the local filesystem - *
    • hbase.client.kerberos.principal, gives the Kerberos principal to use + *
    • hbase.client.keytab.file, points to a valid keytab on the local filesystem + *
    • hbase.client.kerberos.principal, gives the Kerberos principal to use *
    * By this way, caller can directly connect to kerberized cluster without caring login and * credentials renewal logic in application. + * *
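
A hedged sketch of the two keys listed above; the keytab path and principal are placeholders for whatever the deployment actually uses.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  class SecureConnectionSketch {
    static Connection open() throws IOException {
      Configuration conf = HBaseConfiguration.create();
      conf.set("hbase.client.keytab.file", "/path/to/client.keytab");          // placeholder
      conf.set("hbase.client.kerberos.principal", "client/_HOST@EXAMPLE.COM"); // placeholder
      // Since 2.2.0 the factory handles login and credential renewal from these settings.
      return ConnectionFactory.createConnection(conf);
    }
  }
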
      *
    + * * Similarly, {@link Connection} also returns {@link Admin} and {@link RegionLocator} * implementations. * @see Connection @@ -70,7 +71,8 @@ @InterfaceAudience.Public public class ConnectionFactory { - public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = "hbase.client.async.connection.impl"; + public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = + "hbase.client.async.connection.impl"; /** No public c.tors */ protected ConnectionFactory() { @@ -155,7 +157,7 @@ public static Connection createConnection(Configuration conf) throws IOException * @return Connection object for conf */ public static Connection createConnection(Configuration conf, ExecutorService pool) - throws IOException { + throws IOException { return createConnection(conf, pool, AuthUtil.loginClient(conf)); } @@ -213,7 +215,7 @@ public static Connection createConnection(Configuration conf, User user) throws * @return Connection object for conf */ public static Connection createConnection(Configuration conf, ExecutorService pool, - final User user) throws IOException { + final User user) throws IOException { Class clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionOverAsyncConnection.class, Connection.class); if (clazz != ConnectionOverAsyncConnection.class) { @@ -222,8 +224,8 @@ public static Connection createConnection(Configuration conf, ExecutorService po Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); constructor.setAccessible(true); - return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor - .newInstance(conf, pool, user)); + return user.runAs((PrivilegedExceptionAction< + Connection>) () -> (Connection) constructor.newInstance(conf, pool, user)); } catch (Exception e) { throw new IOException(e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java index 15d5775be3dd..7a7b38a4df6a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -193,8 +193,7 @@ public Table build() { conn.getTableBuilder(tableName).setRpcTimeout(rpcTimeout, TimeUnit.MILLISECONDS) .setReadRpcTimeout(readRpcTimeout, TimeUnit.MILLISECONDS) .setWriteRpcTimeout(writeRpcTimeout, TimeUnit.MILLISECONDS) - .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS) - .build(), + .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS).build(), poolSupplier); } }; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 975d8df71808..2ace3959ffa6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -25,8 +25,8 @@ /** * Registry for meta information needed for connection setup to a HBase cluster. Implementations - * hold cluster information such as this cluster's id, location of hbase:meta, etc.. - * Internal use only. 
+ * hold cluster information such as this cluster's id, location of hbase:meta, etc.. Internal use + * only. */ @InterfaceAudience.Private public interface ConnectionRegistry extends Closeable { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 70312aa4de46..2124a0aa0034 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -24,8 +24,6 @@ import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; -import java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Arrays; import java.util.List; @@ -111,11 +109,11 @@ public static long getPauseTime(final long pause, final int tries) { /** * Changes the configuration to set the number of retries needed when using Connection internally, * e.g. for updating catalog tables, etc. Call this method before we create any Connections. - * @param c The Configuration instance to set the retries into. + * @param c The Configuration instance to set the retries into. * @param log Used to log what we set in here. */ public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn, - final Logger log) { + final Logger log) { // TODO: Fix this. Not all connections from server side should have 10 times the retries. int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); @@ -359,7 +357,7 @@ static void incRPCRetriesMetrics(ScanMetrics scanMetrics, boolean isRegionServer } static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs, - boolean isRegionServerRemote) { + boolean isRegionServerRemote) { if (scanMetrics == null || rrs == null || rrs.length == 0) { return; } @@ -402,7 +400,7 @@ static void incRegionCountMetrics(ScanMetrics scanMetrics) { * increase the hedge read related metrics. */ private static void connect(CompletableFuture srcFuture, CompletableFuture dstFuture, - Optional metrics) { + Optional metrics) { addListener(srcFuture, (r, e) -> { if (e != null) { dstFuture.completeExceptionally(e); @@ -421,8 +419,8 @@ private static void connect(CompletableFuture srcFuture, CompletableFutur } private static void sendRequestsToSecondaryReplicas( - Function> requestReplica, RegionLocations locs, - CompletableFuture future, Optional metrics) { + Function> requestReplica, RegionLocations locs, + CompletableFuture future, Optional metrics) { if (future.isDone()) { // do not send requests to secondary replicas if the future is done, i.e, the primary request // has already been finished. 
@@ -436,9 +434,9 @@ private static void sendRequestsToSecondaryReplicas( } static CompletableFuture timelineConsistentRead(AsyncRegionLocator locator, - TableName tableName, Query query, byte[] row, RegionLocateType locateType, - Function> requestReplica, long rpcTimeoutNs, - long primaryCallTimeoutNs, Timer retryTimer, Optional metrics) { + TableName tableName, Query query, byte[] row, RegionLocateType locateType, + Function> requestReplica, long rpcTimeoutNs, + long primaryCallTimeoutNs, Timer retryTimer, Optional metrics) { if (query.getConsistency() != Consistency.TIMELINE) { return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID); } @@ -458,8 +456,8 @@ static CompletableFuture timelineConsistentRead(AsyncRegionLocator locato (locs, error) -> { if (error != null) { LOG.warn( - "Failed to locate all the replicas for table={}, row='{}', locateType={}" + - " give up timeline consistent read", + "Failed to locate all the replicas for table={}, row='{}', locateType={}" + + " give up timeline consistent read", tableName, Bytes.toStringBinary(row), locateType, error); return; } @@ -514,7 +512,7 @@ static void validatePutsInRowMutations(RowMutations rowMutations, int maxKeyValu *
• For system table, use {@link HConstants#SYSTEMTABLE_QOS}.
• For other tables, use {@link HConstants#NORMAL_QOS}.
  • * - * @param priority the priority set by user, can be {@link HConstants#PRIORITY_UNSET}. + * @param priority the priority set by user, can be {@link HConstants#PRIORITY_UNSET}. * @param tableName the table we operate on */ static int calcPriority(int priority, TableName tableName) { @@ -534,8 +532,8 @@ static int getPriority(TableName tableName) { } static CompletableFuture getOrFetch(AtomicReference cacheRef, - AtomicReference> futureRef, boolean reload, - Supplier> fetch, Predicate validator, String type) { + AtomicReference> futureRef, boolean reload, + Supplier> fetch, Predicate validator, String type) { for (;;) { if (!reload) { T value = cacheRef.get(); @@ -578,7 +576,7 @@ static CompletableFuture getOrFetch(AtomicReference cacheRef, } static void updateStats(Optional optStats, - Optional optMetrics, ServerName serverName, MultiResponse resp) { + Optional optMetrics, ServerName serverName, MultiResponse resp) { if (!optStats.isPresent() && !optMetrics.isPresent()) { // ServerStatisticTracker and MetricsConnection are both not present, just return return; @@ -606,13 +604,13 @@ interface Converter { @FunctionalInterface interface RpcCall { void call(ClientService.Interface stub, HBaseRpcController controller, REQ req, - RpcCallback done); + RpcCallback done); } static CompletableFuture call(HBaseRpcController controller, - HRegionLocation loc, ClientService.Interface stub, REQ req, - Converter reqConvert, RpcCall rpcCall, - Converter respConverter) { + HRegionLocation loc, ClientService.Interface stub, REQ req, + Converter reqConvert, RpcCall rpcCall, + Converter respConverter) { CompletableFuture future = new CompletableFuture<>(); try { rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java index 533bd0f41b6d..45dec17a6958 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -27,22 +26,18 @@ public enum Consistency { // developer note: Do not reorder. Client.proto#Consistency depends on this order /** - * Strong consistency is the default consistency model in HBase, - * where reads and writes go through a single server which serializes - * the updates, and returns all data that was written and ack'd. 
+ * Strong consistency is the default consistency model in HBase, where reads and writes go through + * a single server which serializes the updates, and returns all data that was written and ack'd. */ STRONG, /** - * Timeline consistent reads might return values that may not see - * the most recent updates. Write transactions are always performed - * in strong consistency model in HBase which guarantees that transactions - * are ordered, and replayed in the same order by all copies of the data. - * In timeline consistency, the get and scan requests can be answered from data - * that may be stale. - *
    - * The client may still observe transactions out of order if the requests are - * responded from different servers. + * Timeline consistent reads might return values that may not see the most recent updates. Write + * transactions are always performed in strong consistency model in HBase which guarantees that + * transactions are ordered, and replayed in the same order by all copies of the data. In timeline + * consistency, the get and scan requests can be answered from data that may be stale.
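Note (editorial illustration, not part of the diff): the re-wrapped javadoc above keeps the same TIMELINE semantics. A short sketch of how a client opts in and detects possibly stale data (assumes region replicas are enabled and an open Table named table):

    Get get = new Get(Bytes.toBytes("row1"));
    get.setConsistency(Consistency.TIMELINE); // allow the read to be answered by a secondary replica
    Result result = table.get(get);
    if (result.isStale()) {
      // Served from a replica; the value may lag behind the primary.
    }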
    + * The client may still observe transactions out of order if the requests are responded from + * different servers. */ TIMELINE, } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java index 56091ff6ec0d..ebc99d285b8c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,4 +66,4 @@ public synchronized R get() throws IOException { } return result; } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java index 72d588bc9763..3331c8107009 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,9 +22,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * CoprocessorDescriptor contains the details about how to build a coprocessor. - * This class is a pojo so there are no checks for the details carried by this class. - * Use {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor + * CoprocessorDescriptor contains the details about how to build a coprocessor. This class is a pojo + * so there are no checks for the details carried by this class. Use + * {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor */ @InterfaceAudience.Public public interface CoprocessorDescriptor { @@ -45,7 +44,7 @@ public interface CoprocessorDescriptor { int getPriority(); /** - * @return Arbitrary key-value parameter pairs passed into the coprocessor. + * @return Arbitrary key-value parameter pairs passed into the coprocessor. */ Map getProperties(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java index 71d1264c0741..cb0caca21b0e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -109,9 +108,7 @@ public Map getProperties() { @Override public String toString() { - return "class:" + className - + ", jarPath:" + jarPath - + ", priority:" + priority + return "class:" + className + ", jarPath:" + jarPath + ", priority:" + priority + ", properties:" + properties; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java index 837e72d109c2..73e128dfd8f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Scan cursor to tell client where server is scanning - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Result#isCursor()} - * {@link Result#getCursor()} + * Scan cursor to tell client where server is scanning {@link Scan#setNeedCursorResult(boolean)} + * {@link Result#isCursor()} {@link Result#getCursor()} */ @InterfaceAudience.Public public class Cursor { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 2a219f6a39c9..755c0ca0b8c0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -37,97 +35,84 @@ /** * Used to perform Delete operations on a single row. *
    - * To delete an entire row, instantiate a Delete object with the row - * to delete. To further define the scope of what to delete, perform - * additional methods as outlined below. + * To delete an entire row, instantiate a Delete object with the row to delete. To further define + * the scope of what to delete, perform additional methods as outlined below. + *
    + * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} for each family to + * delete. *
    - * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} - * for each family to delete. + * To delete multiple versions of specific columns, execute {@link #addColumns(byte[], byte[]) + * deleteColumns} for each column to delete. *
    - * To delete multiple versions of specific columns, execute - * {@link #addColumns(byte[], byte[]) deleteColumns} - * for each column to delete. + * To delete specific versions of specific columns, execute {@link #addColumn(byte[], byte[], long) + * deleteColumn} for each column version to delete. *
    - * To delete specific versions of specific columns, execute - * {@link #addColumn(byte[], byte[], long) deleteColumn} - * for each column version to delete. + * Specifying timestamps, deleteFamily and deleteColumns will delete all versions with a timestamp + * less than or equal to that passed. If no timestamp is specified, an entry is added with a + * timestamp of 'now' where 'now' is the servers's EnvironmentEdgeManager.currentTime(). Specifying + * a timestamp to the deleteColumn method will delete versions only with a timestamp equal to that + * specified. If no timestamp is passed to deleteColumn, internally, it figures the most recent + * cell's timestamp and adds a delete at that timestamp; i.e. it deletes the most recently added + * cell. *
    - * Specifying timestamps, deleteFamily and deleteColumns will delete all - * versions with a timestamp less than or equal to that passed. If no - * timestamp is specified, an entry is added with a timestamp of 'now' - * where 'now' is the servers's EnvironmentEdgeManager.currentTime(). - * Specifying a timestamp to the deleteColumn method will - * delete versions only with a timestamp equal to that specified. - * If no timestamp is passed to deleteColumn, internally, it figures the - * most recent cell's timestamp and adds a delete at that timestamp; i.e. - * it deletes the most recently added cell. - *
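Note (editorial illustration, not part of the diff): a compact sketch of the scoping rules restated in the javadoc above (family, all-versions-of-a-column, and single-version deletes); the row, family and qualifier names are made up, and table is an open Table:

    Delete delete = new Delete(Bytes.toBytes("row1"));
    delete.addFamily(Bytes.toBytes("cf1"));                           // everything in cf1
    delete.addColumns(Bytes.toBytes("cf2"), Bytes.toBytes("q1"));     // all versions of cf2:q1
    delete.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), 42L); // only the version at ts=42
    table.delete(delete);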
    The timestamp passed to the constructor is used ONLY for delete of - * rows. For anything less -- a deleteColumn, deleteColumns or - * deleteFamily -- then you need to use the method overrides that take a - * timestamp. The constructor timestamp is not referenced. + * The timestamp passed to the constructor is used ONLY for delete of rows. For anything less -- a + * deleteColumn, deleteColumns or deleteFamily -- then you need to use the method overrides that + * take a timestamp. The constructor timestamp is not referenced. */ @InterfaceAudience.Public public class Delete extends Mutation { /** * Create a Delete operation for the specified row. *
    - * If no further operations are done, this will delete everything - * associated with the specified row (all versions of all columns in all - * families), with timestamp from current point in time to the past. - * Cells defining timestamp for a future point in time - * (timestamp > current time) will not be deleted. + * If no further operations are done, this will delete everything associated with the specified + * row (all versions of all columns in all families), with timestamp from current point in time to + * the past. Cells defining timestamp for a future point in time (timestamp > current time) will + * not be deleted. * @param row row key */ - public Delete(byte [] row) { + public Delete(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** - * Create a Delete operation for the specified row and timestamp.
    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.
    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row row key + * Create a Delete operation for the specified row and timestamp. + *
    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *
    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. + * @param row row key * @param timestamp maximum version timestamp (only for delete row) */ - public Delete(byte [] row, long timestamp) { + public Delete(byte[] row, long timestamp) { this(row, 0, row.length, timestamp); } /** - * Create a Delete operation for the specified row and timestamp.
    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.
    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row We make a local copy of this passed in row. - * @param rowOffset - * @param rowLength + * Create a Delete operation for the specified row and timestamp. + *
    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *
    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. + * @param row We make a local copy of this passed in row. nn */ public Delete(final byte[] row, final int rowOffset, final int rowLength) { this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); } /** - * Create a Delete operation for the specified row and timestamp.
    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.
    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row We make a local copy of this passed in row. - * @param rowOffset - * @param rowLength - * @param timestamp maximum version timestamp (only for delete row) + * Create a Delete operation for the specified row and timestamp. + *
    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *
    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. + * @param row We make a local copy of this passed in row. nn * @param timestamp maximum version + * timestamp (only for delete row) */ public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) { checkRow(row, rowOffset, rowLength); @@ -143,23 +128,21 @@ public Delete(final Delete deleteToCopy) { } /** - * Construct the Delete with user defined data. NOTED: - * 1) all cells in the familyMap must have the delete type. - * see {@link org.apache.hadoop.hbase.Cell.Type} - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Delete with user defined data. NOTED: 1) all cells in the familyMap must have the + * delete type. see {@link org.apache.hadoop.hbase.Cell.Type} 2) the row of each cell must be same + * with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Delete(byte[] row, long ts, NavigableMap> familyMap) { + public Delete(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add an existing delete marker to this Delete object. * @param cell An existing cell of type "delete". - * @return this for invocation chaining - * @throws IOException + * @return this for invocation chaining n */ public Delete add(Cell cell) throws IOException { super.add(cell); @@ -169,32 +152,30 @@ public Delete add(Cell cell) throws IOException { /** * Delete all versions of all columns of the specified family. *
    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. * @param family family name * @return this for invocation chaining */ - public Delete addFamily(final byte [] family) { + public Delete addFamily(final byte[] family) { this.addFamily(family, this.ts); return this; } /** - * Delete all columns of the specified family with a timestamp less than - * or equal to the specified timestamp. + * Delete all columns of the specified family with a timestamp less than or equal to the specified + * timestamp. *
    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. - * @param family family name + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. + * @param family family name * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addFamily(final byte [] family, final long timestamp) { + public Delete addFamily(final byte[] family, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - if(!list.isEmpty()) { + if (!list.isEmpty()) { list.clear(); } KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily); @@ -203,70 +184,66 @@ public Delete addFamily(final byte [] family, final long timestamp) { } /** - * Delete all columns of the specified family with a timestamp equal to - * the specified timestamp. - * @param family family name + * Delete all columns of the specified family with a timestamp equal to the specified timestamp. + * @param family family name * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addFamilyVersion(final byte [] family, final long timestamp) { + public Delete addFamilyVersion(final byte[] family, final long timestamp) { List list = getCellList(family); - list.add(new KeyValue(row, family, null, timestamp, - KeyValue.Type.DeleteFamilyVersion)); + list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion)); return this; } /** * Delete all versions of the specified column. - * @param family family name + * @param family family name * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier) { + public Delete addColumns(final byte[] family, final byte[] qualifier) { addColumns(family, qualifier, this.ts); return this; } /** - * Delete all versions of the specified column with a timestamp less than - * or equal to the specified timestamp. - * @param family family name + * Delete all versions of the specified column with a timestamp less than or equal to the + * specified timestamp. + * @param family family name * @param qualifier column qualifier * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier, final long timestamp) { + public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - list.add(new KeyValue(this.row, family, qualifier, timestamp, - KeyValue.Type.DeleteColumn)); + list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn)); return this; } /** - * Delete the latest version of the specified column. - * This is an expensive call in that on the server-side, it first does a - * get to find the latest versions timestamp. Then it adds a delete using - * the fetched cells timestamp. - * @param family family name + * Delete the latest version of the specified column. This is an expensive call in that on the + * server-side, it first does a get to find the latest versions timestamp. Then it adds a delete + * using the fetched cells timestamp. 
+ * @param family family name * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumn(final byte [] family, final byte [] qualifier) { + public Delete addColumn(final byte[] family, final byte[] qualifier) { this.addColumn(family, qualifier, this.ts); return this; } /** * Delete the specified version of the specified column. - * @param family family name + * @param family family name * @param qualifier column qualifier * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addColumn(byte [] family, byte [] qualifier, long timestamp) { + public Delete addColumn(byte[] family, byte[] qualifier, long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java index 9419137842f7..4bc7a76514a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java index aaf0b5cc7320..7ee451b982cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java @@ -15,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Enum describing the durability guarantees for tables and {@link Mutation}s - * Note that the items must be sorted in order of increasing durability + * Enum describing the durability guarantees for tables and {@link Mutation}s Note that the items + * must be sorted in order of increasing durability */ @InterfaceAudience.Public public enum Durability { /* Developer note: Do not rename the enum field names. They are serialized in HTableDescriptor */ /** - * If this is for tables durability, use HBase's global default value (SYNC_WAL). - * Otherwise, if this is for mutation, use the table's default setting to determine durability. - * This must remain the first option. + * If this is for tables durability, use HBase's global default value (SYNC_WAL). Otherwise, if + * this is for mutation, use the table's default setting to determine durability. This must remain + * the first option. */ USE_DEFAULT, /** @@ -42,15 +41,15 @@ public enum Durability { */ ASYNC_WAL, /** - * Write the Mutation to the WAL synchronously. - * The data is flushed to the filesystem implementation, but not necessarily to disk. - * For HDFS this will flush the data to the designated number of DataNodes. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously. 
The data is flushed to the filesystem + * implementation, but not necessarily to disk. For HDFS this will flush the data to the + * designated number of DataNodes. See + * HADOOP-6313 */ SYNC_WAL, /** - * Write the Mutation to the WAL synchronously and force the entries to disk. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously and force the entries to disk. See + * HADOOP-6313 */ FSYNC_WAL } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 0f04407ac3e3..17975ff631de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; - import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -29,37 +27,36 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Used to perform Get operations on a single row. *
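Note (editorial illustration, not part of the diff): the Durability levels reflowed just above are normally chosen per mutation; a hedged sketch (column names are made up, table is an open Table, and SKIP_WAL trades durability for speed so is rarely appropriate):

    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    put.setDurability(Durability.SYNC_WAL); // the global default; FSYNC_WAL also forces to disk
    table.put(put);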
    - * To get everything for a row, instantiate a Get object with the row to get. - * To further narrow the scope of what to Get, use the methods below. + * To get everything for a row, instantiate a Get object with the row to get. To further narrow the + * scope of what to Get, use the methods below. *
    - * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} - * for each family to retrieve. + * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} for each + * family to retrieve. *
    - * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} - * for each column to retrieve. + * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} for each column to + * retrieve. *
    - * To only retrieve columns within a specific range of version timestamps, - * execute {@link #setTimeRange(long, long) setTimeRange}. + * To only retrieve columns within a specific range of version timestamps, execute + * {@link #setTimeRange(long, long) setTimeRange}. *
    - * To only retrieve columns with a specific timestamp, execute - * {@link #setTimestamp(long) setTimestamp}. + * To only retrieve columns with a specific timestamp, execute {@link #setTimestamp(long) + * setTimestamp}. *
    - * To limit the number of versions of each column to be returned, execute - * {@link #readVersions(int) readVersions}. + * To limit the number of versions of each column to be returned, execute {@link #readVersions(int) + * readVersions}. *
    * To add a filter, call {@link #setFilter(Filter) setFilter}. */ @@ -67,31 +64,29 @@ public class Get extends Query implements Row { private static final Logger LOG = LoggerFactory.getLogger(Get.class); - private byte [] row = null; + private byte[] row = null; private int maxVersions = 1; private boolean cacheBlocks = true; private int storeLimit = -1; private int storeOffset = 0; private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; - private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Create a Get operation for the specified row. *
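Note (editorial illustration, not part of the diff): pulling the Get class javadoc above together, one request can combine the narrowing calls it lists (names are made up, table is an open Table, IOExceptions are left unhandled):

    Get get = new Get(Bytes.toBytes("row1"));
    get.addFamily(Bytes.toBytes("cf1"));                      // all columns of cf1
    get.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q1")); // just cf2:q1
    get.setTimeRange(0L, System.currentTimeMillis());         // [minStamp, maxStamp)
    get.readVersions(3);                                      // up to 3 versions per column
    Result result = table.get(get);
    byte[] value = result.getValue(Bytes.toBytes("cf2"), Bytes.toBytes("q1"));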
    - * If no further operations are done, this will get the latest version of - * all columns in all families of the specified row. + * If no further operations are done, this will get the latest version of all columns in all + * families of the specified row. * @param row row key */ - public Get(byte [] row) { + public Get(byte[] row) { Mutation.checkRow(row); this.row = row; } /** - * Copy-constructor - * - * @param get + * Copy-constructor n */ public Get(Get get) { this(get.getRow()); @@ -108,8 +103,8 @@ public Get(Get get) { this.checkExistenceOnly = get.isCheckExistenceOnly(); this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); Map> fams = get.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); + for (Map.Entry> entry : fams.entrySet()) { + byte[] fam = entry.getKey(); NavigableSet cols = entry.getValue(); if (cols != null && cols.size() > 0) { for (byte[] col : cols) { @@ -130,10 +125,7 @@ public Get(Get get) { } /** - * Create a Get operation for the specified row. - * @param row - * @param rowOffset - * @param rowLength + * Create a Get operation for the specified row. nnn */ public Get(byte[] row, int rowOffset, int rowLength) { Mutation.checkRow(row, rowOffset, rowLength); @@ -141,8 +133,7 @@ public Get(byte[] row, int rowOffset, int rowLength) { } /** - * Create a Get operation for the specified row. - * @param row + * Create a Get operation for the specified row. n */ public Get(ByteBuffer row) { Mutation.checkRow(row); @@ -166,7 +157,7 @@ public Get setCheckExistenceOnly(boolean checkExistenceOnly) { * @param family family name * @return the Get object */ - public Get addFamily(byte [] family) { + public Get addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; @@ -176,13 +167,13 @@ public Get addFamily(byte [] family) { * Get the column from the specific family with the specified qualifier. *
    * Overrides previous calls to addFamily for this family. - * @param family family name + * @param family family name * @param qualifier column qualifier * @return the Get objec */ - public Get addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { + public Get addColumn(byte[] family, byte[] qualifier) { + NavigableSet set = familyMap.get(family); + if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } @@ -194,8 +185,7 @@ public Get addColumn(byte [] family, byte [] qualifier) { } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @return this for invocation chaining @@ -213,7 +203,7 @@ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { public Get setTimestamp(long timestamp) { try { tr = TimeRange.at(timestamp); - } catch(Exception e) { + } catch (Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; @@ -286,12 +276,9 @@ public Get setFilter(Filter filter) { /** * Set whether blocks should be cached for this Get. *
    - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached + * This is true by default. When true, default settings of the table and family are used (this + * will never override caching blocks if the block cache is disabled for that family or entirely). + * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Get setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; @@ -300,19 +287,17 @@ public Get setCacheBlocks(boolean cacheBlocks) { /** * Get whether blocks should be cached for this Get. - * @return true if default caching should be used, false if blocks should not - * be cached + * @return true if default caching should be used, false if blocks should not be cached */ public boolean getCacheBlocks() { return cacheBlocks; } /** - * Method for retrieving the get's row - * @return row + * Method for retrieving the get's row n */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } @@ -325,8 +310,7 @@ public int getMaxVersions() { } /** - * Method for retrieving the get's maximum number of values - * to return per Column Family + * Method for retrieving the get's maximum number of values to return per Column Family * @return the maximum number of values to fetch per CF */ public int getMaxResultsPerColumnFamily() { @@ -334,8 +318,7 @@ public int getMaxResultsPerColumnFamily() { } /** - * Method for retrieving the get's offset per row per column - * family (#kvs to be skipped) + * Method for retrieving the get's offset per row per column family (#kvs to be skipped) * @return the row offset */ public int getRowOffsetPerColumnFamily() { @@ -343,8 +326,7 @@ public int getRowOffsetPerColumnFamily() { } /** - * Method for retrieving the get's TimeRange - * @return timeRange + * Method for retrieving the get's TimeRange n */ public TimeRange getTimeRange() { return this.tr; @@ -375,37 +357,32 @@ public boolean hasFamilies() { } /** - * Method for retrieving the get's familyMap - * @return familyMap + * Method for retrieving the get's familyMap n */ - public Map> getFamilyMap() { + public Map> getFamilyMap() { return this.familyMap; } /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map + * Compile the table and column family (i.e. schema) information into a String. Useful for parsing + * and aggregation by debugging, logging, and administration tools. n */ @Override public Map getFingerprint() { Map map = new HashMap<>(); List families = new ArrayList<>(this.familyMap.entrySet().size()); map.put("families", families); - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. - * @param maxCols a limit on the number of columns output prior to truncation - * @return Map + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) 
into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. + * @param maxCols a limit on the number of columns output prior to truncation n */ @Override public Map toMap(int maxCols) { @@ -425,11 +402,10 @@ public Map toMap(int maxCols) { map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and add details - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { List familyList = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), familyList); - if(entry.getValue() == null) { + if (entry.getValue() == null) { colCount++; --maxCols; familyList.add("ALL"); @@ -438,7 +414,7 @@ public Map toMap(int maxCols) { if (maxCols <= 0) { continue; } - for (byte [] column : entry.getValue()) { + for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -459,7 +435,7 @@ public Map toMap(int maxCols) { @Override public int hashCode() { - // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // TODO: This is wrong. Can't have two gets the same just because on same row. But it // matches how equals works currently and gets rid of the findbugs warning. return Bytes.hashCode(this.getRow()); } @@ -473,7 +449,7 @@ public boolean equals(Object obj) { return false; } Row other = (Row) obj; - // TODO: This is wrong. Can't have two gets the same just because on same row. + // TODO: This is wrong. Can't have two gets the same just because on same row. return Row.COMPARATOR.compare(this, other) == 0; } @@ -514,7 +490,7 @@ public Get setReplicaId(int Id) { @Override public Get setIsolationLevel(IsolationLevel level) { - return (Get) super.setIsolationLevel(level); + return (Get) super.setIsolationLevel(level); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java index d153ef7dd771..8df0504b2a9a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,20 +51,21 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignsResponse; /** - * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of - * constructing an HBaseHbck directly. - * - *
    Connection should be an unmanaged connection obtained via - * {@link ConnectionFactory#createConnection(Configuration)}.
    - * - *
    NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at - * the wrong time. Use with caution. For experts only. These methods are only for the - * extreme case where the cluster has been damaged or has achieved an inconsistent state because - * of some unforeseen circumstance or bug and requires manual intervention. - * - *
    An instance of this class is lightweight and not-thread safe. A new instance should be created - * by each thread. Pooling or caching of the instance is not recommended.
    - * + * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of constructing an + * HBaseHbck directly. + *
    + * Connection should be an unmanaged connection obtained via + * {@link ConnectionFactory#createConnection(Configuration)}. + *
    + *
    + * NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at the + * wrong time. Use with caution. For experts only. These methods are only for the extreme case where + * the cluster has been damaged or has achieved an inconsistent state because of some unforeseen + * circumstance or bug and requires manual intervention. + *
    + * An instance of this class is lightweight and not-thread safe. A new instance should be created by + * each thread. Pooling or caching of the instance is not recommended. + *
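Note (editorial illustration, not part of the diff): following the javadoc above, the fixup API is reached through the Connection; the encoded region name below is the example already used in that javadoc. For experts only:

    try (Hbck hbck = connection.getHbck()) {
      // Schedules assign procedures even while the master is still initializing.
      List<Long> pids = hbck.assigns(Arrays.asList("de00010733901a05f5a2a3a382e27dd4"));
    }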
    * @see ConnectionFactory * @see Hbck */ @@ -102,9 +103,9 @@ public boolean isAborted() { @Override public TableState setTableStateInMeta(TableState state) throws IOException { try { - GetTableStateResponse response = hbck.setTableStateInMeta( - rpcControllerFactory.newController(), - RequestConverter.buildSetTableStateInMetaRequest(state)); + GetTableStateResponse response = + hbck.setTableStateInMeta(rpcControllerFactory.newController(), + RequestConverter.buildSetTableStateInMetaRequest(state)); return TableState.convert(state.getTableName(), response.getTableState()); } catch (ServiceException se) { LOG.debug("table={}, state={}", state.getTableName(), state.getState(), se); @@ -134,11 +135,10 @@ public Map setRegionStateInMeta( } @Override - public List assigns(List encodedRegionNames, boolean override) - throws IOException { + public List assigns(List encodedRegionNames, boolean override) throws IOException { try { AssignsResponse response = this.hbck.assigns(rpcControllerFactory.newController(), - RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -148,10 +148,10 @@ public List assigns(List encodedRegionNames, boolean override) @Override public List unassigns(List encodedRegionNames, boolean override) - throws IOException { + throws IOException { try { UnassignsResponse response = this.hbck.unassigns(rpcControllerFactory.newController(), - RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -165,38 +165,34 @@ private static String toCommaDelimitedString(List list) { @Override public List bypassProcedure(List pids, long waitTime, boolean override, - boolean recursive) - throws IOException { - BypassProcedureResponse response = ProtobufUtil.call( - new Callable() { - @Override - public BypassProcedureResponse call() throws Exception { - try { - return hbck.bypassProcedure(rpcControllerFactory.newController(), - BypassProcedureRequest.newBuilder().addAllProcId(pids). - setWaitTime(waitTime).setOverride(override).setRecursive(recursive).build()); - } catch (Throwable t) { - LOG.error(pids.stream().map(i -> i.toString()). 
- collect(Collectors.joining(", ")), t); - throw t; - } - } - }); + boolean recursive) throws IOException { + BypassProcedureResponse response = ProtobufUtil.call(new Callable() { + @Override + public BypassProcedureResponse call() throws Exception { + try { + return hbck.bypassProcedure(rpcControllerFactory.newController(), + BypassProcedureRequest.newBuilder().addAllProcId(pids).setWaitTime(waitTime) + .setOverride(override).setRecursive(recursive).build()); + } catch (Throwable t) { + LOG.error(pids.stream().map(i -> i.toString()).collect(Collectors.joining(", ")), t); + throw t; + } + } + }); return response.getBypassedList(); } @Override - public List scheduleServerCrashProcedures(List serverNames) - throws IOException { + public List scheduleServerCrashProcedures(List serverNames) throws IOException { try { ScheduleServerCrashProcedureResponse response = - this.hbck.scheduleServerCrashProcedure(rpcControllerFactory.newController(), - RequestConverter.toScheduleServerCrashProcedureRequest(serverNames)); + this.hbck.scheduleServerCrashProcedure(rpcControllerFactory.newController(), + RequestConverter.toScheduleServerCrashProcedureRequest(serverNames)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString( serverNames.stream().map(serverName -> ProtobufUtil.toServerName(serverName).toString()) - .collect(Collectors.toList())), + .collect(Collectors.toList())), se); throw new IOException(se); } @@ -206,8 +202,7 @@ public List scheduleServerCrashProcedures(List serverNames) public List scheduleSCPsForUnknownServers() throws IOException { try { ScheduleSCPsForUnknownServersResponse response = - this.hbck.scheduleSCPsForUnknownServers( - rpcControllerFactory.newController(), + this.hbck.scheduleSCPsForUnknownServers(rpcControllerFactory.newController(), ScheduleSCPsForUnknownServersRequest.newBuilder().build()); return response.getPidList(); } catch (ServiceException se) { @@ -220,7 +215,7 @@ public List scheduleSCPsForUnknownServers() throws IOException { public boolean runHbckChore() throws IOException { try { RunHbckChoreResponse response = this.hbck.runHbckChore(rpcControllerFactory.newController(), - RunHbckChoreRequest.newBuilder().build()); + RunHbckChoreRequest.newBuilder().build()); return response.getRan(); } catch (ServiceException se) { LOG.debug("Failed to run HBCK chore", se); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index 7e9a519b95f1..b5ba25058838 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -30,19 +30,19 @@ /** * Hbck fixup tool APIs. Obtain an instance from {@link Connection#getHbck()} and call * {@link #close()} when done. - *
    WARNING: the below methods can damage the cluster. It may leave the cluster in an - * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running - * any of the below, operators may have to do some clean up on hdfs or schedule some assign - * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only. - * + *
    + * WARNING: the below methods can damage the cluster. It may leave the cluster in an indeterminate + * state, e.g. region not assigned, or some hdfs files left behind. After running any of the below, + * operators may have to do some clean up on hdfs or schedule some assign procedures to get regions + * back online. DO AT YOUR OWN RISK. For experienced users only. * @see ConnectionFactory * @since 2.0.2, 2.1.1 */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) public interface Hbck extends Abortable, Closeable { /** - * Update table state in Meta only. No procedures are submitted to open/assign or - * close/unassign regions of the table. + * Update table state in Meta only. No procedures are submitted to open/assign or close/unassign + * regions of the table. * @param state table state * @return previous state of the table in Meta */ @@ -58,17 +58,18 @@ public interface Hbck extends Abortable, Closeable { setRegionStateInMeta(Map nameOrEncodedName2State) throws IOException; /** - * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time - * -- good if many Regions to online -- and it will schedule the assigns even in the case where + * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time -- + * good if many Regions to online -- and it will schedule the assigns even in the case where * Master is initializing (as long as the ProcedureExecutor is up). Does NOT call Coprocessor * hooks. - * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * @param override You need to add the override for case where a region has previously + * been bypassed. When a Procedure has been bypassed, a Procedure will + * have completed but no other Procedure will be able to make progress + * on the target entity (intentionally). This override flag will + * override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example + * of what a random user-space encoded Region name looks like. */ List assigns(List encodedRegionNames, boolean override) throws IOException; @@ -81,13 +82,14 @@ default List assigns(List encodedRegionNames) throws IOException { * at a time -- good if many Regions to offline -- and it will schedule the assigns even in the * case where Master is initializing (as long as the ProcedureExecutor is up). Does NOT call * Coprocessor hooks. - * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 
1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * @param override You need to add the override for case where a region has previously + * been bypassed. When a Procedure has been bypassed, a Procedure will + * have completed but no other Procedure will be able to make progress + * on the target entity (intentionally). This override flag will + * override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example + * of what a random user-space encoded Region name looks like. */ List unassigns(List encodedRegionNames, boolean override) throws IOException; @@ -96,21 +98,20 @@ default List unassigns(List encodedRegionNames) throws IOException } /** - * Bypass specified procedure and move it to completion. Procedure is marked completed but - * no actual work is done from the current state/step onwards. Parents of the procedure are - * also marked for bypass. - * - * @param pids of procedures to complete. - * @param waitTime wait time in ms for acquiring lock for a procedure - * @param override if override set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during execution (bugs?). - * @param recursive If set, if a parent procedure, we will find and bypass children and then - * the parent procedure (Dangerous but useful in case where child procedure has been 'lost'). - * Does not always work. Experimental. + * Bypass specified procedure and move it to completion. Procedure is marked completed but no + * actual work is done from the current state/step onwards. Parents of the procedure are also + * marked for bypass. + * @param pids of procedures to complete. + * @param waitTime wait time in ms for acquiring lock for a procedure + * @param override if override set to true, we will bypass the procedure even if it is executing. + * This is for procedures which can't break out during execution (bugs?). + * @param recursive If set, if a parent procedure, we will find and bypass children and then the + * parent procedure (Dangerous but useful in case where child procedure has been + * 'lost'). Does not always work. Experimental. * @return true if procedure is marked for bypass successfully, false otherwise */ List bypassProcedure(List pids, long waitTime, boolean override, boolean recursive) - throws IOException; + throws IOException; List scheduleServerCrashProcedures(List serverNames) throws IOException; @@ -118,7 +119,6 @@ List bypassProcedure(List pids, long waitTime, boolean override, /** * Request HBCK chore to run at master side. - * * @return true if HBCK chore ran, false if HBCK chore already running * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java index 01ec316c798a..128d46daac4b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -41,7 +39,6 @@ public final class ImmutableScan extends Scan { /** * Create Immutable instance of Scan from given Scan object - * * @param scan Copy all values from Scan */ public ImmutableScan(Scan scan) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index bd824d4a855f..11da33d8106a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,12 +38,12 @@ /** * Used to perform Increment operations on a single row. *
    - * This operation ensures atomicity to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicity to readers. Increments are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. *
    - * To increment columns of a row, instantiate an Increment object with the row - * to increment. At least one column to increment must be specified using the + * To increment columns of a row, instantiate an Increment object with the row to increment. At + * least one column to increment must be specified using the * {@link #addColumn(byte[], byte[], long)} method. */ @InterfaceAudience.Public @@ -58,7 +57,7 @@ public class Increment extends Mutation { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(byte [] row) { + public Increment(byte[] row) { this(row, 0, row.length); } @@ -68,10 +67,11 @@ public Increment(byte [] row) { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(final byte [] row, final int offset, final int length) { + public Increment(final byte[] row, final int offset, final int length) { checkRow(row, offset, length); this.row = Bytes.copy(row, offset, length); } + /** * Copy constructor * @param incrementToCopy increment to copy @@ -82,39 +82,36 @@ public Increment(Increment incrementToCopy) { } /** - * Construct the Increment with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Increment with user defined data. NOTED: 1) all cells in the familyMap must have + * the Type.Put 2) the row of each cell must be same with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Increment(byte[] row, long ts, NavigableMap> familyMap) { + public Increment(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add the specified KeyValue to this operation. - * @param cell individual Cell - * @return this - * @throws java.io.IOException e + * @param cell individual Cell n * @throws java.io.IOException e */ - public Increment add(Cell cell) throws IOException{ + public Increment add(Cell cell) throws IOException { super.add(cell); return this; } /** - * Increment the column from the specific family with the specified qualifier - * by the specified amount. + * Increment the column from the specific family with the specified qualifier by the specified + * amount. *
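Note (editorial illustration, not part of the diff): a minimal sketch of the Increment usage described above (column names are made up, table is an open Table):

    Increment increment = new Increment(Bytes.toBytes("row1"));
    increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L); // at least one column
    Result result = table.increment(increment);
    long updated = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));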
<p>
    * Overrides previous calls to addColumn for this family and qualifier. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param amount amount to increment by + * @param amount amount to increment by * @return the Increment object */ - public Increment addColumn(byte [] family, byte [] qualifier, long amount) { + public Increment addColumn(byte[] family, byte[] qualifier, long amount) { if (family == null) { throw new IllegalArgumentException("family cannot be null"); } @@ -125,8 +122,7 @@ public Increment addColumn(byte [] family, byte [] qualifier, long amount) { } /** - * Gets the TimeRange used for this increment. - * @return TimeRange + * Gets the TimeRange used for this increment. n */ public TimeRange getTimeRange() { return this.tr; @@ -135,18 +131,16 @@ public TimeRange getTimeRange() { /** * Sets the TimeRange to be used on the Get for this increment. *
<p>
    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this increment, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this increment, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *
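A sketch of the time-partitioned counter pattern this javadoc describes (the hourly bucketing and the column names are assumptions):

    // Only cells from the current hour are consulted when the new value is computed;
    // the range is half-open, [minStamp, maxStamp), as noted below.
    long hourStart = (System.currentTimeMillis() / 3600_000L) * 3600_000L;
    Increment inc = new Increment(Bytes.toBytes("counter-row"));
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("count"), 1L);
    inc.setTimeRange(hourStart, hourStart + 3600_000L); // throws IOException for an invalid range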
<p>
    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive - * @throws IOException if invalid time range - * @return this + * @throws IOException if invalid time range n */ public Increment setTimeRange(long minStamp, long maxStamp) throws IOException { tr = TimeRange.between(minStamp, maxStamp); @@ -161,8 +155,8 @@ public Increment setTimestamp(long timestamp) { /** * @param returnResults True (default) if the increment operation should return the results. A - * client that is not interested in the result can save network bandwidth setting this - * to false. + * client that is not interested in the result can save network bandwidth + * setting this to false. */ @Override public Increment setReturnResults(boolean returnResults) { @@ -197,21 +191,20 @@ public boolean hasFamilies() { } /** - * Before 0.95, when you called Increment#getFamilyMap(), you got back - * a map of families to a list of Longs. Now, {@link #getFamilyCellMap()} returns - * families by list of Cells. This method has been added so you can have the - * old behavior. + * Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list + * of Longs. Now, {@link #getFamilyCellMap()} returns families by list of Cells. This method has + * been added so you can have the old behavior. * @return Map of families to a Map of qualifiers and their Long increments. * @since 0.95.0 */ - public Map> getFamilyMapOfLongs() { + public Map> getFamilyMapOfLongs() { NavigableMap> map = super.getFamilyCellMap(); - Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Map.Entry> entry: map.entrySet()) { - NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: entry.getValue()) { + Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Map.Entry> entry : map.entrySet()) { + NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Cell cell : entry.getValue()) { longs.put(CellUtil.cloneQualifier(cell), - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } results.put(entry.getKey(), longs); } @@ -219,21 +212,21 @@ public boolean hasFamilies() { } /** - * @return String + * n */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("row="); sb.append(Bytes.toStringBinary(this.row)); - if(this.familyMap.isEmpty()) { + if (this.familyMap.isEmpty()) { sb.append(", no columns set to be incremented"); return sb.toString(); } sb.append(", families="); boolean moreThanOne = false; - for(Map.Entry> entry: this.familyMap.entrySet()) { - if(moreThanOne) { + for (Map.Entry> entry : this.familyMap.entrySet()) { + if (moreThanOne) { sb.append("), "); } else { moreThanOne = true; @@ -242,19 +235,19 @@ public String toString() { sb.append("(family="); sb.append(Bytes.toString(entry.getKey())); sb.append(", columns="); - if(entry.getValue() == null) { + if (entry.getValue() == null) { sb.append("NONE"); } else { sb.append("{"); boolean moreThanOneB = false; - for(Cell cell : entry.getValue()) { - if(moreThanOneB) { + for (Cell cell : entry.getValue()) { + if (moreThanOneB) { sb.append(", "); } else { moreThanOneB = true; } - sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + + 
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } sb.append("}"); } @@ -264,7 +257,7 @@ public String toString() { } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java index ba7609087001..7804e48de9f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -25,11 +22,9 @@ /** * Specify Isolation levels in Scan operations. *
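The enum whose javadoc begins here distinguishes READ_COMMITTED from READ_UNCOMMITTED; a minimal sketch of requesting a level on a Scan (the surrounding table and connection setup is assumed):

    Scan scan = new Scan();
    // READ_COMMITTED is the default; READ_UNCOMMITTED may also surface data written by
    // transactions that have not yet committed.
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);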
<p>
    - * There are two isolation levels. A READ_COMMITTED isolation level - * indicates that only data that is committed be returned in a scan. - * An isolation level of READ_UNCOMMITTED indicates that a scan - * should return data that is being modified by transactions that might - * not have been committed yet. + * There are two isolation levels. A READ_COMMITTED isolation level indicates that only data that is + * committed be returned in a scan. An isolation level of READ_UNCOMMITTED indicates that a scan + * should return data that is being modified by transactions that might not have been committed yet. */ @InterfaceAudience.Public public enum IsolationLevel { @@ -37,17 +32,18 @@ public enum IsolationLevel { READ_COMMITTED(1), READ_UNCOMMITTED(2); - IsolationLevel(int value) {} + IsolationLevel(int value) { + } - public byte [] toBytes() { - return new byte [] { toByte() }; + public byte[] toBytes() { + return new byte[] { toByte() }; } public byte toByte() { - return (byte)this.ordinal(); + return (byte) this.ordinal(); } - public static IsolationLevel fromBytes(byte [] bytes) { + public static IsolationLevel fromBytes(byte[] bytes) { return IsolationLevel.fromByte(bytes[0]); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java index 41f79cf8e813..807c7f1f435d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Abstract response class representing online logs response from ring-buffer use-cases - * e.g slow/large RPC logs, balancer decision logs + * Abstract response class representing online logs response from ring-buffer use-cases e.g + * slow/large RPC logs, balancer decision logs */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java index 506fc4f76521..b2d217da3de0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -26,8 +24,8 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Slow/Large Log Query Filter with all filter and limit parameters - * Extends generic LogRequest used by Admin API getLogEntries + * Slow/Large Log Query Filter with all filter and limit parameters Extends generic LogRequest used + * by Admin API getLogEntries * @deprecated as of 2.4.0. 
Will be removed in 4.0.0. */ @InterfaceAudience.Public @@ -121,41 +119,24 @@ public boolean equals(Object o) { LogQueryFilter that = (LogQueryFilter) o; - return new EqualsBuilder() - .append(limit, that.limit) - .append(regionName, that.regionName) - .append(clientAddress, that.clientAddress) - .append(tableName, that.tableName) - .append(userName, that.userName) - .append(type, that.type) - .append(filterByOperator, that.filterByOperator) - .isEquals(); + return new EqualsBuilder().append(limit, that.limit).append(regionName, that.regionName) + .append(clientAddress, that.clientAddress).append(tableName, that.tableName) + .append(userName, that.userName).append(type, that.type) + .append(filterByOperator, that.filterByOperator).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(regionName) - .append(clientAddress) - .append(tableName) - .append(userName) - .append(limit) - .append(type) - .append(filterByOperator) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(regionName).append(clientAddress).append(tableName) + .append(userName).append(limit).append(type).append(filterByOperator).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this) - .append("regionName", regionName) - .append("clientAddress", clientAddress) - .append("tableName", tableName) - .append("userName", userName) - .append("limit", limit) - .append("type", type) - .append("filterByOperator", filterByOperator) - .toString(); + return new ToStringBuilder(this).append("regionName", regionName) + .append("clientAddress", clientAddress).append("tableName", tableName) + .append("userName", userName).append("limit", limit).append("type", type) + .append("filterByOperator", filterByOperator).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java index d85971bdb057..4be0362be85d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,14 +49,13 @@ class MasterCoprocessorRpcChannelImpl implements RpcChannel { } private CompletableFuture rpcCall(MethodDescriptor method, Message request, - Message responsePrototype, HBaseRpcController controller, MasterService.Interface stub) { + Message responsePrototype, HBaseRpcController controller, MasterService.Interface stub) { CompletableFuture future = new CompletableFuture<>(); CoprocessorServiceRequest csr = - CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); - stub.execMasterService( - controller, - csr, - new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback() { + CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); + stub.execMasterService(controller, csr, + new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback< + CoprocessorServiceResponse>() { @Override public void run(CoprocessorServiceResponse resp) { @@ -76,7 +75,7 @@ public void run(CoprocessorServiceResponse resp) { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { addListener( callerBuilder.action((c, s) -> rpcCall(method, request, responsePrototype, c, s)).call(), ((r, e) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 05773d0b4195..a031d3530971 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -135,7 +135,7 @@ private static Set transformServerNames(GetMastersResponse resp) { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") + allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") CompletableFuture> getMasters() { return this . call( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java index 6d4b85cfc51e..d56906d8ba5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +16,9 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; + /** * Represents the master switch type */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index cac1e8e75a1e..dc452bcd9d9a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,30 +26,28 @@ import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.RatioGauge; import com.codahale.metrics.Timer; - import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; /** - * This class is for maintaining the various connection statistics and publishing them through - * the metrics interfaces. - * - * This class manages its own {@link MetricRegistry} and {@link JmxReporter} so as to not - * conflict with other uses of Yammer Metrics within the client application. Instantiating + * This class is for maintaining the various connection statistics and publishing them through the + * metrics interfaces. This class manages its own {@link MetricRegistry} and {@link JmxReporter} so + * as to not conflict with other uses of Yammer Metrics within the client application. Instantiating * this class implicitly creates and "starts" instances of these classes; be sure to call * {@link #shutdown()} to terminate the thread pools they allocate. */ @@ -60,27 +58,25 @@ public class MetricsConnection implements StatisticTrackable { public static final String CLIENT_SIDE_METRICS_ENABLED_KEY = "hbase.client.metrics.enable"; /** - * Set to specify a custom scope for the metrics published through {@link MetricsConnection}. - * The scope is added to JMX MBean objectName, and defaults to a combination of the Connection's - * clusterId and hashCode. For example, a default value for a connection to cluster "foo" might - * be "foo-7d9d0818", where "7d9d0818" is the hashCode of the underlying AsyncConnectionImpl. - * Users may set this key to give a more contextual name for this scope. For example, one might - * want to differentiate a read connection from a write connection by setting the scopes to - * "foo-read" and "foo-write" respectively. - * - * Scope is the only thing that lends any uniqueness to the metrics. Care should be taken to - * avoid using the same scope for multiple Connections, otherwise the metrics may aggregate in - * unforeseen ways. + * Set to specify a custom scope for the metrics published through {@link MetricsConnection}. The + * scope is added to JMX MBean objectName, and defaults to a combination of the Connection's + * clusterId and hashCode. For example, a default value for a connection to cluster "foo" might be + * "foo-7d9d0818", where "7d9d0818" is the hashCode of the underlying AsyncConnectionImpl. Users + * may set this key to give a more contextual name for this scope. For example, one might want to + * differentiate a read connection from a write connection by setting the scopes to "foo-read" and + * "foo-write" respectively. 
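A configuration sketch using the two keys defined in this class (the scope value and the connection wiring are assumptions):

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.client.metrics.enable", true); // CLIENT_SIDE_METRICS_ENABLED_KEY
    conf.set("hbase.client.metrics.scope", "foo-read");   // METRICS_SCOPE_KEY
    // Connections built from this conf publish their client-side metrics under the "foo-read" scope.
    Connection connection = ConnectionFactory.createConnection(conf);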
Scope is the only thing that lends any uniqueness to the metrics. + * Care should be taken to avoid using the same scope for multiple Connections, otherwise the + * metrics may aggregate in unforeseen ways. */ public static final String METRICS_SCOPE_KEY = "hbase.client.metrics.scope"; /** - * Returns the scope for a MetricsConnection based on the configured {@link #METRICS_SCOPE_KEY} - * or by generating a default from the passed clusterId and connectionObj's hashCode. + * Returns the scope for a MetricsConnection based on the configured {@link #METRICS_SCOPE_KEY} or + * by generating a default from the passed clusterId and connectionObj's hashCode. * @param conf configuration for the connection * @param clusterId clusterId for the connection - * @param connectionObj either a Connection or AsyncConnectionImpl, the instance - * creating this MetricsConnection. + * @param connectionObj either a Connection or AsyncConnectionImpl, the instance creating this + * MetricsConnection. */ static String getScope(Configuration conf, String clusterId, Object connectionObj) { return conf.get(METRICS_SCOPE_KEY, @@ -169,12 +165,10 @@ private CallTracker(MetricRegistry registry, String name, String subName, String sb.append("(").append(subName).append(")"); } this.name = sb.toString(); - this.callTimer = registry.timer(name(MetricsConnection.class, - DRTN_BASE + this.name, scope)); - this.reqHist = registry.histogram(name(MetricsConnection.class, - REQ_BASE + this.name, scope)); - this.respHist = registry.histogram(name(MetricsConnection.class, - RESP_BASE + this.name, scope)); + this.callTimer = registry.timer(name(MetricsConnection.class, DRTN_BASE + this.name, scope)); + this.reqHist = registry.histogram(name(MetricsConnection.class, REQ_BASE + this.name, scope)); + this.respHist = + registry.histogram(name(MetricsConnection.class, RESP_BASE + this.name, scope)); } private CallTracker(MetricRegistry registry, String name, String scope) { @@ -200,10 +194,10 @@ protected static class RegionStats { public RegionStats(MetricRegistry registry, String name) { this.name = name; - this.memstoreLoadHist = registry.histogram(name(MetricsConnection.class, - MEMLOAD_BASE + this.name)); - this.heapOccupancyHist = registry.histogram(name(MetricsConnection.class, - HEAP_BASE + this.name)); + this.memstoreLoadHist = + registry.histogram(name(MetricsConnection.class, MEMLOAD_BASE + this.name)); + this.heapOccupancyHist = + registry.histogram(name(MetricsConnection.class, HEAP_BASE + this.name)); } public void update(RegionLoadStats regionStatistics) { @@ -218,12 +212,10 @@ protected static class RunnerStats { final Histogram delayIntevalHist; public RunnerStats(MetricRegistry registry) { - this.normalRunners = registry.counter( - name(MetricsConnection.class, "normalRunnersCount")); - this.delayRunners = registry.counter( - name(MetricsConnection.class, "delayRunnersCount")); - this.delayIntevalHist = registry.histogram( - name(MetricsConnection.class, "delayIntervalHist")); + this.normalRunners = registry.counter(name(MetricsConnection.class, "normalRunnersCount")); + this.delayRunners = registry.counter(name(MetricsConnection.class, "delayRunnersCount")); + this.delayIntevalHist = + registry.histogram(name(MetricsConnection.class, "delayIntervalHist")); } public void incrNormalRunners() { @@ -239,11 +231,10 @@ public void updateDelayInterval(long interval) { } } - protected ConcurrentHashMap> serverStats - = new ConcurrentHashMap<>(); + protected ConcurrentHashMap> serverStats = + new ConcurrentHashMap<>(); - public 
void updateServerStats(ServerName serverName, byte[] regionName, - Object r) { + public void updateServerStats(ServerName serverName, byte[] regionName, Object r) { if (!(r instanceof Result)) { return; } @@ -261,7 +252,7 @@ public void updateRegionStats(ServerName serverName, byte[] regionName, RegionLo ConcurrentMap rsStats = computeIfAbsent(serverStats, serverName, () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR)); RegionStats regionStats = - computeIfAbsent(rsStats, regionName, () -> new RegionStats(this.registry, name)); + computeIfAbsent(rsStats, regionName, () -> new RegionStats(this.registry, name)); regionStats.update(stats); } @@ -284,19 +275,22 @@ private static interface NewMetric { protected final String scope; private final NewMetric timerFactory = new NewMetric() { - @Override public Timer newMetric(Class clazz, String name, String scope) { + @Override + public Timer newMetric(Class clazz, String name, String scope) { return registry.timer(name(clazz, name, scope)); } }; private final NewMetric histogramFactory = new NewMetric() { - @Override public Histogram newMetric(Class clazz, String name, String scope) { + @Override + public Histogram newMetric(Class clazz, String name, String scope) { return registry.histogram(name(clazz, name, scope)); } }; private final NewMetric counterFactory = new NewMetric() { - @Override public Counter newMetric(Class clazz, String name, String scope) { + @Override + public Counter newMetric(Class clazz, String name, String scope) { return registry.counter(name(clazz, name, scope)); } }; @@ -328,47 +322,44 @@ private static interface NewMetric { // registry. I don't think their use perfectly removes redundant allocations, but it's // a big improvement over calling registry.newMetric each time. protected final ConcurrentMap rpcTimers = - new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcHistograms = - new ConcurrentHashMap<>(CAPACITY * 2 /* tracking both request and response sizes */, - LOAD_FACTOR, CONCURRENCY_LEVEL); + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcHistograms = new ConcurrentHashMap<>( + CAPACITY * 2 /* tracking both request and response sizes */, LOAD_FACTOR, CONCURRENCY_LEVEL); private final ConcurrentMap cacheDroppingExceptions = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcCounters = - new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcCounters = + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); MetricsConnection(String scope, Supplier batchPool, - Supplier metaPool) { + Supplier metaPool) { this.scope = scope; this.registry = new MetricRegistry(); - this.registry.register(getExecutorPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = batchPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); - this.registry.register(getMetaPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = metaPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); + this.registry.register(getExecutorPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = batchPool.get(); + if (pool == null) { + 
return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); + this.registry.register(getMetaPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = metaPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); this.metaCacheHits = registry.counter(name(this.getClass(), "metaCacheHits", scope)); this.metaCacheMisses = registry.counter(name(this.getClass(), "metaCacheMisses", scope)); - this.metaCacheNumClearServer = registry.counter(name(this.getClass(), - "metaCacheNumClearServer", scope)); - this.metaCacheNumClearRegion = registry.counter(name(this.getClass(), - "metaCacheNumClearRegion", scope)); + this.metaCacheNumClearServer = + registry.counter(name(this.getClass(), "metaCacheNumClearServer", scope)); + this.metaCacheNumClearRegion = + registry.counter(name(this.getClass(), "metaCacheNumClearRegion", scope)); this.hedgedReadOps = registry.counter(name(this.getClass(), "hedgedReadOps", scope)); this.hedgedReadWin = registry.counter(name(this.getClass(), "hedgedReadWin", scope)); this.getTracker = new CallTracker(this.registry, "Get", scope); @@ -379,10 +370,10 @@ protected Ratio getRatio() { this.putTracker = new CallTracker(this.registry, "Mutate", "Put", scope); this.multiTracker = new CallTracker(this.registry, "Multi", scope); this.runnerStats = new RunnerStats(this.registry); - this.concurrentCallsPerServerHist = registry.histogram(name(MetricsConnection.class, - "concurrentCallsPerServer", scope)); - this.numActionsPerServerHist = registry.histogram(name(MetricsConnection.class, - "numActionsPerServer", scope)); + this.concurrentCallsPerServerHist = + registry.histogram(name(MetricsConnection.class, "concurrentCallsPerServer", scope)); + this.numActionsPerServerHist = + registry.histogram(name(MetricsConnection.class, "numActionsPerServer", scope)); this.nsLookups = registry.counter(name(this.getClass(), NS_LOOKUPS, scope)); this.nsLookupsFailed = registry.counter(name(this.getClass(), NS_LOOKUPS_FAILED, scope)); @@ -467,12 +458,12 @@ private T getMetric(String key, ConcurrentMap map, NewMetric f /** Update call stats for non-critical-path methods */ private void updateRpcGeneric(String methodName, CallStats stats) { - getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory) - .update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS); + getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory).update(stats.getCallTimeMs(), + TimeUnit.MILLISECONDS); getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory) - .update(stats.getRequestSizeBytes()); + .update(stats.getRequestSizeBytes()); getMetric(RESP_BASE + methodName, rpcHistograms, histogramFactory) - .update(stats.getResponseSizeBytes()); + .update(stats.getResponseSizeBytes()); } /** Report RPC context to metrics system. */ @@ -487,7 +478,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { // this implementation is tied directly to protobuf implementation details. would be better // if we could dispatch based on something static, ie, request Message type. 
if (method.getService() == ClientService.getDescriptor()) { - switch(method.getIndex()) { + switch (method.getIndex()) { case 0: assert "Get".equals(method.getName()); getTracker.updateRpc(stats); @@ -495,7 +486,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { case 1: assert "Mutate".equals(method.getName()); final MutationType mutationType = ((MutateRequest) param).getMutation().getMutateType(); - switch(mutationType) { + switch (mutationType) { case APPEND: appendTracker.updateRpc(stats); return; @@ -549,8 +540,8 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { } public void incrCacheDroppingExceptions(Object exception) { - getMetric(CACHE_BASE + - (exception == null? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), + getMetric( + CACHE_BASE + (exception == null ? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), cacheDroppingExceptions, counterFactory).inc(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java index 6ad44f08a60d..fc473bdbb709 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 03f168893a71..97a01e1bb6ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.HashMap; import java.util.Map; import java.util.TreeMap; - +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Bytes; /** * A container for Result objects, grouped by regionName. @@ -37,11 +35,10 @@ public class MultiResponse extends AbstractResponse { private Map results = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** - * The server can send us a failure for the region itself, instead of individual failure. - * It's a part of the protobuf definition. + * The server can send us a failure for the region itself, instead of individual failure. It's a + * part of the protobuf definition. 
*/ - private Map exceptions = - new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map exceptions = new TreeMap<>(Bytes.BYTES_COMPARATOR); public MultiResponse() { super(); @@ -52,31 +49,29 @@ public MultiResponse() { */ public int size() { int size = 0; - for (RegionResult result: results.values()) { + for (RegionResult result : results.values()) { size += result.size(); } return size; } /** - * Add the pair to the container, grouped by the regionName - * - * @param regionName - * @param originalIndex the original index of the Action (request). + * Add the pair to the container, grouped by the regionName n * @param originalIndex the original + * index of the Action (request). * @param resOrEx the result or error; will be empty for successful Put and Delete actions. */ public void add(byte[] regionName, int originalIndex, Object resOrEx) { getResult(regionName).addResult(originalIndex, resOrEx); } - public void addException(byte []regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } /** * @return the exception for the region, if any. Null otherwise. */ - public Throwable getException(byte []regionName){ + public Throwable getException(byte[] regionName) { return exceptions.get(regionName); } @@ -88,7 +83,7 @@ public void addStatistic(byte[] regionName, ClientProtos.RegionLoadStats stat) { getResult(regionName).setStat(stat); } - private RegionResult getResult(byte[] region){ + private RegionResult getResult(byte[] region) { RegionResult rs = results.get(region); if (rs == null) { rs = new RegionResult(); @@ -97,7 +92,7 @@ private RegionResult getResult(byte[] region){ return rs; } - public Map getResults(){ + public Map getResults() { return this.results; } @@ -106,15 +101,15 @@ public ResponseType type() { return ResponseType.MULTI; } - static class RegionResult{ + static class RegionResult { Map result = new HashMap<>(); ClientProtos.RegionLoadStats stat; - public void addResult(int index, Object result){ + public void addResult(int index, Object result) { this.result.put(index, result); } - public void setStat(ClientProtos.RegionLoadStats stat){ + public void setStat(ClientProtos.RegionLoadStats stat) { this.stat = stat; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 0aa301c4c8cd..22114f8f624a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import java.util.Arrays; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; @@ -36,26 +37,16 @@ class MutableRegionInfo implements RegionInfo { private static final int MAX_REPLICA_ID = 0xFFFF; /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. 
- * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *
<p>
    - * **NOTE** - * - * The first hbase:meta region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. + * The new format for a region name contains its encodedName at the end. The encoded name also + * serves as the directory name for the region in the filesystem. New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. where, <encodedName> + * is a hex version of the MD5 hash of <tablename>,<startkey>,<regionIdTimestamp> The old + * region name format: <tablename>,<startkey>,<regionIdTimestamp> For region names in the + * old format, the encoded name is a 32-bit JenkinsHash integer value (in its decimal notation, + * string form). + *
<p>
    + * **NOTE** The first hbase:meta region, and regions created by an older version of HBase (0.20 or + * prior) will continue to use the old region name format. */ // This flag is in the parent of a split while the parent is still referenced by daughter @@ -76,8 +67,8 @@ class MutableRegionInfo implements RegionInfo { private final TableName tableName; private static int generateHashCode(final TableName tableName, final byte[] startKey, - final byte[] endKey, final long regionId, - final int replicaId, boolean offLine, byte[] regionName) { + final byte[] endKey, final long regionId, final int replicaId, boolean offLine, + byte[] regionName) { int result = Arrays.hashCode(regionName); result = (int) (result ^ regionId); result ^= Arrays.hashCode(checkStartKey(startKey)); @@ -89,11 +80,11 @@ private static int generateHashCode(final TableName tableName, final byte[] star } private static byte[] checkStartKey(byte[] startKey) { - return startKey == null? HConstants.EMPTY_START_ROW: startKey; + return startKey == null ? HConstants.EMPTY_START_ROW : startKey; } private static byte[] checkEndKey(byte[] endKey) { - return endKey == null? HConstants.EMPTY_END_ROW: endKey; + return endKey == null ? HConstants.EMPTY_END_ROW : endKey; } private static TableName checkTableName(TableName tableName) { @@ -119,7 +110,7 @@ private static int checkReplicaId(int regionId) { } MutableRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split, final long regionId, final int replicaId, boolean offLine) { + final boolean split, final long regionId, final int replicaId, boolean offLine) { this.tableName = checkTableName(tableName); this.startKey = checkStartKey(startKey); this.endKey = checkEndKey(endKey); @@ -145,11 +136,10 @@ public String getShortNameToLog() { /** @return the regionId */ @Override - public long getRegionId(){ + public long getRegionId() { return regionId; } - /** * @return the regionName as an array of bytes. * @see #getRegionNameAsString() @@ -191,8 +181,7 @@ public byte[] getEndKey() { } /** - * Get current table name of the region - * @return TableName + * Get current table name of the region n */ @Override public TableName getTable() { @@ -200,25 +189,22 @@ public TableName getTable() { } /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * Returns true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. 
end < start) */ @Override public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); if (cellComparator.compareRows(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); } boolean firstKeyInRange = cellComparator.compareRows(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - cellComparator.compareRows(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + boolean lastKeyInRange = cellComparator.compareRows(rangeEndKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); return firstKeyInRange && lastKeyInRange; } @@ -228,9 +214,9 @@ public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { @Override public boolean containsRow(byte[] row) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); - return cellComparator.compareRows(row, startKey) >= 0 && - (cellComparator.compareRows(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + return cellComparator.compareRows(row, startKey) >= 0 + && (cellComparator.compareRows(row, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } /** @return true if this region is a meta region */ @@ -248,8 +234,7 @@ public boolean isSplit() { } /** - * @param split set split status - * @return MutableRegionInfo + * @param split set split status n */ public MutableRegionInfo setSplit(boolean split) { this.split = split; @@ -268,10 +253,9 @@ public boolean isOffline() { } /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. - * @param offLine Set online/offline status. - * @return MutableRegionInfo + * The parent of a region split is offline while split daughters hold references to the parent. + * Offlined regions are closed. + * @param offLine Set online/offline status. n */ public MutableRegionInfo setOffline(boolean offLine) { this.offLine = offLine; @@ -309,14 +293,11 @@ public int getReplicaId() { */ @Override public String toString() { - return "{ENCODED => " + getEncodedName() + ", " + - HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + "'" + - (isOffline()? ", OFFLINE => true": "") + - (isSplit()? ", SPLIT => true": "") + - ((replicaId > 0)? ", REPLICA_ID => " + replicaId : "") + "}"; + return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + + ((replicaId > 0) ? 
", REPLICA_ID => " + replicaId : "") + "}"; } /** @@ -333,7 +314,7 @@ public boolean equals(Object o) { if (!(o instanceof RegionInfo)) { return false; } - return compareTo((RegionInfo)o) == 0; + return compareTo((RegionInfo) o) == 0; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index ab6fc9475142..cecaed3388cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -66,12 +65,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @InterfaceAudience.Public -public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable, - HeapSize { +public abstract class Mutation extends OperationWithAttributes + implements Row, CellScannable, HeapSize { public static final long MUTATION_OVERHEAD = ClassSize.align( - // This - ClassSize.OBJECT + - // row + OperationWithAttributes.attributes + // This + ClassSize.OBJECT + + // row + OperationWithAttributes.attributes 2 * ClassSize.REFERENCE + // Timestamp 1 * Bytes.SIZEOF_LONG + @@ -82,8 +81,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C // familyMap ClassSize.TREEMAP + // priority - ClassSize.INTEGER - ); + ClassSize.INTEGER); /** * The attribute for storing the list of clusters that have consumed the change. @@ -98,17 +96,16 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C private static final String RETURN_RESULTS = "_rr_"; // TODO: row should be final - protected byte [] row = null; + protected byte[] row = null; protected long ts = HConstants.LATEST_TIMESTAMP; protected Durability durability = Durability.USE_DEFAULT; // TODO: familyMap should be final // A Map sorted by column family. - protected NavigableMap> familyMap; + protected NavigableMap> familyMap; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected Mutation() { this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -118,19 +115,19 @@ protected Mutation(Mutation clone) { super(clone); this.row = clone.getRow(); this.ts = clone.getTimestamp(); - this.familyMap = clone.getFamilyCellMap().entrySet().stream(). - collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { + this.familyMap = clone.getFamilyCellMap().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { throw new RuntimeException("collisions!!!"); }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); } /** * Construct the mutation with user defined data. - * @param row row. CAN'T be null - * @param ts timestamp + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. 
CAN'T be null */ - protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { + protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { this.row = Preconditions.checkNotNull(row); if (row.length == 0) { throw new IllegalArgumentException("Row can't be empty"); @@ -145,9 +142,8 @@ public CellScanner cellScanner() { } /** - * Creates an empty list if one doesn't exist for the given column family - * or else it returns the associated list of Cell objects. - * + * Creates an empty list if one doesn't exist for the given column family or else it returns the + * associated list of Cell objects. * @param family column family * @return a list of Cell objects, returns an empty list if one doesn't exist. */ @@ -162,7 +158,6 @@ List getCellList(byte[] family) { /* * Create a KeyValue with this objects row key and the Put identifier. - * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { @@ -170,12 +165,8 @@ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] valu } /** - * Create a KeyValue with this objects row key and the Put identifier. - * @param family - * @param qualifier - * @param ts - * @param value - * @param tags - Specify the Tags as an Array + * Create a KeyValue with this objects row key and the Put identifier. nnnn * @param tags - + * Specify the Tags as an Array * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) { @@ -185,21 +176,18 @@ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] valu /* * Create a KeyValue with this objects row key and the Put identifier. - * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value, - Tag[] tags) { - return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, - family, 0, family == null ? 0 : family.length, - qualifier, ts, KeyValue.Type.Put, value, tags != null ? Arrays.asList(tags) : null); + Tag[] tags) { + return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, family, 0, + family == null ? 0 : family.length, qualifier, ts, KeyValue.Type.Put, value, + tags != null ? Arrays.asList(tags) : null); } /** - * Compile the column family (i.e. schema) information - * into a Map. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map + * Compile the column family (i.e. schema) information into a Map. Useful for parsing and + * aggregation by debugging, logging, and administration tools. n */ @Override public Map getFingerprint() { @@ -208,18 +196,17 @@ public Map getFingerprint() { // ideally, we would also include table information, but that information // is not stored in each Operation instance. map.put("families", families); - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. 
- * @param maxCols a limit on the number of columns output prior to truncation - * @return Map + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. + * @param maxCols a limit on the number of columns output prior to truncation n */ @Override public Map toMap(int maxCols) { @@ -232,7 +219,7 @@ public Map toMap(int maxCols) { map.put("row", Bytes.toStringBinary(this.row)); int colCount = 0; // iterate through all column families affected - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { // map from this family to details for each cell affected within the family List> qualifierDetails = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); @@ -241,7 +228,7 @@ public Map toMap(int maxCols) { continue; } // add details for each cell - for (Cell cell: entry.getValue()) { + for (Cell cell : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -266,16 +253,15 @@ public Map toMap(int maxCols) { private static Map cellToStringMap(Cell c) { Map stringMap = new HashMap<>(); - stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength())); + stringMap.put("qualifier", + Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); stringMap.put("timestamp", c.getTimestamp()); stringMap.put("vlen", c.getValueLength()); List tags = PrivateCellUtil.getTags(c); if (tags != null) { List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { - tagsString - .add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); + tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); } stringMap.put("tag", tagsString); } @@ -283,8 +269,7 @@ private static Map cellToStringMap(Cell c) { } /** - * Set the durability for this mutation - * @param d + * Set the durability for this mutation n */ public Mutation setDurability(Durability d) { this.durability = d; @@ -297,10 +282,9 @@ public Durability getDurability() { } /** - * Method for retrieving the put's familyMap - * @return familyMap + * Method for retrieving the put's familyMap n */ - public NavigableMap> getFamilyCellMap() { + public NavigableMap> getFamilyCellMap() { return this.familyMap; } @@ -313,18 +297,15 @@ public boolean isEmpty() { } /** - * Method for retrieving the delete's row - * @return row + * Method for retrieving the delete's row n */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } /** - * Method for retrieving the timestamp. - * - * @return timestamp + * Method for retrieving the timestamp. n */ public long getTimestamp() { return this.ts; @@ -351,10 +332,10 @@ public Mutation setClusterIds(List clusterIds) { public List getClusterIds() { List clusterIds = new ArrayList<>(); byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS); - if(bytes != null) { + if (bytes != null) { ByteArrayDataInput in = ByteStreams.newDataInput(bytes); int numClusters = in.readInt(); - for(int i=0; i getClusterIds() { } /** - * Sets the visibility expression associated with cells in this Mutation. - * @param expression + * Sets the visibility expression associated with cells in this Mutation. 
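A sketch of attaching a visibility expression to a concrete Mutation subclass (the "secret" and "admin" labels are examples and must already be defined as visibility labels on the cluster):

    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    // Only users whose authorizations satisfy the expression will see this cell.
    put.setCellVisibility(new CellVisibility("secret|admin"));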
n */ public Mutation setCellVisibility(CellVisibility expression) { this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, - toCellVisibility(expression).toByteArray()); + toCellVisibility(expression).toByteArray()); return this; } /** - * @return CellVisibility associated with cells in this Mutation. - * @throws DeserializationException + * @return CellVisibility associated with cells in this Mutation. n */ public CellVisibility getCellVisibility() throws DeserializationException { byte[] cellVisibilityBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY); @@ -382,10 +361,8 @@ public CellVisibility getCellVisibility() throws DeserializationException { } /** - * Create a protocol buffer CellVisibility based on a client CellVisibility. - * - * @param cellVisibility - * @return a protocol buffer CellVisibility + * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a + * protocol buffer CellVisibility */ static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) { ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); @@ -394,10 +371,8 @@ static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibilit } /** - * Convert a protocol buffer CellVisibility to a client CellVisibility - * - * @param proto - * @return the converted client CellVisibility + * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted + * client CellVisibility */ private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) { if (proto == null) return null; @@ -405,13 +380,11 @@ private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto } /** - * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * - * @param protoBytes - * @return the converted client CellVisibility - * @throws DeserializationException + * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the + * converted client CellVisibility n */ - private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { + private static CellVisibility toCellVisibility(byte[] protoBytes) + throws DeserializationException { if (protoBytes == null) return null; ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); ClientProtos.CellVisibility proto = null; @@ -453,20 +426,17 @@ public long heapSize() { heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); // Adding map overhead - heapsize += - ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); - for(Map.Entry> entry : getFamilyCellMap().entrySet()) { - //Adding key overhead - heapsize += - ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - - //This part is kinds tricky since the JVM can reuse references if you - //store the same value, but have a good match with SizeOf at the moment - //Adding value overhead + heapsize += ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + // Adding key overhead + heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length); + + // This part is kinds tricky since the JVM can reuse references if you + // store the same value, but have a good match with SizeOf at the moment + // Adding value overhead heapsize += ClassSize.align(ClassSize.ARRAYLIST); int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + - size * 
ClassSize.REFERENCE); + heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE); for (Cell cell : entry.getValue()) { heapsize += cell.heapSize(); @@ -485,7 +455,7 @@ public byte[] getACL() { } /** - * @param user User short name + * @param user User short name * @param perms Permissions for the user */ public Mutation setACL(String user, Permission perms) { @@ -509,8 +479,8 @@ public Mutation setACL(Map perms) { /** * Return the TTL requested for the result of the mutation, in milliseconds. - * @return the TTL requested for the result of the mutation, in milliseconds, - * or Long.MAX_VALUE if unset + * @return the TTL requested for the result of the mutation, in milliseconds, or Long.MAX_VALUE if + * unset */ public long getTTL() { byte[] ttlBytes = getAttribute(OP_ATTRIBUTE_TTL); @@ -522,8 +492,7 @@ public long getTTL() { /** * Set the TTL desired for the result of the mutation, in milliseconds. - * @param ttl the TTL desired for the result of the mutation, in milliseconds - * @return this + * @param ttl the TTL desired for the result of the mutation, in milliseconds n */ public Mutation setTTL(long ttl) { setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl)); @@ -551,7 +520,7 @@ protected Mutation setReturnResults(boolean returnResults) { * Subclasses should override this method to add the heap size of their own fields. * @return the heap size to add (will be aligned). */ - protected long extraHeapSize(){ + protected long extraHeapSize() { return 0L; } @@ -567,76 +536,71 @@ public Mutation setTimestamp(long timestamp) { } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family & qualifier. - * Both given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family & qualifier. Both given arguments must match the KeyValue object to return + * true. + * @param family column family * @param qualifier column qualifier - * @return returns true if the given family and qualifier already has an - * existing KeyValue object in the family map. + * @return returns true if the given family and qualifier already has an existing KeyValue object + * in the family map. */ - public boolean has(byte [] family, byte [] qualifier) { + public boolean has(byte[] family, byte[] qualifier) { return has(family, qualifier, this.ts, HConstants.EMPTY_BYTE_ARRAY, true, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. + * @param family column family * @param qualifier column qualifier - * @param ts timestamp - * @return returns true if the given family, qualifier and timestamp already has an - * existing KeyValue object in the family map. + * @param ts timestamp + * @return returns true if the given family, qualifier and timestamp already has an existing + * KeyValue object in the family map. 
*/ - public boolean has(byte [] family, byte [] qualifier, long ts) { + public boolean has(byte[] family, byte[] qualifier, long ts) { return has(family, qualifier, ts, HConstants.EMPTY_BYTE_ARRAY, false, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. + * @param family column family * @param qualifier column qualifier - * @param value value to check - * @return returns true if the given family, qualifier and value already has an - * existing KeyValue object in the family map. + * @param value value to check + * @return returns true if the given family, qualifier and value already has an existing KeyValue + * object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, byte[] value) { return has(family, qualifier, this.ts, value, true, false); } /** - * A convenience method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp. - * All 4 given arguments must match the KeyValue object to return true. - * - * @param family column family + * A convenience method to determine if this object's familyMap contains the given value assigned + * to the given family, qualifier and timestamp. All 4 given arguments must match the KeyValue + * object to return true. + * @param family column family * @param qualifier column qualifier - * @param ts timestamp - * @param value value to check - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * @param ts timestamp + * @param value value to check + * @return returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, long ts, byte[] value) { return has(family, qualifier, ts, value, false, false); } /** * Returns a list of all KeyValue objects with matching column family and qualifier. - * - * @param family column family + * @param family column family * @param qualifier column qualifier - * @return a list of KeyValue objects with the matching family and qualifier, - * returns an empty list if one doesn't exist for the given family. + * @return a list of KeyValue objects with the matching family and qualifier, returns an empty + * list if one doesn't exist for the given family. 
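As context for the Mutation helpers whose Javadoc is reformatted in this hunk (setCellVisibility, setTTL, has, get), here is a minimal usage sketch. It is illustrative only, not part of the patch; the row, family, and qualifier names are made up.

// Illustrative only: exercising the Mutation convenience methods above.
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationAttributesSketch {
  public static void main(String[] args) throws Exception {
    byte[] cf = Bytes.toBytes("cf");
    byte[] qual = Bytes.toBytes("q1");

    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(cf, qual, Bytes.toBytes("value"));
    // Attributes are carried on the mutation itself.
    put.setTTL(60_000L);                                  // result expires after one minute
    put.setCellVisibility(new CellVisibility("secret"));  // visibility expression for the cells

    // The has()/get() helpers inspect the familyMap built up by addColumn().
    boolean present = put.has(cf, qual);                  // true: family + qualifier was added
    List<Cell> cells = put.get(cf, qual);                 // the matching cells (one here)
    System.out.println(present + " / " + cells.size() + " / ttl=" + put.getTTL());
  }
}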
*/ public List get(byte[] family, byte[] qualifier) { List filteredList = new ArrayList<>(); - for (Cell cell: getCellList(family)) { + for (Cell cell : getCellList(family)) { if (CellUtil.matchingQualifier(cell, qualifier)) { filteredList.add(cell); } @@ -645,21 +609,13 @@ public List get(byte[] family, byte[] qualifier) { } /* - * Private method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp - * respecting the 2 boolean arguments - * - * @param family - * @param qualifier - * @param ts - * @param value - * @param ignoreTS - * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * Private method to determine if this object's familyMap contains the given value assigned to the + * given family, qualifier and timestamp respecting the 2 boolean arguments nnnnnn * @return + * returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. */ - protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, - boolean ignoreTS, boolean ignoreValue) { + protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS, + boolean ignoreValue) { List list = getCellList(family); if (list.isEmpty()) { return false; @@ -671,31 +627,34 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, // F F => 1 if (!ignoreTS && !ignoreValue) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier) && - CellUtil.matchingValue(cell, value) && - cell.getTimestamp() == ts) { + if ( + CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && CellUtil.matchingValue(cell, value) && cell.getTimestamp() == ts + ) { return true; } } } else if (ignoreValue && !ignoreTS) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) - && cell.getTimestamp() == ts) { + if ( + CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && cell.getTimestamp() == ts + ) { return true; } } } else if (!ignoreValue && ignoreTS) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) - && CellUtil.matchingValue(cell, value)) { + if ( + CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && CellUtil.matchingValue(cell, value) + ) { return true; } } } else { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier)) { + if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) { return true; } } @@ -705,23 +664,20 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, /** * @param row Row to check - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @throws IllegalArgumentException Thrown if row is empty or null or > + * {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row) { - return checkRow(row, 0, row == null? 0: row.length); + static byte[] checkRow(final byte[] row) { + return checkRow(row, 0, row == null ? 
0 : row.length); } /** - * @param row Row to check - * @param offset - * @param length - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @param row Row to check nn * @throws IllegalArgumentException Thrown if row is + * empty or null or > {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row, final int offset, final int length) { + static byte[] checkRow(final byte[] row, final int offset, final int length) { if (row == null) { throw new IllegalArgumentException("Row buffer is null"); } @@ -729,8 +685,8 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, throw new IllegalArgumentException("Row length is 0"); } if (length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + length + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + length + " is > " + HConstants.MAX_ROW_LENGTH); } return row; } @@ -743,18 +699,18 @@ static void checkRow(ByteBuffer row) { throw new IllegalArgumentException("Row length is 0"); } if (row.remaining() > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + row.remaining() + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + row.remaining() + " is > " + HConstants.MAX_ROW_LENGTH); } } Mutation add(Cell cell) throws IOException { - //Checking that the row of the kv is the same as the mutation + // Checking that the row of the kv is the same as the mutation // TODO: It is fraught with risk if user pass the wrong row. // Throwing the IllegalArgumentException is more suitable I'd say. if (!CellUtil.matchingRows(cell, this.row)) { - throw new WrongRowIOException("The row in " + cell.toString() + - " doesn't match the original one " + Bytes.toStringBinary(this.row)); + throw new WrongRowIOException("The row in " + cell.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } byte[] family; @@ -785,8 +741,8 @@ Mutation add(Cell cell) throws IOException { public abstract CellBuilder getCellBuilder(CellBuilderType cellBuilderType); /** - * get a CellBuilder instance that already has relevant Type and Row set. - * the default CellBuilderType is CellBuilderType.SHALLOW_COPY + * get a CellBuilder instance that already has relevant Type and Row set. the default + * CellBuilderType is CellBuilderType.SHALLOW_COPY * @return CellBuilder which already has relevant Type and Row set. */ public CellBuilder getCellBuilder() { @@ -796,9 +752,9 @@ public CellBuilder getCellBuilder() { /** * get a CellBuilder instance that already has relevant Type and Row set. * @param cellBuilderType e.g CellBuilderType.SHALLOW_COPY - * @param cellType e.g Cell.Type.Put + * @param cellType e.g Cell.Type.Put * @return CellBuilder which already has relevant Type and Row set. 
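A small sketch of the getCellBuilder() helper documented just above may be useful here; it is not part of the patch. The builder returned by a Put already has the mutation's row and Type.Put preset, so only family, qualifier, and value need to be supplied.

// Illustrative only: building a Cell through the mutation's preset builder.
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderSketch {
  public static void main(String[] args) throws Exception {
    Put put = new Put(Bytes.toBytes("row1"));
    Cell cell = put.getCellBuilder()            // row and Type.Put are already set
      .setFamily(Bytes.toBytes("cf"))
      .setQualifier(Bytes.toBytes("q1"))
      .setValue(Bytes.toBytes("value"))
      .build();
    put.add(cell);                              // accepted because the cell's row matches the Put's row
    System.out.println(put.size());             // 1
  }
}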
- */ + */ protected CellBuilder getCellBuilder(CellBuilderType cellBuilderType, Cell.Type cellType) { CellBuilder builder = CellBuilderFactory.create(cellBuilderType).setRow(row).setType(cellType); return new CellBuilder() { @@ -876,11 +832,10 @@ public CellBuilder clear() { } private static final class CellWrapper implements ExtendedCell { - private static final long FIXED_OVERHEAD = ClassSize.align( - ClassSize.OBJECT // object header - + KeyValue.TIMESTAMP_SIZE // timestamp - + Bytes.SIZEOF_LONG // sequence id - + 1 * ClassSize.REFERENCE); // references to cell + private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT // object header + + KeyValue.TIMESTAMP_SIZE // timestamp + + Bytes.SIZEOF_LONG // sequence id + + 1 * ClassSize.REFERENCE); // references to cell private final Cell cell; private long sequenceId; private long timestamp; @@ -1013,21 +968,19 @@ public byte[] cloneTags() { } private long heapOverhead() { - return FIXED_OVERHEAD - + ClassSize.ARRAY // row - + getFamilyLength() == 0 ? 0 : ClassSize.ARRAY - + getQualifierLength() == 0 ? 0 : ClassSize.ARRAY - + getValueLength() == 0 ? 0 : ClassSize.ARRAY - + getTagsLength() == 0 ? 0 : ClassSize.ARRAY; + return FIXED_OVERHEAD + ClassSize.ARRAY // row + + getFamilyLength() == 0 + ? 0 + : ClassSize.ARRAY + getQualifierLength() == 0 ? 0 + : ClassSize.ARRAY + getValueLength() == 0 ? 0 + : ClassSize.ARRAY + getTagsLength() == 0 ? 0 + : ClassSize.ARRAY; } @Override public long heapSize() { - return heapOverhead() - + ClassSize.align(getRowLength()) - + ClassSize.align(getFamilyLength()) - + ClassSize.align(getQualifierLength()) - + ClassSize.align(getValueLength()) + return heapOverhead() + ClassSize.align(getRowLength()) + ClassSize.align(getFamilyLength()) + + ClassSize.align(getQualifierLength()) + ClassSize.align(getValueLength()) + ClassSize.align(getTagsLength()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java index 184f0c0bc0f4..6d5d94802d0e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java index 70fa36a5afa6..3020be221059 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * NonceGenerator interface. - * In general, nonce group is an ID (one per client, or region+client, or whatever) that - * could be used to reduce collision potential, or be used by compatible server nonce manager - * to optimize nonce storage and removal. See HBASE-3787. + * NonceGenerator interface. 
In general, nonce group is an ID (one per client, or region+client, or + * whatever) that could be used to reduce collision potential, or be used by compatible server nonce + * manager to optimize nonce storage and removal. See HBASE-3787. */ @InterfaceAudience.Private public interface NonceGenerator { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java index 982ec5b0065b..a720e1a7112d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java @@ -24,25 +24,16 @@ /** * A collection of criteria used for table selection. The logic of table selection is as follows: *

- *   <li>
- *     When no parameter values are provided, an unfiltered list of all user tables is returned.
- *   </li>
- *   <li>
- *     When a list of {@link TableName TableNames} are provided, the filter starts with any of
- *     these tables that exist.
- *   </li>
- *   <li>
- *     When a {@code namespace} name is provided, the filter starts with all the tables present in
- *     that namespace.
- *   </li>
- *   <li>
- *     If both a list of {@link TableName TableNames} and a {@code namespace} name are provided,
- *     the {@link TableName} list is honored and the {@code namespace} name is ignored.
- *   </li>
- *   <li>
- *     If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
- *     reduced to those that match the provided regular expression.
- *   </li>
+ * <li>When no parameter values are provided, an unfiltered list of all user tables is returned.
+ * </li>
+ * <li>When a list of {@link TableName TableNames} are provided, the filter starts with any of these
+ * tables that exist.</li>
+ * <li>When a {@code namespace} name is provided, the filter starts with all the tables present in
+ * that namespace.</li>
+ * <li>If both a list of {@link TableName TableNames} and a {@code namespace} name are provided, the
+ * {@link TableName} list is honored and the {@code namespace} name is ignored.</li>
+ * <li>If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
+ * reduced to those that match the provided regular expression.</li>
 * </ul>
    */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java index 115e55f336f6..96182ca4b296 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -40,10 +38,9 @@ final public class OnlineLogRecord extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(OnlineLogRecord.class, (JsonSerializer) - (slowLogPayload, type, jsonSerializationContext) -> { + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(OnlineLogRecord.class, + (JsonSerializer) (slowLogPayload, type, jsonSerializationContext) -> { Gson gson = new Gson(); JsonObject jsonObj = (JsonObject) gson.toJsonTree(slowLogPayload); if (slowLogPayload.getMultiGetsCount() == 0) { @@ -132,10 +129,10 @@ public int getMultiServiceCalls() { } private OnlineLogRecord(final long startTime, final int processingTime, final int queueTime, - final long responseSize, final String clientAddress, final String serverClass, - final String methodName, final String callDetails, final String param, - final String regionName, final String userName, final int multiGetsCount, - final int multiMutationsCount, final int multiServiceCalls) { + final long responseSize, final String clientAddress, final String serverClass, + final String methodName, final String callDetails, final String param, final String regionName, + final String userName, final int multiGetsCount, final int multiMutationsCount, + final int multiServiceCalls) { this.startTime = startTime; this.processingTime = processingTime; this.queueTime = queueTime; @@ -239,9 +236,9 @@ public OnlineLogRecordBuilder setMultiServiceCalls(int multiServiceCalls) { } public OnlineLogRecord build() { - return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, - clientAddress, serverClass, methodName, callDetails, param, regionName, - userName, multiGetsCount, multiMutationsCount, multiServiceCalls); + return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, clientAddress, + serverClass, methodName, callDetails, param, regionName, userName, multiGetsCount, + multiMutationsCount, multiServiceCalls); } } @@ -257,42 +254,22 @@ public boolean equals(Object o) { OnlineLogRecord that = (OnlineLogRecord) o; - return new EqualsBuilder() - .append(startTime, that.startTime) - .append(processingTime, that.processingTime) - .append(queueTime, that.queueTime) - .append(responseSize, that.responseSize) - .append(multiGetsCount, that.multiGetsCount) + return new EqualsBuilder().append(startTime, that.startTime) + .append(processingTime, that.processingTime).append(queueTime, that.queueTime) + .append(responseSize, that.responseSize).append(multiGetsCount, that.multiGetsCount) .append(multiMutationsCount, 
that.multiMutationsCount) - .append(multiServiceCalls, that.multiServiceCalls) - .append(clientAddress, that.clientAddress) - .append(serverClass, that.serverClass) - .append(methodName, that.methodName) - .append(callDetails, that.callDetails) - .append(param, that.param) - .append(regionName, that.regionName) - .append(userName, that.userName) - .isEquals(); + .append(multiServiceCalls, that.multiServiceCalls).append(clientAddress, that.clientAddress) + .append(serverClass, that.serverClass).append(methodName, that.methodName) + .append(callDetails, that.callDetails).append(param, that.param) + .append(regionName, that.regionName).append(userName, that.userName).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(startTime) - .append(processingTime) - .append(queueTime) - .append(responseSize) - .append(clientAddress) - .append(serverClass) - .append(methodName) - .append(callDetails) - .append(param) - .append(regionName) - .append(userName) - .append(multiGetsCount) - .append(multiMutationsCount) - .append(multiServiceCalls) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(startTime).append(processingTime).append(queueTime) + .append(responseSize).append(clientAddress).append(serverClass).append(methodName) + .append(callDetails).append(param).append(regionName).append(userName).append(multiGetsCount) + .append(multiMutationsCount).append(multiServiceCalls).toHashCode(); } @Override @@ -302,22 +279,14 @@ public String toJsonPrettyPrint() { @Override public String toString() { - return new ToStringBuilder(this) - .append("startTime", startTime) - .append("processingTime", processingTime) - .append("queueTime", queueTime) - .append("responseSize", responseSize) - .append("clientAddress", clientAddress) - .append("serverClass", serverClass) - .append("methodName", methodName) - .append("callDetails", callDetails) - .append("param", param) - .append("regionName", regionName) - .append("userName", userName) - .append("multiGetsCount", multiGetsCount) + return new ToStringBuilder(this).append("startTime", startTime) + .append("processingTime", processingTime).append("queueTime", queueTime) + .append("responseSize", responseSize).append("clientAddress", clientAddress) + .append("serverClass", serverClass).append("methodName", methodName) + .append("callDetails", callDetails).append("param", param).append("regionName", regionName) + .append("userName", userName).append("multiGetsCount", multiGetsCount) .append("multiMutationsCount", multiMutationsCount) - .append("multiServiceCalls", multiServiceCalls) - .toString(); + .append("multiServiceCalls", multiServiceCalls).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java index 3b6a6f5e51c4..a517f0bb43a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,12 @@ import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.JsonMapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Superclass for any type that maps to a potentially application-level query. 
- * (e.g. Put, Get, Delete, Scan, Next, etc.) - * Contains methods for exposure to logging and debugging tools. + * Superclass for any type that maps to a potentially application-level query. (e.g. Put, Get, + * Delete, Scan, Next, etc.) Contains methods for exposure to logging and debugging tools. */ @InterfaceAudience.Public public abstract class Operation { @@ -36,15 +33,15 @@ public abstract class Operation { private static final int DEFAULT_MAX_COLS = 5; /** - * Produces a Map containing a fingerprint which identifies the type and - * the static schema components of a query (i.e. column families) + * Produces a Map containing a fingerprint which identifies the type and the static schema + * components of a query (i.e. column families) * @return a map containing fingerprint information (i.e. column families) */ public abstract Map getFingerprint(); /** - * Produces a Map containing a summary of the details of a query - * beyond the scope of the fingerprint (i.e. columns, rows...) + * Produces a Map containing a summary of the details of a query beyond the scope of the + * fingerprint (i.e. columns, rows...) * @param maxCols a limit on the number of columns output prior to truncation * @return a map containing parameters of a query (i.e. rows, columns...) */ @@ -59,8 +56,7 @@ public Map toMap() { } /** - * Produces a JSON object for fingerprint and details exposure in a - * parseable format. + * Produces a JSON object for fingerprint and details exposure in a parseable format. * @param maxCols a limit on the number of columns to include in the JSON * @return a JSONObject containing this Operation's information, as a string */ @@ -69,8 +65,7 @@ public String toJSON(int maxCols) throws IOException { } /** - * Produces a JSON object sufficient for description of a query - * in a debugging or logging context. + * Produces a JSON object sufficient for description of a query in a debugging or logging context. * @return the produced JSON object, as a string */ public String toJSON() throws IOException { @@ -78,17 +73,16 @@ public String toJSON() throws IOException { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @param maxCols a limit on the number of columns output in the summary - * prior to truncation + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. + * @param maxCols a limit on the number of columns output in the summary prior to truncation * @return a JSON-parseable String */ public String toString(int maxCols) { - /* for now this is merely a wrapper from producing a JSON string, but - * toJSON is kept separate in case this is changed to be a less parsable - * pretty printed representation. + /* + * for now this is merely a wrapper from producing a JSON string, but toJSON is kept separate in + * case this is changed to be a less parsable pretty printed representation. */ try { return toJSON(maxCols); @@ -98,10 +92,9 @@ public String toString(int maxCols) { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @return String + * Produces a string representation of this Operation. 
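To make the fingerprint/JSON hooks on Operation concrete, here is an illustrative sketch (not part of the patch) using a Scan purely as an example subclass; the family and qualifier are hypothetical.

// Illustrative only: the logging/debugging views exposed by Operation.
import java.util.Map;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationLoggingSketch {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));

    Map<String, Object> fingerprint = scan.getFingerprint(); // type and column families only
    Map<String, Object> details = scan.toMap(5);             // fingerprint plus rows/columns, capped at 5 columns
    String json = scan.toJSON(5);                            // parseable form; toString() falls back to this on failure

    System.out.println(fingerprint);
    System.out.println(details.keySet());
    System.out.println(json);
  }
}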
It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. n */ @Override public String toString() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java index 7342e65bb316..e34c9d6eacb4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Collections; @@ -39,15 +37,15 @@ public abstract class OperationWithAttributes extends Operation implements Attri private int priority = HConstants.PRIORITY_UNSET; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected OperationWithAttributes() { } protected OperationWithAttributes(OperationWithAttributes clone) { - this.attributes = clone.getAttributesMap() == null ? null : - clone.getAttributesMap().entrySet().stream() + this.attributes = clone.getAttributesMap() == null + ? null + : clone.getAttributesMap().entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> { throw new RuntimeException("collisions!!!"); }, () -> new TreeMap<>())); @@ -96,7 +94,7 @@ protected long getAttributeSize() { long size = 0; if (attributes != null) { size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY); - for(Map.Entry entry : this.attributes.entrySet()) { + for (Map.Entry entry : this.attributes.entrySet()) { size += ClassSize.align(ClassSize.STRING + entry.getKey().length()); size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length); } @@ -105,13 +103,10 @@ protected long getAttributeSize() { } /** - * This method allows you to set an identifier on an operation. The original - * motivation for this was to allow the identifier to be used in slow query - * logging, but this could obviously be useful in other places. One use of - * this could be to put a class.method identifier in here to see where the - * slow query is coming from. - * @param id - * id to set for the scan + * This method allows you to set an identifier on an operation. The original motivation for this + * was to allow the identifier to be used in slow query logging, but this could obviously be + * useful in other places. One use of this could be to put a class.method identifier in here to + * see where the slow query is coming from. n * id to set for the scan */ public OperationWithAttributes setId(String id) { setAttribute(ID_ATRIBUTE, Bytes.toBytes(id)); @@ -119,13 +114,12 @@ public OperationWithAttributes setId(String id) { } /** - * This method allows you to retrieve the identifier for the operation if one - * was set. + * This method allows you to retrieve the identifier for the operation if one was set. * @return the id or null if not set */ public String getId() { byte[] attr = getAttribute(ID_ATRIBUTE); - return attr == null? 
null: Bytes.toString(attr); + return attr == null ? null : Bytes.toString(attr); } public OperationWithAttributes setPriority(int priority) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java index 1b1ded9953bb..56a8dd19fcc5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java index 8aedc4d2205c..f264d9babd78 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +35,8 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator { private PerClientRandomNonceGenerator() { byte[] clientIdBase = ClientIdGenerator.generateClientId(); - this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + - ThreadLocalRandom.current().nextInt(); + this.clientId = + (((long) Arrays.hashCode(clientIdBase)) << 32) + ThreadLocalRandom.current().nextInt(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index 719251ff1f09..bcc1bda9ef02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -39,10 +37,9 @@ /** * Used to perform Put operations for a single row. *

    - * To perform a Put, instantiate a Put object with the row to insert to, and - * for each column to be inserted, execute {@link #addColumn(byte[], byte[], - * byte[]) add} or {@link #addColumn(byte[], byte[], long, byte[]) add} if - * setting the timestamp. + * To perform a Put, instantiate a Put object with the row to insert to, and for each column to be + * inserted, execute {@link #addColumn(byte[], byte[], byte[]) add} or + * {@link #addColumn(byte[], byte[], long, byte[]) add} if setting the timestamp. */ @InterfaceAudience.Public public class Put extends Mutation implements HeapSize { @@ -50,27 +47,23 @@ public class Put extends Mutation implements HeapSize { * Create a Put operation for the specified row. * @param row row key */ - public Put(byte [] row) { + public Put(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** * Create a Put operation for the specified row, using a given timestamp. - * * @param row row key; we make a copy of what we are passed to keep local. - * @param ts timestamp + * @param ts timestamp */ public Put(byte[] row, long ts) { this(row, 0, row.length, ts); } /** - * We make a copy of the passed in row key to keep local. - * @param rowArray - * @param rowOffset - * @param rowLength + * We make a copy of the passed in row key to keep local. nnn */ - public Put(byte [] rowArray, int rowOffset, int rowLength) { + public Put(byte[] rowArray, int rowOffset, int rowLength) { this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); } @@ -96,13 +89,9 @@ public Put(ByteBuffer row) { } /** - * We make a copy of the passed in row key to keep local. - * @param rowArray - * @param rowOffset - * @param rowLength - * @param ts + * We make a copy of the passed in row key to keep local. nnnn */ - public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { + public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); this.ts = ts; @@ -113,24 +102,20 @@ public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { /** * Create a Put operation for an immutable row key. - * - * @param row row key - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param row row key + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. */ - public Put(byte [] row, boolean rowIsImmutable) { + public Put(byte[] row, boolean rowIsImmutable) { this(row, HConstants.LATEST_TIMESTAMP, rowIsImmutable); } /** * Create a Put operation for an immutable row key, using a given timestamp. - * - * @param row row key - * @param ts timestamp - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param row row key + * @param ts timestamp + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. 
*/ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Check and set timestamp @@ -141,15 +126,15 @@ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Deal with row according to rowIsImmutable checkRow(row); - if (rowIsImmutable) { // Row is immutable - this.row = row; // Do not make a local copy, but point to the provided byte array directly - } else { // Row is not immutable - this.row = Bytes.copy(row, 0, row.length); // Make a local copy + if (rowIsImmutable) { // Row is immutable + this.row = row; // Do not make a local copy, but point to the provided byte array directly + } else { // Row is not immutable + this.row = Bytes.copy(row, 0, row.length); // Make a local copy } } /** - * Copy constructor. Creates a Put operation cloned from the specified Put. + * Copy constructor. Creates a Put operation cloned from the specified Put. * @param putToCopy put to copy */ public Put(Put putToCopy) { @@ -157,38 +142,35 @@ public Put(Put putToCopy) { } /** - * Construct the Put with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. - * @param row row. CAN'T be null - * @param ts timestamp + * Construct the Put with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. + * @param row row. CAN'T be null + * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Put(byte[] row, long ts, NavigableMap> familyMap) { + public Put(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } /** * Add the specified column and value to this Put operation. - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param value column value - * @return this + * @param value column value n */ - public Put addColumn(byte [] family, byte [] qualifier, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, byte[] value) { return addColumn(family, qualifier, this.ts, value); } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. - * @param family family name + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. + * @param family family name * @param qualifier column qualifier - * @param ts version timestamp - * @param value column value - * @return this + * @param ts version timestamp + * @param value column value n */ - public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) { if (ts < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); } @@ -199,13 +181,12 @@ public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. - * @param family family name + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. 
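For orientation, a minimal sketch of the Put constructors and addColumn() overloads documented above; it is illustrative only, not part of the patch, and the names and timestamps are made up.

// Illustrative only: constructing Puts with and without a row-key copy.
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutConstructionSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    long ts = System.currentTimeMillis();

    // The row key is copied by default; pass rowIsImmutable=true to skip the copy
    // when the caller guarantees the array will not change for the Put's lifetime.
    Put copied = new Put(row, ts);
    Put zeroCopy = new Put(row, ts, true);

    copied.addColumn(cf, Bytes.toBytes("q1"), Bytes.toBytes("v1"));              // uses the Put's timestamp
    copied.addColumn(cf, Bytes.toBytes("q2"), ts - 1000L, Bytes.toBytes("v2"));  // explicit version timestamp

    System.out.println(copied.size() + " cells, ts=" + copied.getTimestamp());
    System.out.println(zeroCopy.getTimestamp());
  }
}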
+ * @param family family name * @param qualifier column qualifier - * @param ts version timestamp - * @param value column value - * @return this + * @param ts version timestamp + * @param value column value n */ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { if (ts < 0) { @@ -218,12 +199,9 @@ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer va } /** - * Add the specified KeyValue to this Put operation. Operation assumes that - * the passed KeyValue is immutable and its backing array will not be modified - * for the duration of this Put. - * @param cell individual cell - * @return this - * @throws java.io.IOException e + * Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is + * immutable and its backing array will not be modified for the duration of this Put. + * @param cell individual cell n * @throws java.io.IOException e */ public Put add(Cell cell) throws IOException { super.add(cell); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 919513ceb622..ab0d9887df2b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Map; - -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; @@ -29,11 +26,14 @@ import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** * Base class for HBase read operations; e.g. Scan and Get. 
@@ -46,8 +46,9 @@ public abstract class Query extends OperationWithAttributes { protected Consistency consistency = Consistency.STRONG; protected Map colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); protected Boolean loadColumnFamiliesOnDemand = null; + /** - * @return Filter + * n */ public Filter getFilter() { return filter; @@ -66,18 +67,16 @@ public Query setFilter(Filter filter) { } /** - * Sets the authorizations to be used by this Query - * @param authorizations + * Sets the authorizations to be used by this Query n */ public Query setAuthorizations(Authorizations authorizations) { - this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, ProtobufUtil - .toAuthorizations(authorizations).toByteArray()); + this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, + ProtobufUtil.toAuthorizations(authorizations).toByteArray()); return this; } /** - * @return The authorizations this Query is associated with. - * @throws DeserializationException + * @return The authorizations this Query is associated with. n */ public Authorizations getAuthorizations() throws DeserializationException { byte[] authorizationsBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY); @@ -93,7 +92,7 @@ public byte[] getACL() { } /** - * @param user User short name + * @param user User short name * @param perms Permissions for the user */ public Query setACL(String user, Permission perms) { @@ -111,7 +110,7 @@ public Query setACL(Map perms) { permMap.put(entry.getKey(), entry.getValue()); } setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL, - AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); + AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); return this; } @@ -134,10 +133,9 @@ public Query setConsistency(Consistency consistency) { /** * Specify region replica id where Query will fetch data from. Use this together with - * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from - * a specific replicaId. - *
    Expert: This is an advanced API exposed. Only use it if you know what you are doing - * @param Id + * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a + * specific replicaId.
    + * Expert: This is an advanced API exposed. Only use it if you know what you are doing n */ public Query setReplicaId(int Id) { this.targetReplicaId = Id; @@ -153,14 +151,10 @@ public int getReplicaId() { } /** - * Set the isolation level for this query. If the - * isolation level is set to READ_UNCOMMITTED, then - * this query will return data from committed and - * uncommitted transactions. If the isolation level - * is set to READ_COMMITTED, then this query will return - * data from committed transactions only. If a isolation - * level is not explicitly set on a Query, then it - * is assumed to be READ_COMMITTED. + * Set the isolation level for this query. If the isolation level is set to READ_UNCOMMITTED, then + * this query will return data from committed and uncommitted transactions. If the isolation level + * is set to READ_COMMITTED, then this query will return data from committed transactions only. If + * a isolation level is not explicitly set on a Query, then it is assumed to be READ_COMMITTED. * @param level IsolationLevel for this query */ public Query setIsolationLevel(IsolationLevel level) { @@ -169,32 +163,28 @@ public Query setIsolationLevel(IsolationLevel level) { } /** - * @return The isolation level of this query. - * If no isolation level was set for this query object, - * then it returns READ_COMMITTED. + * @return The isolation level of this query. If no isolation level was set for this query object, + * then it returns READ_COMMITTED. * @return The IsolationLevel for this query */ public IsolationLevel getIsolationLevel() { byte[] attr = getAttribute(ISOLATION_LEVEL); - return attr == null ? IsolationLevel.READ_COMMITTED : - IsolationLevel.fromBytes(attr); - } - - /** - * Set the value indicating whether loading CFs on demand should be allowed (cluster - * default is false). On-demand CF loading doesn't load column families until necessary, e.g. - * if you filter on one column, the other column family data will be loaded only for the rows - * that are included in result, not all rows like in normal case. - * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true, - * this can deliver huge perf gains when there's a cf with lots of data; however, it can - * also lead to some inconsistent results, as follows: - * - if someone does a concurrent update to both column families in question you may get a row - * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } - * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan - * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, - * { video => "my dog" } }. - * - if there's a concurrent split and you have more than 2 column families, some rows may be - * missing some column families. + return attr == null ? IsolationLevel.READ_COMMITTED : IsolationLevel.fromBytes(attr); + } + + /** + * Set the value indicating whether loading CFs on demand should be allowed (cluster default is + * false). On-demand CF loading doesn't load column families until necessary, e.g. if you filter + * on one column, the other column family data will be loaded only for the rows that are included + * in result, not all rows like in normal case. 
With column-specific filters, like + * SingleColumnValueFilter w/filterIfMissing == true, this can deliver huge perf gains when + * there's a cf with lots of data; however, it can also lead to some inconsistent results, as + * follows: - if someone does a concurrent update to both column families in question you may get + * a row that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" + * } } someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent + * scan filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, { video => + * "my dog" } }. - if there's a concurrent split and you have more than 2 column families, some + * rows may be missing some column families. */ public Query setLoadColumnFamiliesOnDemand(boolean value) { this.loadColumnFamiliesOnDemand = value; @@ -212,21 +202,17 @@ public Boolean getLoadColumnFamiliesOnDemandValue() { * Get the logical value indicating whether on-demand CF loading should be allowed. */ public boolean doLoadColumnFamiliesOnDemand() { - return (this.loadColumnFamiliesOnDemand != null) - && this.loadColumnFamiliesOnDemand; + return (this.loadColumnFamiliesOnDemand != null) && this.loadColumnFamiliesOnDemand; } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp) on a per CF bases. Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the default. + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp) on a + * per CF bases. Note, default maximum versions to return is 1. If your time range spans more than + * one version and you want all versions returned, up the number of versions beyond the default. * Column Family time ranges take precedence over the global time range. 
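The on-demand column-family loading and per-family time range options described above can be set on any Query subclass; the following sketch (not part of the patch, with made-up family names) shows them on a Scan.

// Illustrative only: Query-level options on a Scan.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class QueryOptionsSketch {
  public static void main(String[] args) {
    byte[] essentialCf = Bytes.toBytes("index");
    byte[] heavyCf = Bytes.toBytes("payload");

    Scan scan = new Scan();
    scan.addFamily(essentialCf);
    scan.addFamily(heavyCf);

    // Defer loading the heavy family until a row is known to be included in the
    // result (subject to the consistency caveats described above).
    scan.setLoadColumnFamiliesOnDemand(true);

    // Per-family time range [minStamp, maxStamp); it takes precedence over the
    // global time range for that family.
    scan.setColumnFamilyTimeRange(heavyCf, 0L, System.currentTimeMillis());

    System.out.println(scan.doLoadColumnFamiliesOnDemand());
  }
}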
- * * @param cf the column family for which you want to restrict * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @return this + * @param maxStamp maximum timestamp value, exclusive n */ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index ad0fd7ac807e..d88279eeafc6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -373,7 +373,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { private final NonceGenerator ng; RawAsyncHBaseAdmin(AsyncConnectionImpl connection, HashedWheelTimer retryTimer, - AsyncAdminBuilderBase builder) { + AsyncAdminBuilderBase builder) { this.connection = connection; this.retryTimer = retryTimer; this.metaTable = connection.getTable(META_TABLE_NAME); @@ -382,8 +382,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { this.pauseNs = builder.pauseNs; if (builder.pauseNsForServerOverloaded < builder.pauseNs) { LOG.warn( - "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + - " the normal pause value {} ms, use the greater one instead", + "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + + " the normal pause value {} ms, use the greater one instead", TimeUnit.NANOSECONDS.toMillis(builder.pauseNsForServerOverloaded), TimeUnit.NANOSECONDS.toMillis(builder.pauseNs)); this.pauseNsForServerOverloaded = builder.pauseNs; @@ -416,13 +416,13 @@ private AdminRequestCallerBuilder newAdminCaller() { @FunctionalInterface private interface MasterRpcCall { void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, - RpcCallback done); + RpcCallback done); } @FunctionalInterface private interface AdminRpcCall { void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, - RpcCallback done); + RpcCallback done); } @FunctionalInterface @@ -431,8 +431,8 @@ private interface Converter { } private CompletableFuture call(HBaseRpcController controller, - MasterService.Interface stub, PREQ preq, MasterRpcCall rpcCall, - Converter respConverter) { + MasterService.Interface stub, PREQ preq, MasterRpcCall rpcCall, + Converter respConverter) { CompletableFuture future = new CompletableFuture<>(); rpcCall.call(stub, controller, preq, new RpcCallback() { @@ -453,8 +453,8 @@ public void run(PRESP resp) { } private CompletableFuture adminCall(HBaseRpcController controller, - AdminService.Interface stub, PREQ preq, AdminRpcCall rpcCall, - Converter respConverter) { + AdminService.Interface stub, PREQ preq, AdminRpcCall rpcCall, + Converter respConverter) { CompletableFuture future = new CompletableFuture<>(); rpcCall.call(stub, controller, preq, new RpcCallback() { @@ -475,24 +475,24 @@ public void run(PRESP resp) { } private CompletableFuture procedureCall(PREQ preq, - MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + MasterRpcCall rpcCall, Converter respConverter, + ProcedureBiConsumer consumer) { return procedureCall(b -> { }, preq, rpcCall, respConverter, consumer); } private CompletableFuture procedureCall(TableName tableName, PREQ preq, - MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + MasterRpcCall rpcCall, Converter 
respConverter, + ProcedureBiConsumer consumer) { return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, consumer); } private CompletableFuture procedureCall( - Consumer> prioritySetter, PREQ preq, - MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + Consumer> prioritySetter, PREQ preq, + MasterRpcCall rpcCall, Converter respConverter, + ProcedureBiConsumer consumer) { MasterRequestCallerBuilder builder = this. newMasterCaller().action((controller, - stub) -> this. call(controller, stub, preq, rpcCall, respConverter)); + stub) -> this. call(controller, stub, preq, rpcCall, respConverter)); prioritySetter.accept(builder); CompletableFuture procFuture = builder.call(); CompletableFuture future = waitProcedureResult(procFuture); @@ -515,8 +515,8 @@ public CompletableFuture tableExists(TableName tableName) { @Override public CompletableFuture> listTableDescriptors(boolean includeSysTables) { - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(null, - includeSysTables)); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(null, includeSysTables)); } /** @@ -524,19 +524,17 @@ public CompletableFuture> listTableDescriptors(boolean inc */ @Override public CompletableFuture> listTableDescriptors(Pattern pattern, - boolean includeSysTables) { - Preconditions.checkNotNull(pattern, - "pattern is null. If you don't specify a pattern, " - + "use listTableDescriptors(boolean) instead"); - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(pattern, - includeSysTables)); + boolean includeSysTables) { + Preconditions.checkNotNull(pattern, "pattern is null. If you don't specify a pattern, " + + "use listTableDescriptors(boolean) instead"); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables)); } @Override public CompletableFuture> listTableDescriptors(List tableNames) { - Preconditions.checkNotNull(tableNames, - "tableNames is null. If you don't specify tableNames, " - + "use listTableDescriptors(boolean) instead"); + Preconditions.checkNotNull(tableNames, "tableNames is null. If you don't specify tableNames, " + + "use listTableDescriptors(boolean) instead"); if (tableNames.isEmpty()) { return CompletableFuture.completedFuture(Collections.emptyList()); } @@ -544,13 +542,13 @@ public CompletableFuture> listTableDescriptors(List> - getTableDescriptors(GetTableDescriptorsRequest request) { + getTableDescriptors(GetTableDescriptorsRequest request) { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, request, (s, c, req, done) -> s.getTableDescriptors(c, req, done), - (resp) -> ProtobufUtil.toTableDescriptorList(resp))) - .call(); + .action((controller, stub) -> this.> call(controller, stub, request, + (s, c, req, done) -> s.getTableDescriptors(c, req, done), + (resp) -> ProtobufUtil.toTableDescriptorList(resp))) + .call(); } @Override @@ -559,54 +557,51 @@ public CompletableFuture> listTableNames(boolean includeSysTable } @Override - public CompletableFuture> - listTableNames(Pattern pattern, boolean includeSysTables) { + public CompletableFuture> listTableNames(Pattern pattern, + boolean includeSysTables) { Preconditions.checkNotNull(pattern, - "pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead"); + "pattern is null. 
If you don't specify a pattern, use listTableNames(boolean) instead"); return getTableNames(RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables)); } private CompletableFuture> getTableNames(GetTableNamesRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call(controller, - stub, request, (s, c, req, done) -> s.getTableNames(c, req, done), - (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, request, + (s, c, req, done) -> s.getTableNames(c, req, done), + (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) + .call(); } @Override public CompletableFuture> listTableDescriptorsByNamespace(String name) { - return this.> newMasterCaller().action((controller, stub) -> this - .> call( - controller, stub, + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableDescriptorsByNamespace(c, req, done), (resp) -> ProtobufUtil.toTableDescriptorList(resp))) - .call(); + .call(); } @Override public CompletableFuture> listTableNamesByNamespace(String name) { - return this.> newMasterCaller().action((controller, stub) -> this - .> call( - controller, stub, + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableNamesByNamespace(c, req, done), (resp) -> ProtobufUtil.toTableNameList(resp.getTableNameList()))) - .call(); + .call(); } @Override public CompletableFuture getDescriptor(TableName tableName) { CompletableFuture future = new CompletableFuture<>(); addListener(this.> newMasterCaller().priority(tableName) - .action((controller, stub) -> this - .> call( - controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), + .action((controller, stub) -> this.> call(controller, stub, + RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, c, req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp.getTableSchemaList())) .call(), (tableSchemas, error) -> { @@ -631,7 +626,7 @@ public CompletableFuture createTable(TableDescriptor desc) { @Override public CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, - int numRegions) { + int numRegions) { try { return createTable(desc, getSplitKeys(startKey, endKey, numRegions)); } catch (IllegalArgumentException e) { @@ -642,7 +637,7 @@ public CompletableFuture createTable(TableDescriptor desc, byte[] startKey @Override public CompletableFuture createTable(TableDescriptor desc, byte[][] splitKeys) { Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys," - + " use createTable(TableDescriptor) instead"); + + " use createTable(TableDescriptor) instead"); try { verifySplitKeys(splitKeys); return createTable(desc.getTableName(), RequestConverter.buildCreateTableRequest(desc, @@ -663,15 +658,15 @@ private CompletableFuture createTable(TableName tableName, CreateTableRequ public CompletableFuture modifyTable(TableDescriptor desc) { return this. 
procedureCall(desc.getTableName(), RequestConverter.buildModifyTableRequest(desc.getTableName(), desc, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyTable(c, req, done), - (resp) -> resp.getProcId(), new ModifyTableProcedureBiConsumer(this, desc.getTableName())); + ng.newNonce()), + (s, c, req, done) -> s.modifyTable(c, req, done), (resp) -> resp.getProcId(), + new ModifyTableProcedureBiConsumer(this, desc.getTableName())); } @Override public CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT) { - return this - . procedureCall( - tableName, + return this. procedureCall(tableName, RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, ng.getNonceGroup(), ng.newNonce()), (s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done), @@ -691,8 +686,9 @@ public CompletableFuture deleteTable(TableName tableName) { public CompletableFuture truncateTable(TableName tableName, boolean preserveSplits) { return this. procedureCall(tableName, RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), - (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.truncateTable(c, req, done), (resp) -> resp.getProcId(), + new TruncateTableProcedureBiConsumer(tableName)); } @Override @@ -712,13 +708,13 @@ public CompletableFuture disableTable(TableName tableName) { } /** - * Utility for completing passed TableState {@link CompletableFuture} future - * using passed parameters. Sets error or boolean result ('true' if table matches - * the passed-in targetState). + * Utility for completing passed TableState {@link CompletableFuture} future using + * passed parameters. Sets error or boolean result ('true' if table matches the passed-in + * targetState). */ private static CompletableFuture completeCheckTableState( - CompletableFuture future, TableState tableState, Throwable error, - TableState.State targetState, TableName tableName) { + CompletableFuture future, TableState tableState, Throwable error, + TableState.State targetState, TableName tableName) { if (error != null) { future.completeExceptionally(error); } else { @@ -778,8 +774,7 @@ public CompletableFuture isTableAvailable(TableName tableName) { if (!enabled) { future.complete(false); } else { - addListener( - ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), + addListener(ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), (locations, error1) -> { if (error1 != null) { future.completeExceptionally(error1); @@ -802,11 +797,12 @@ public CompletableFuture isTableAvailable(TableName tableName) { } @Override - public CompletableFuture addColumnFamily( - TableName tableName, ColumnFamilyDescriptor columnFamily) { + public CompletableFuture addColumnFamily(TableName tableName, + ColumnFamilyDescriptor columnFamily) { return this. procedureCall(tableName, RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), + ng.newNonce()), + (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), new AddColumnFamilyProcedureBiConsumer(tableName)); } @@ -814,25 +810,26 @@ public CompletableFuture addColumnFamily( public CompletableFuture deleteColumnFamily(TableName tableName, byte[] columnFamily) { return this. 
procedureCall(tableName, RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.deleteColumn(c, req, done), - (resp) -> resp.getProcId(), new DeleteColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.deleteColumn(c, req, done), (resp) -> resp.getProcId(), + new DeleteColumnFamilyProcedureBiConsumer(tableName)); } @Override public CompletableFuture modifyColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily) { + ColumnFamilyDescriptor columnFamily) { return this. procedureCall(tableName, RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), - (resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.modifyColumn(c, req, done), (resp) -> resp.getProcId(), + new ModifyColumnFamilyProcedureBiConsumer(tableName)); } @Override public CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT) { - return this - . procedureCall( - tableName, + return this. procedureCall(tableName, RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, ng.getNonceGroup(), ng.newNonce()), (s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done), @@ -866,48 +863,45 @@ public CompletableFuture deleteNamespace(String name) { @Override public CompletableFuture getNamespaceDescriptor(String name) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . - call(controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), - (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), (resp) - -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call(controller, stub, + RequestConverter.buildGetNamespaceDescriptorRequest(name), + (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))) + .call(); } @Override public CompletableFuture> listNamespaces() { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call( - controller, stub, ListNamespacesRequest.newBuilder().build(), (s, c, req, - done) -> s.listNamespaces(c, req, done), - (resp) -> resp.getNamespaceNameList())).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, ListNamespacesRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaces(c, req, done), + (resp) -> resp.getNamespaceNameList())) + .call(); } @Override public CompletableFuture> listNamespaceDescriptors() { - return this - .> newMasterCaller().action((controller, stub) -> this - .> call(controller, stub, - ListNamespaceDescriptorsRequest.newBuilder().build(), (s, c, req, done) -> - s.listNamespaceDescriptors(c, req, done), - (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, + ListNamespaceDescriptorsRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaceDescriptors(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))) + .call(); } @Override public CompletableFuture> getRegions(ServerName serverName) { return this.> newAdminCaller() - .action((controller, stub) -> this - .> adminCall( - controller, stub, RequestConverter.buildGetOnlineRegionRequest(), - (s, c, req, done) -> s.getOnlineRegion(c, req, done), - resp -> ProtobufUtil.getRegionInfos(resp))) - .serverName(serverName).call(); + .action((controller, stub) -> this.> adminCall(controller, stub, + RequestConverter.buildGetOnlineRegionRequest(), + (s, c, req, done) -> s.getOnlineRegion(c, req, done), + resp -> ProtobufUtil.getRegionInfos(resp))) + .serverName(serverName).call(); } @Override @@ -917,11 +911,11 @@ public CompletableFuture> getRegions(TableName tableName) { .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) .collect(Collectors.toList())); } else { - return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName) - .thenApply( - locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); + return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).thenApply( + locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); } } + @Override public CompletableFuture flush(TableName tableName) { return flush(tableName, null); @@ -946,8 +940,9 @@ public CompletableFuture flush(TableName tableName, byte[] columnFamily) { if (columnFamily != null) { props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily)); } - addListener(execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), - props), (ret, err3) -> { + addListener( + execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), props), + (ret, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { @@ -971,8 +966,8 @@ public CompletableFuture flushRegion(byte[] regionName) { public CompletableFuture flushRegion(byte[] regionName, byte[] columnFamily) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." 
+ "If you don't specify a columnFamily, use flushRegion(regionName) instead"); - return flushRegionInternal(regionName, columnFamily, false) - .thenAccept(r -> {}); + return flushRegionInternal(regionName, columnFamily, false).thenAccept(r -> { + }); } /** @@ -981,8 +976,8 @@ public CompletableFuture flushRegion(byte[] regionName, byte[] columnFamil * As it exposes the protobuf message, please do NOT try to expose it as a public * API. */ - CompletableFuture flushRegionInternal(byte[] regionName, - byte[] columnFamily, boolean writeFlushWALMarker) { + CompletableFuture flushRegionInternal(byte[] regionName, byte[] columnFamily, + boolean writeFlushWALMarker) { CompletableFuture future = new CompletableFuture<>(); addListener(getRegionLocation(regionName), (location, err) -> { if (err != null) { @@ -995,14 +990,14 @@ CompletableFuture flushRegionInternal(byte[] regionName, .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } - addListener( - flush(serverName, location.getRegion(), columnFamily, writeFlushWALMarker), + addListener(flush(serverName, location.getRegion(), columnFamily, writeFlushWALMarker), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { future.complete(ret); - }}); + } + }); }); return future; } @@ -1010,10 +1005,10 @@ CompletableFuture flushRegionInternal(byte[] regionName, private CompletableFuture flush(ServerName serverName, RegionInfo regionInfo, byte[] columnFamily, boolean writeFlushWALMarker) { return this. newAdminCaller().serverName(serverName) - .action((controller, stub) -> this - . adminCall(controller, stub, - RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), - columnFamily, writeFlushWALMarker), + .action((controller, stub) -> this. adminCall(controller, stub, + RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), columnFamily, + writeFlushWALMarker), (s, c, req, done) -> s.flushRegion(c, req, done), resp -> resp)) .call(); } @@ -1028,11 +1023,9 @@ public CompletableFuture flushRegionServer(ServerName sn) { } List> compactFutures = new ArrayList<>(); if (hRegionInfos != null) { - hRegionInfos.forEach( - region -> compactFutures.add( - flush(sn, region, null, false).thenAccept(r -> {}) - ) - ); + hRegionInfos + .forEach(region -> compactFutures.add(flush(sn, region, null, false).thenAccept(r -> { + }))); } addListener(CompletableFuture.allOf( compactFutures.toArray(new CompletableFuture[compactFutures.size()])), (ret, err2) -> { @@ -1053,9 +1046,9 @@ public CompletableFuture compact(TableName tableName, CompactType compactT @Override public CompletableFuture compact(TableName tableName, byte[] columnFamily, - CompactType compactType) { + CompactType compactType) { Preconditions.checkNotNull(columnFamily, "columnFamily is null. " - + "If you don't specify a columnFamily, use compact(TableName) instead"); + + "If you don't specify a columnFamily, use compact(TableName) instead"); return compact(tableName, columnFamily, false, compactType); } @@ -1067,7 +1060,7 @@ public CompletableFuture compactRegion(byte[] regionName) { @Override public CompletableFuture compactRegion(byte[] regionName, byte[] columnFamily) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." 
- + " If you don't specify a columnFamily, use compactRegion(regionName) instead"); + + " If you don't specify a columnFamily, use compactRegion(regionName) instead"); return compactRegion(regionName, columnFamily, false); } @@ -1078,9 +1071,9 @@ public CompletableFuture majorCompact(TableName tableName, CompactType com @Override public CompletableFuture majorCompact(TableName tableName, byte[] columnFamily, - CompactType compactType) { + CompactType compactType) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." - + "If you don't specify a columnFamily, use compact(TableName) instead"); + + "If you don't specify a columnFamily, use compact(TableName) instead"); return compact(tableName, columnFamily, true, compactType); } @@ -1092,7 +1085,7 @@ public CompletableFuture majorCompactRegion(byte[] regionName) { @Override public CompletableFuture majorCompactRegion(byte[] regionName, byte[] columnFamily) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." - + " If you don't specify a columnFamily, use majorCompactRegion(regionName) instead"); + + " If you don't specify a columnFamily, use majorCompactRegion(regionName) instead"); return compactRegion(regionName, columnFamily, true); } @@ -1130,7 +1123,7 @@ private CompletableFuture compactRegionServer(ServerName sn, boolean major } private CompletableFuture compactRegion(byte[] regionName, byte[] columnFamily, - boolean major) { + boolean major) { CompletableFuture future = new CompletableFuture<>(); addListener(getRegionLocation(regionName), (location, err) -> { if (err != null) { @@ -1164,8 +1157,10 @@ private CompletableFuture> getTableHRegionLocations(TableN addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> { if (err != null) { future.completeExceptionally(err); - } else if (metaRegions == null || metaRegions.isEmpty() || - metaRegions.getDefaultRegionLocation() == null) { + } else if ( + metaRegions == null || metaRegions.isEmpty() + || metaRegions.getDefaultRegionLocation() == null + ) { future.completeExceptionally(new IOException("meta region does not found")); } else { future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation())); @@ -1182,7 +1177,7 @@ private CompletableFuture> getTableHRegionLocations(TableN * Compact column family of a table, Asynchronous operation even if CompletableFuture.get() */ private CompletableFuture compact(TableName tableName, byte[] columnFamily, boolean major, - CompactType compactType) { + CompactType compactType) { CompletableFuture future = new CompletableFuture<>(); switch (compactType) { @@ -1236,24 +1231,23 @@ private CompletableFuture compact(TableName tableName, byte[] columnFamily * Compact the region at specific region server. */ private CompletableFuture compact(final ServerName sn, final RegionInfo hri, - final boolean major, byte[] columnFamily) { - return this - . newAdminCaller() - .serverName(sn) - .action( - (controller, stub) -> this. adminCall( - controller, stub, RequestConverter.buildCompactRegionRequest(hri.getRegionName(), - major, columnFamily), (s, c, req, done) -> s.compactRegion(c, req, done), - resp -> null)).call(); + final boolean major, byte[] columnFamily) { + return this. newAdminCaller().serverName(sn) + .action((controller, stub) -> this. 
adminCall(controller, stub, + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, columnFamily), + (s, c, req, done) -> s.compactRegion(c, req, done), resp -> null)) + .call(); } private byte[] toEncodeRegionName(byte[] regionName) { - return RegionInfo.isEncodedRegionName(regionName) ? regionName : - Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); + return RegionInfo.isEncodedRegionName(regionName) + ? regionName + : Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); } private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference tableName, - CompletableFuture result) { + CompletableFuture result) { addListener(getRegionLocation(encodeRegionName), (location, err) -> { if (err != null) { result.completeExceptionally(err); @@ -1269,8 +1263,8 @@ private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference
    isSplitEnabled() { } private CompletableFuture setSplitOrMergeOn(boolean enabled, boolean synchronous, - MasterSwitchType switchType) { + MasterSwitchType switchType) { SetSplitOrMergeEnabledRequest request = RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType); return this. newMasterCaller() - .action((controller, stub) -> this - . call(controller, - stub, request, (s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done), + .action((controller, stub) -> this. call(controller, stub, request, + (s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done), (resp) -> resp.getPrevValueList().get(0))) .call(); } private CompletableFuture isSplitOrMergeOn(MasterSwitchType switchType) { IsSplitOrMergeEnabledRequest request = - RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType); - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, request, - (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), - (resp) -> resp.getEnabled())).call(); + RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType); + return this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, request, + (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), (resp) -> resp.getEnabled())) + .call(); } @Override @@ -1358,9 +1349,8 @@ public CompletableFuture mergeRegions(List nameOfRegionsToMerge, b } addListener( - this.procedureCall(tableName, request, - MasterService.Interface::mergeTableRegions, MergeTableRegionsResponse::getProcId, - new MergeTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::mergeTableRegions, + MergeTableRegionsResponse::getProcId, new MergeTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1406,8 +1396,10 @@ public CompletableFuture split(TableName tableName) { for (HRegionLocation h : rl.getRegionLocations()) { if (h != null && h.getServerName() != null) { RegionInfo hri = h.getRegion(); - if (hri == null || hri.isSplitParent() || - hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { + if ( + hri == null || hri.isSplitParent() + || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID + ) { continue; } splitFutures.add(split(hri, null)); @@ -1470,9 +1462,8 @@ public CompletableFuture splitRegion(byte[] regionName) { } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); @@ -1504,9 +1495,8 @@ public CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint) } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. 
" + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); @@ -1515,8 +1505,10 @@ public CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint) .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } - if (regionInfo.getStartKey() != null && - Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) { + if ( + regionInfo.getStartKey() != null + && Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0 + ) { future.completeExceptionally( new IllegalArgumentException("should not give a splitkey which equals to startkey!")); return; @@ -1545,9 +1537,8 @@ private CompletableFuture split(final RegionInfo hri, byte[] splitPoint) { } addListener( - this.procedureCall(tableName, - request, MasterService.Interface::splitRegion, SplitTableRegionResponse::getProcId, - new SplitTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::splitRegion, + SplitTableRegionResponse::getProcId, new SplitTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1591,8 +1582,8 @@ public CompletableFuture unassign(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, + .action(((controller, stub) -> this. call(controller, stub, RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) .call(), @@ -1617,8 +1608,8 @@ public CompletableFuture offline(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, + .action(((controller, stub) -> this. call(controller, stub, RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) .call(), @@ -1689,50 +1680,49 @@ private CompletableFuture moveRegion(RegionInfo regionInfo, MoveRegionRequ @Override public CompletableFuture setQuota(QuotaSettings quota) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call(controller, - stub, QuotaSettings.buildSetQuotaRequestProto(quota), - (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call(controller, + stub, QuotaSettings.buildSetQuotaRequestProto(quota), + (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)) + .call(); } @Override public CompletableFuture> getQuota(QuotaFilter filter) { CompletableFuture> future = new CompletableFuture<>(); Scan scan = QuotaTableUtil.makeScan(filter); - this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build() - .scan(scan, new AdvancedScanResultConsumer() { - List settings = new ArrayList<>(); + this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build().scan(scan, + new AdvancedScanResultConsumer() { + List settings = new ArrayList<>(); - @Override - public void onNext(Result[] results, ScanController controller) { - for (Result result : results) { - try { - QuotaTableUtil.parseResultToCollection(result, settings); - } catch (IOException e) { - controller.terminate(); - future.completeExceptionally(e); - } + @Override + public void onNext(Result[] results, ScanController controller) { + for (Result result : results) { + try { + QuotaTableUtil.parseResultToCollection(result, settings); + } catch (IOException e) { + controller.terminate(); + future.completeExceptionally(e); } } + } - @Override - public void onError(Throwable error) { - future.completeExceptionally(error); - } + @Override + public void onError(Throwable error) { + future.completeExceptionally(error); + } - @Override - public void onComplete() { - future.complete(settings); - } - }); + @Override + public void onComplete() { + future.complete(settings); + } + }); return future; } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return this. procedureCall( RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled), (s, c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> resp.getProcId(), @@ -1765,18 +1755,20 @@ public CompletableFuture disableReplicationPeer(String peerId) { @Override public CompletableFuture getReplicationPeerConfig(String peerId) { - return this. newMasterCaller().action((controller, stub) -> this - . - call(controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), - (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), - (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, + RequestConverter.buildGetReplicationPeerConfigRequest(peerId), + (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), + (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))) + .call(); } @Override public CompletableFuture updateReplicationPeerConfig(String peerId, - ReplicationPeerConfig peerConfig) { - return this - . procedureCall( + ReplicationPeerConfig peerConfig) { + return this. procedureCall( RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig), (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done), (resp) -> resp.getProcId(), @@ -1785,19 +1777,19 @@ public CompletableFuture updateReplicationPeerConfig(String peerId, @Override public CompletableFuture transitReplicationPeerSyncReplicationState(String peerId, - SyncReplicationState clusterState) { + SyncReplicationState clusterState) { return this. 
procedureCall( + TransitReplicationPeerSyncReplicationStateResponse> procedureCall( RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId, clusterState), - (s, c, req, done) -> s.transitReplicationPeerSyncReplicationState(c, req, done), - (resp) -> resp.getProcId(), new ReplicationProcedureBiConsumer(peerId, - () -> "TRANSIT_REPLICATION_PEER_SYNCHRONOUS_REPLICATION_STATE")); + (s, c, req, done) -> s.transitReplicationPeerSyncReplicationState(c, req, done), + (resp) -> resp.getProcId(), new ReplicationProcedureBiConsumer(peerId, + () -> "TRANSIT_REPLICATION_PEER_SYNCHRONOUS_REPLICATION_STATE")); } @Override public CompletableFuture appendReplicationPeerTableCFs(String id, - Map> tableCfs) { + Map> tableCfs) { if (tableCfs == null) { return failedFuture(new ReplicationException("tableCfs is null")); } @@ -1819,7 +1811,7 @@ public CompletableFuture appendReplicationPeerTableCFs(String id, @Override public CompletableFuture removeReplicationPeerTableCFs(String id, - Map> tableCfs) { + Map> tableCfs) { if (tableCfs == null) { return failedFuture(new ReplicationException("tableCfs is null")); } @@ -1857,17 +1849,16 @@ public CompletableFuture> listReplicationPeers( return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(pattern)); } - private CompletableFuture> listReplicationPeers( - ListReplicationPeersRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this.> call(controller, stub, request, - (s, c, req, done) -> s.listReplicationPeers(c, req, done), - (resp) -> resp.getPeerDescList().stream() - .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) - .collect(Collectors.toList()))).call(); + private CompletableFuture> + listReplicationPeers(ListReplicationPeersRequest request) { + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, request, + (s, c, req, done) -> s.listReplicationPeers(c, req, done), + (resp) -> resp.getPeerDescList().stream() + .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) + .collect(Collectors.toList()))) + .call(); } @Override @@ -1903,13 +1894,11 @@ public CompletableFuture snapshot(SnapshotDescription snapshotDesc) { return failedFuture(e); } CompletableFuture future = new CompletableFuture<>(); - final SnapshotRequest request = - SnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) - .setNonce(ng.newNonce()).build(); + final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot) + .setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()).build(); addListener(this. newMasterCaller() - .action((controller, stub) -> - this. call(controller, stub, - request, (s, c, req, done) -> s.snapshot(c, req, done), resp -> resp)) + .action((controller, stub) -> this. call( + controller, stub, request, (s, c, req, done) -> s.snapshot(c, req, done), resp -> resp)) .call(), (resp, err) -> { if (err != null) { future.completeExceptionally(err); @@ -1923,8 +1912,8 @@ this. call(controller, stub // This is for keeping compatibility with old implementation. // If there is a procId field in the response, then the snapshot will be operated with a // SnapshotProcedure, otherwise the snapshot will be coordinated by zk. 
- private void waitSnapshotFinish(SnapshotDescription snapshot, - CompletableFuture future, SnapshotResponse resp) { + private void waitSnapshotFinish(SnapshotDescription snapshot, CompletableFuture future, + SnapshotResponse resp) { if (resp.hasProcId()) { getProcedureResult(resp.getProcId(), future, 0); addListener(future, new SnapshotProcedureBiConsumer(snapshot.getTableName())); @@ -1946,17 +1935,16 @@ public void run(Timeout timeout) throws Exception { future.complete(null); } else { // retry again after pauseTime. - long pauseTime = ConnectionUtils - .getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); + long pauseTime = + ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); pauseTime = Math.min(pauseTime, maxPauseTime); - AsyncConnectionImpl.RETRY_TIMER - .newTimeout(this, pauseTime, TimeUnit.MILLISECONDS); + AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime, TimeUnit.MILLISECONDS); } }); } else { - future.completeExceptionally(new SnapshotCreationException( - "Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:" - + expectedTimeout + " ms", snapshot)); + future + .completeExceptionally(new SnapshotCreationException("Snapshot '" + snapshot.getName() + + "' wasn't completed in expectedTime:" + expectedTimeout + " ms", snapshot)); } } }; @@ -1966,15 +1954,13 @@ public void run(Timeout timeout) throws Exception { @Override public CompletableFuture isSnapshotFinished(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - IsSnapshotDoneRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, + IsSnapshotDoneRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())) + .call(); } @Override @@ -1987,7 +1973,7 @@ public CompletableFuture restoreSnapshot(String snapshotName) { @Override public CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) { + boolean restoreAcl) { CompletableFuture future = new CompletableFuture<>(); addListener(listSnapshots(Pattern.compile(snapshotName)), (snapshotDescriptions, err) -> { if (err != null) { @@ -2034,7 +2020,7 @@ public CompletableFuture restoreSnapshot(String snapshotName, boolean take } private CompletableFuture restoreSnapshot(String snapshotName, TableName tableName, - boolean takeFailSafeSnapshot, boolean restoreAcl) { + boolean takeFailSafeSnapshot, boolean restoreAcl) { if (takeFailSafeSnapshot) { CompletableFuture future = new CompletableFuture<>(); // Step.1 Take a snapshot of the current state @@ -2055,16 +2041,14 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t (void2, err2) -> { if (err2 != null) { // Step.3.a Something went wrong during the restore and try to rollback. - addListener( - internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl, - null), - (void3, err3) -> { + addListener(internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, + restoreAcl, null), (void3, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { String msg = - "Restore snapshot=" + snapshotName + " failed. 
Rollback to snapshot=" + - failSafeSnapshotSnapshotName + " succeeded."; + "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" + + failSafeSnapshotSnapshotName + " succeeded."; future.completeExceptionally(new RestoreSnapshotException(msg, err2)); } }); @@ -2090,7 +2074,7 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t } private void completeConditionalOnFuture(CompletableFuture dependentFuture, - CompletableFuture parentFuture) { + CompletableFuture parentFuture) { addListener(parentFuture, (res, err) -> { if (err != null) { dependentFuture.completeExceptionally(err); @@ -2102,7 +2086,7 @@ private void completeConditionalOnFuture(CompletableFuture dependentFutur @Override public CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) { + boolean restoreAcl, String customSFT) { CompletableFuture future = new CompletableFuture<>(); addListener(tableExists(tableName), (exists, err) -> { if (err != null) { @@ -2118,7 +2102,7 @@ public CompletableFuture cloneSnapshot(String snapshotName, TableName tabl } private CompletableFuture internalRestoreSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) { + boolean restoreAcl, String customSFT) { SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder() .setName(snapshotName).setTable(tableName.getNameAsString()).build(); try { @@ -2129,13 +2113,13 @@ private CompletableFuture internalRestoreSnapshot(String snapshotName, Tab RestoreSnapshotRequest.Builder builder = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); - if(customSFT != null){ + if (customSFT != null) { builder.setCustomSFT(customSFT); } - return waitProcedureResult(this. newMasterCaller().action((controller, stub) -> this - . call(controller, stub, - builder.build(), - (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) + return waitProcedureResult(this. newMasterCaller() + .action((controller, stub) -> this. call(controller, stub, builder.build(), + (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) .call()); } @@ -2152,33 +2136,34 @@ public CompletableFuture> listSnapshots(Pattern patter } private CompletableFuture> getCompletedSnapshots(Pattern pattern) { - return this.> newMasterCaller().action((controller, stub) -> this - .> - call(controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), + return this.> newMasterCaller() + .action((controller, stub) -> this.> call(controller, stub, + GetCompletedSnapshotsRequest.newBuilder().build(), (s, c, req, done) -> s.getCompletedSnapshots(c, req, done), resp -> ProtobufUtil.toSnapshotDescriptionList(resp, pattern))) - .call(); + .call(); } @Override public CompletableFuture> listTableSnapshots(Pattern tableNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." - + " If you don't specify a tableNamePattern, use listSnapshots() instead"); + + " If you don't specify a tableNamePattern, use listSnapshots() instead"); return getCompletedSnapshots(tableNamePattern, null); } @Override public CompletableFuture> listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." 
- + " If you don't specify a tableNamePattern, use listSnapshots(Pattern) instead"); + + " If you don't specify a tableNamePattern, use listSnapshots(Pattern) instead"); Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null." - + " If you don't specify a snapshotNamePattern, use listTableSnapshots(Pattern) instead"); + + " If you don't specify a snapshotNamePattern, use listTableSnapshots(Pattern) instead"); return getCompletedSnapshots(tableNamePattern, snapshotNamePattern); } - private CompletableFuture> getCompletedSnapshots( - Pattern tableNamePattern, Pattern snapshotNamePattern) { + private CompletableFuture> + getCompletedSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) { CompletableFuture> future = new CompletableFuture<>(); addListener(listTableNames(tableNamePattern, false), (tableNames, err) -> { if (err != null) { @@ -2219,29 +2204,29 @@ public CompletableFuture deleteSnapshots() { @Override public CompletableFuture deleteSnapshots(Pattern snapshotNamePattern) { Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null." - + " If you don't specify a snapshotNamePattern, use deleteSnapshots() instead"); + + " If you don't specify a snapshotNamePattern, use deleteSnapshots() instead"); return internalDeleteSnapshots(null, snapshotNamePattern); } @Override public CompletableFuture deleteTableSnapshots(Pattern tableNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." - + " If you don't specify a tableNamePattern, use deleteSnapshots() instead"); + + " If you don't specify a tableNamePattern, use deleteSnapshots() instead"); return internalDeleteSnapshots(tableNamePattern, null); } @Override public CompletableFuture deleteTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null." - + " If you don't specify a tableNamePattern, use deleteSnapshots(Pattern) instead"); + + " If you don't specify a tableNamePattern, use deleteSnapshots(Pattern) instead"); Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null." - + " If you don't specify a snapshotNamePattern, use deleteSnapshots(Pattern) instead"); + + " If you don't specify a snapshotNamePattern, use deleteSnapshots(Pattern) instead"); return internalDeleteSnapshots(tableNamePattern, snapshotNamePattern); } private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) { + Pattern snapshotNamePattern) { CompletableFuture> listSnapshotsFuture; if (tableNamePattern == null) { listSnapshotsFuture = getCompletedSnapshots(snapshotNamePattern); @@ -2271,20 +2256,18 @@ private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern } private CompletableFuture internalDeleteSnapshot(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - DeleteSnapshotRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.deleteSnapshot(c, req, done), resp -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, + DeleteSnapshotRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.deleteSnapshot(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture execProcedure(String signature, String instance, - Map props) { + Map props) { CompletableFuture future = new CompletableFuture<>(); ProcedureDescription procDesc = ProtobufUtil.buildProcedureDescription(signature, instance, props); @@ -2323,8 +2306,8 @@ public void run(Timeout timeout) throws Exception { } }); } else { - future.completeExceptionally(new IOException("Procedure '" + signature + " : " + - instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); + future.completeExceptionally(new IOException("Procedure '" + signature + " : " + + instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); } } }; @@ -2336,29 +2319,28 @@ public void run(Timeout timeout) throws Exception { @Override public CompletableFuture execProcedureWithReturn(String signature, String instance, - Map props) { + Map props) { ProcedureDescription proDesc = - ProtobufUtil.buildProcedureDescription(signature, instance, props); + ProtobufUtil.buildProcedureDescription(signature, instance, props); return this. newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, ExecProcedureRequest.newBuilder().setProcedure(proDesc).build(), - (s, c, req, done) -> s.execProcedureWithRet(c, req, done), - resp -> resp.hasReturnData() ? resp.getReturnData().toByteArray() : null)) - .call(); + .action((controller, stub) -> this. call( + controller, stub, ExecProcedureRequest.newBuilder().setProcedure(proDesc).build(), + (s, c, req, done) -> s.execProcedureWithRet(c, req, done), + resp -> resp.hasReturnData() ? resp.getReturnData().toByteArray() : null)) + .call(); } @Override public CompletableFuture isProcedureFinished(String signature, String instance, - Map props) { + Map props) { ProcedureDescription proDesc = - ProtobufUtil.buildProcedureDescription(signature, instance, props); + ProtobufUtil.buildProcedureDescription(signature, instance, props); return this. newMasterCaller() - .action((controller, stub) -> this - . call(controller, stub, - IsProcedureDoneRequest.newBuilder().setProcedure(proDesc).build(), - (s, c, req, done) -> s.isProcedureDone(c, req, done), resp -> resp.getDone())) - .call(); + .action( + (controller, stub) -> this. call( + controller, stub, IsProcedureDoneRequest.newBuilder().setProcedure(proDesc).build(), + (s, c, req, done) -> s.isProcedureDone(c, req, done), resp -> resp.getDone())) + .call(); } @Override @@ -2367,66 +2349,61 @@ public CompletableFuture abortProcedure(long procId, boolean mayInterru (controller, stub) -> this. call( controller, stub, AbortProcedureRequest.newBuilder().setProcId(procId).build(), (s, c, req, done) -> s.abortProcedure(c, req, done), resp -> resp.getIsProcedureAborted())) - .call(); + .call(); } @Override public CompletableFuture getProcedures() { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, GetProceduresRequest.newBuilder().build(), - (s, c, req, done) -> s.getProcedures(c, req, done), - resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, GetProceduresRequest.newBuilder().build(), + (s, c, req, done) -> s.getProcedures(c, req, done), + resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))) + .call(); } @Override public CompletableFuture getLocks() { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, GetLocksRequest.newBuilder().build(), - (s, c, req, done) -> s.getLocks(c, req, done), - resp -> ProtobufUtil.toLockJson(resp.getLockList()))).call(); + return this. newMasterCaller() + .action( + (controller, stub) -> this. call(controller, + stub, GetLocksRequest.newBuilder().build(), (s, c, req, done) -> s.getLocks(c, req, done), + resp -> ProtobufUtil.toLockJson(resp.getLockList()))) + .call(); } @Override - public CompletableFuture decommissionRegionServers( - List servers, boolean offload) { + public CompletableFuture decommissionRegionServers(List servers, + boolean offload) { return this. newMasterCaller() - .action((controller, stub) -> this - . call( - controller, stub, - RequestConverter.buildDecommissionRegionServersRequest(servers, offload), - (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call(controller, stub, + RequestConverter.buildDecommissionRegionServersRequest(servers, offload), + (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture> listDecommissionedRegionServers() { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, ListDecommissionedRegionServersRequest.newBuilder().build(), - (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), - resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()))) - .call(); + .action((controller, stub) -> this.> call(controller, stub, + ListDecommissionedRegionServersRequest.newBuilder().build(), + (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), + resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList()))) + .call(); } @Override public CompletableFuture recommissionRegionServer(ServerName server, - List encodedRegionNames) { + List encodedRegionNames) { return this. newMasterCaller() - .action((controller, stub) -> - this. call( - controller, stub, RequestConverter.buildRecommissionRegionServerRequest( - server, encodedRegionNames), (s, c, req, done) -> s.recommissionRegionServer( - c, req, done), resp -> null)).call(); + .action((controller, stub) -> this. call(controller, stub, + RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames), + (s, c, req, done) -> s.recommissionRegionServer(c, req, done), resp -> null)) + .call(); } /** @@ -2458,8 +2435,8 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR // it needs to throw out IllegalArgumentException in case tableName is passed in. 
RegionInfo regionInfo; try { - regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( - regionNameOrEncodedRegionName); + regionInfo = + CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); } catch (IOException ioe) { return failedFuture(new IllegalArgumentException(ioe.getMessage())); } @@ -2483,8 +2460,8 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR } if (!location.isPresent() || location.get().getRegion() == null) { returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); } else { returnedFuture.complete(location.get()); } @@ -2503,10 +2480,12 @@ private CompletableFuture getRegionInfo(byte[] regionNameOrEncodedRe return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - if (Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || + if ( Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { + RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionNameOrEncodedRegionName, + RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()) + ) { return CompletableFuture.completedFuture(RegionInfoBuilder.FIRST_META_REGIONINFO); } @@ -2547,7 +2526,7 @@ private void verifySplitKeys(byte[][] splitKeys) { } if (lastKey != null && Bytes.equals(splitKey, lastKey)) { throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: " - + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); + + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); } lastKey = splitKey; } @@ -2580,7 +2559,7 @@ private static abstract class TableProcedureBiConsumer extends ProcedureBiConsum String getDescription() { return "Operation: " + getOperationType() + ", " + "Table Name: " - + tableName.getNameWithNamespaceInclAsString(); + + tableName.getNameWithNamespaceInclAsString(); } @Override @@ -2829,7 +2808,6 @@ String getOperationType() { } } - private static class ReplicationProcedureBiConsumer extends ProcedureBiConsumer { private final String peerId; private final Supplier getOperation; @@ -2869,9 +2847,9 @@ private CompletableFuture waitProcedureResult(CompletableFuture proc private void getProcedureResult(long procId, CompletableFuture future, int retries) { addListener( this. newMasterCaller() - .action((controller, stub) -> this - . call( - controller, stub, GetProcedureResultRequest.newBuilder().setProcId(procId).build(), + .action((controller, stub) -> this. call(controller, stub, + GetProcedureResultRequest.newBuilder().setProcId(procId).build(), (s, c, req, done) -> s.getProcedureResult(c, req, done), (resp) -> resp)) .call(), (response, error) -> { @@ -2917,14 +2895,13 @@ public CompletableFuture getClusterMetrics() { @Override public CompletableFuture getClusterMetrics(EnumSet
    servers, String groupName) { return this. newMasterCaller() - .action((controller, stub) -> this. - call(controller, stub, - RequestConverter.buildMoveServersRequest(servers, groupName), - (s, c, req, done) -> s.moveServers(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call( + controller, stub, RequestConverter.buildMoveServersRequest(servers, groupName), + (s, c, req, done) -> s.moveServers(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture addRSGroup(String groupName) { return this. newMasterCaller() - .action(((controller, stub) -> this. - call(controller, stub, - AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), - (s, c, req, done) -> s.addRSGroup(c, req, done), resp -> null))) - .call(); + .action( + ((controller, stub) -> this. call(controller, + stub, AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), + (s, c, req, done) -> s.addRSGroup(c, req, done), resp -> null))) + .call(); } @Override public CompletableFuture removeRSGroup(String groupName) { return this. newMasterCaller() - .action((controller, stub) -> this. - call(controller, stub, - RemoveRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), - (s, c, req, done) -> s.removeRSGroup(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call( + controller, stub, RemoveRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), + (s, c, req, done) -> s.removeRSGroup(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture balanceRSGroup(String groupName, BalanceRequest request) { - return this.newMasterCaller().action( - (controller, stub) -> this.call( - controller, stub, ProtobufUtil.createBalanceRSGroupRequest(groupName, request), + return this. newMasterCaller() + .action((controller, stub) -> this. 
call(controller, stub, + ProtobufUtil.createBalanceRSGroupRequest(groupName, request), MasterService.Interface::balanceRSGroup, ProtobufUtil::toBalanceResponse)) .call(); } @@ -4103,78 +4038,67 @@ public CompletableFuture balanceRSGroup(String groupName, @Override public CompletableFuture> listRSGroups() { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, ListRSGroupInfosRequest.getDefaultInstance(), - (s, c, req, done) -> s.listRSGroupInfos(c, req, done), - resp -> resp.getRSGroupInfoList().stream() - .map(r -> ProtobufUtil.toGroupInfo(r)) - .collect(Collectors.toList()))) - .call(); + .action((controller, stub) -> this.> call(controller, stub, ListRSGroupInfosRequest.getDefaultInstance(), + (s, c, req, done) -> s.listRSGroupInfos(c, req, done), resp -> resp.getRSGroupInfoList() + .stream().map(r -> ProtobufUtil.toGroupInfo(r)).collect(Collectors.toList()))) + .call(); } private CompletableFuture> getSlowLogResponses( - final Map filterParams, final Set serverNames, final int limit, - final String logType) { + final Map filterParams, final Set serverNames, final int limit, + final String logType) { if (CollectionUtils.isEmpty(serverNames)) { return CompletableFuture.completedFuture(Collections.emptyList()); } - return CompletableFuture.supplyAsync(() -> serverNames.stream() - .map((ServerName serverName) -> - getSlowLogResponseFromServer(serverName, filterParams, limit, logType)) - .map(CompletableFuture::join) - .flatMap(List::stream) - .collect(Collectors.toList())); + return CompletableFuture.supplyAsync(() -> serverNames + .stream().map((ServerName serverName) -> getSlowLogResponseFromServer(serverName, + filterParams, limit, logType)) + .map(CompletableFuture::join).flatMap(List::stream).collect(Collectors.toList())); } private CompletableFuture> getSlowLogResponseFromServer(ServerName serverName, - Map filterParams, int limit, String logType) { - return this.>newAdminCaller().action((controller, stub) -> this - .adminCall(controller, stub, + Map filterParams, int limit, String logType) { + return this.> newAdminCaller() + .action((controller, stub) -> this.adminCall(controller, stub, RequestConverter.buildSlowLogResponseRequest(filterParams, limit, logType), AdminService.Interface::getLogEntries, ProtobufUtil::toSlowLogPayloads)) .serverName(serverName).call(); } @Override - public CompletableFuture> clearSlowLogResponses( - @Nullable Set serverNames) { + public CompletableFuture> + clearSlowLogResponses(@Nullable Set serverNames) { if (CollectionUtils.isEmpty(serverNames)) { return CompletableFuture.completedFuture(Collections.emptyList()); } - List> clearSlowLogResponseList = serverNames.stream() - .map(this::clearSlowLogsResponses) - .collect(Collectors.toList()); + List> clearSlowLogResponseList = + serverNames.stream().map(this::clearSlowLogsResponses).collect(Collectors.toList()); return convertToFutureOfList(clearSlowLogResponseList); } private CompletableFuture clearSlowLogsResponses(final ServerName serverName) { - return this.newAdminCaller() - .action(((controller, stub) -> this - .adminCall( - controller, stub, RequestConverter.buildClearSlowLogResponseRequest(), - AdminService.Interface::clearSlowLogsResponses, - ProtobufUtil::toClearSlowLogPayload)) - ).serverName(serverName).call(); + return this. 
newAdminCaller() + .action(((controller, stub) -> this.adminCall(controller, stub, + RequestConverter.buildClearSlowLogResponseRequest(), + AdminService.Interface::clearSlowLogsResponses, ProtobufUtil::toClearSlowLogPayload))) + .serverName(serverName).call(); } - private static CompletableFuture> convertToFutureOfList( - List> futures) { + private static CompletableFuture> + convertToFutureOfList(List> futures) { CompletableFuture allDoneFuture = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); - return allDoneFuture.thenApply(v -> - futures.stream() - .map(CompletableFuture::join) - .collect(Collectors.toList()) - ); + return allDoneFuture + .thenApply(v -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList())); } @Override public CompletableFuture> listTablesInRSGroup(String groupName) { return this.> newMasterCaller() - .action((controller, stub) -> this - .> call(controller, - stub, ListTablesInRSGroupRequest.newBuilder().setGroupName(groupName).build(), + .action((controller, stub) -> this.> call(controller, stub, + ListTablesInRSGroupRequest.newBuilder().setGroupName(groupName).build(), (s, c, req, done) -> s.listTablesInRSGroup(c, req, done), resp -> resp.getTableNameList() .stream().map(ProtobufUtil::toTableName).collect(Collectors.toList()))) .call(); @@ -4184,41 +4108,39 @@ public CompletableFuture> listTablesInRSGroup(String groupName) public CompletableFuture, List>> getConfiguredNamespacesAndTablesInRSGroup(String groupName) { return this., List>> newMasterCaller() - .action((controller, stub) -> this - ., List>> call(controller, stub, + .action((controller, stub) -> this., List>> call(controller, stub, GetConfiguredNamespacesAndTablesInRSGroupRequest.newBuilder().setGroupName(groupName) .build(), - (s, c, req, done) -> s.getConfiguredNamespacesAndTablesInRSGroup(c, req, done), - resp -> Pair.newPair(resp.getNamespaceList(), resp.getTableNameList().stream() - .map(ProtobufUtil::toTableName).collect(Collectors.toList())))) + (s, c, req, done) -> s.getConfiguredNamespacesAndTablesInRSGroup(c, req, done), + resp -> Pair.newPair(resp.getNamespaceList(), resp.getTableNameList().stream() + .map(ProtobufUtil::toTableName).collect(Collectors.toList())))) .call(); } @Override public CompletableFuture getRSGroup(Address hostPort) { return this. newMasterCaller() - .action(((controller, stub) -> this - . call( - controller, stub, - GetRSGroupInfoOfServerRequest.newBuilder() + .action( + ((controller, stub) -> this. call(controller, stub, GetRSGroupInfoOfServerRequest.newBuilder() .setServer(HBaseProtos.ServerName.newBuilder().setHostName(hostPort.getHostname()) .setPort(hostPort.getPort()).build()) - .build(), - (s, c, req, done) -> s.getRSGroupInfoOfServer(c, req, done), - resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) + .build(), (s, c, req, done) -> s.getRSGroupInfoOfServer(c, req, done), + resp -> resp.hasRSGroupInfo() + ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) + : null))) .call(); } @Override public CompletableFuture removeServersFromRSGroup(Set
    servers) { return this. newMasterCaller() - .action((controller, stub) -> this. - call(controller, stub, - RequestConverter.buildRemoveServersRequest(servers), - (s, c, req, done) -> s.removeServers(c, req, done), resp -> null)) - .call(); + .action((controller, stub) -> this. call( + controller, stub, RequestConverter.buildRemoveServersRequest(servers), + (s, c, req, done) -> s.removeServers(c, req, done), resp -> null)) + .call(); } @Override @@ -4248,10 +4170,11 @@ public CompletableFuture setRSGroup(Set tables, String groupNam List newTableDescriptors = new ArrayList<>(); for (TableDescriptor td : tableDescriptions) { newTableDescriptors - .add(TableDescriptorBuilder.newBuilder(td).setRegionServerGroup(groupName).build()); + .add(TableDescriptorBuilder.newBuilder(td).setRegionServerGroup(groupName).build()); } - addListener(CompletableFuture.allOf( - newTableDescriptors.stream().map(this::modifyTable).toArray(CompletableFuture[]::new)), + addListener( + CompletableFuture.allOf( + newTableDescriptors.stream().map(this::modifyTable).toArray(CompletableFuture[]::new)), (v, e) -> { if (e != null) { future.completeExceptionally(e); @@ -4265,21 +4188,21 @@ public CompletableFuture setRSGroup(Set tables, String groupNam @Override public CompletableFuture getRSGroup(TableName table) { - return this. newMasterCaller().action(((controller, stub) -> this - . call(controller, - stub, - GetRSGroupInfoOfTableRequest.newBuilder().setTableName(ProtobufUtil.toProtoTableName(table)) - .build(), - (s, c, req, done) -> s.getRSGroupInfoOfTable(c, req, done), - resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) + return this. newMasterCaller() + .action(((controller, stub) -> this. call(controller, stub, + GetRSGroupInfoOfTableRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(table)).build(), + (s, c, req, done) -> s.getRSGroupInfoOfTable(c, req, done), + resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) .call(); } @Override public CompletableFuture getRSGroup(String groupName) { return this. newMasterCaller() - .action(((controller, stub) -> this - . call(controller, stub, + .action(((controller, stub) -> this. call(controller, stub, GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build(), (s, c, req, done) -> s.getRSGroupInfo(c, req, done), resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) @@ -4289,64 +4212,52 @@ public CompletableFuture getRSGroup(String groupName) { @Override public CompletableFuture renameRSGroup(String oldName, String newName) { return this. newMasterCaller() - .action( - ( - (controller, stub) -> this. call( - controller, - stub, - RenameRSGroupRequest.newBuilder().setOldRsgroupName(oldName).setNewRsgroupName(newName) - .build(), - (s, c, req, done) -> s.renameRSGroup(c, req, done), - resp -> null - ) - ) - ).call(); - } - - @Override - public CompletableFuture - updateRSGroupConfig(String groupName, Map configuration) { - UpdateRSGroupConfigRequest.Builder request = UpdateRSGroupConfigRequest.newBuilder() - .setGroupName(groupName); + .action(((controller, stub) -> this. 
call( + controller, stub, RenameRSGroupRequest.newBuilder().setOldRsgroupName(oldName) + .setNewRsgroupName(newName).build(), + (s, c, req, done) -> s.renameRSGroup(c, req, done), resp -> null))) + .call(); + } + + @Override + public CompletableFuture updateRSGroupConfig(String groupName, + Map configuration) { + UpdateRSGroupConfigRequest.Builder request = + UpdateRSGroupConfigRequest.newBuilder().setGroupName(groupName); if (configuration != null) { - configuration.entrySet().forEach(e -> - request.addConfiguration(NameStringPair.newBuilder().setName(e.getKey()) - .setValue(e.getValue()).build())); + configuration.entrySet().forEach(e -> request.addConfiguration( + NameStringPair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build())); } return this. newMasterCaller() - .action(((controller, stub) -> - this. call( - controller, stub, request.build(), - (s, c, req, done) -> s.updateRSGroupConfig(c, req, done), resp -> null)) - ).call(); + .action(((controller, stub) -> this. call(controller, stub, request.build(), + (s, c, req, done) -> s.updateRSGroupConfig(c, req, done), resp -> null))) + .call(); } private CompletableFuture> getBalancerDecisions(final int limit) { - return this.>newMasterCaller() - .action((controller, stub) -> - this.call(controller, stub, - ProtobufUtil.toBalancerDecisionRequest(limit), - MasterService.Interface::getLogEntries, ProtobufUtil::toBalancerDecisionResponse)) + return this.> newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, + ProtobufUtil.toBalancerDecisionRequest(limit), MasterService.Interface::getLogEntries, + ProtobufUtil::toBalancerDecisionResponse)) .call(); } private CompletableFuture> getBalancerRejections(final int limit) { - return this.>newMasterCaller() - .action((controller, stub) -> - this.call(controller, stub, - ProtobufUtil.toBalancerRejectionRequest(limit), - MasterService.Interface::getLogEntries, ProtobufUtil::toBalancerRejectionResponse)) + return this.> newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, + ProtobufUtil.toBalancerRejectionRequest(limit), MasterService.Interface::getLogEntries, + ProtobufUtil::toBalancerRejectionResponse)) .call(); } @Override public CompletableFuture> getLogEntries(Set serverNames, - String logType, ServerType serverType, int limit, - Map filterParams) { + String logType, ServerType serverType, int limit, Map filterParams) { if (logType == null || serverType == null) { throw new IllegalArgumentException("logType and/or serverType cannot be empty"); } - switch (logType){ + switch (logType) { case "SLOW_LOG": case "LARGE_LOG": if (ServerType.MASTER.equals(serverType)) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java index e51ae136e5d2..af0b20908031 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture; import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFutures; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.context.Scope; @@ -58,10 +59,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; import org.apache.hbase.thirdparty.io.netty.util.Timer; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; @@ -128,8 +131,8 @@ class RawAsyncTableImpl implements AsyncTable { this.pauseNs = builder.pauseNs; if (builder.pauseNsForServerOverloaded < builder.pauseNs) { LOG.warn( - "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + - " the normal pause value {} ms, use the greater one instead", + "Configured value of pauseNsForServerOverloaded is {} ms, which is less than" + + " the normal pause value {} ms, use the greater one instead", TimeUnit.NANOSECONDS.toMillis(builder.pauseNsForServerOverloaded), TimeUnit.NANOSECONDS.toMillis(builder.pauseNs)); this.pauseNsForServerOverloaded = builder.pauseNs; @@ -138,8 +141,9 @@ class RawAsyncTableImpl implements AsyncTable { } this.maxAttempts = builder.maxAttempts; this.startLogErrorsCnt = builder.startLogErrorsCnt; - this.defaultScannerCaching = tableName.isSystemTable() ? conn.connConf.getMetaScannerCaching() : - conn.connConf.getScannerCaching(); + this.defaultScannerCaching = tableName.isSystemTable() + ? conn.connConf.getMetaScannerCaching() + : conn.connConf.getScannerCaching(); this.defaultScannerMaxResultSize = conn.connConf.getScannerMaxResultSize(); } @@ -216,9 +220,9 @@ private SingleRequestCallerBuilder newCaller(byte[] row, int priority, lo private CompletableFuture get(Get get, int replicaId) { return this. newCaller(get, readRpcTimeoutNs) - .action((controller, loc, stub) -> ConnectionUtils - . call(controller, loc, stub, get, - RequestConverter::buildGetRequest, (s, c, req, done) -> s.get(c, req, done), + .action((controller, loc, stub) -> ConnectionUtils. call(controller, loc, stub, get, RequestConverter::buildGetRequest, + (s, c, req, done) -> s.get(c, req, done), (c, resp) -> ProtobufUtil.toResult(resp.getResult(), c.cellScanner()))) .replicaId(replicaId).call(); } @@ -229,8 +233,7 @@ private TableOperationSpanBuilder newTableOperationSpanBuilder() { @Override public CompletableFuture get(Get get) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(get); + final Supplier supplier = newTableOperationSpanBuilder().setOperation(get); return tracedFuture( () -> timelineConsistentRead(conn.getLocator(), tableName, get, get.getRow(), RegionLocateType.CURRENT, replicaId -> get(get, replicaId), readRpcTimeoutNs, @@ -241,8 +244,7 @@ public CompletableFuture get(Get get) { @Override public CompletableFuture put(Put put) { validatePut(put, conn.connConf.getMaxKeyValueSize()); - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(put); + final Supplier supplier = newTableOperationSpanBuilder().setOperation(put); return tracedFuture(() -> this. newCaller(put, writeRpcTimeoutNs) .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub, put, RequestConverter::buildMutateRequest)) @@ -251,21 +253,17 @@ public CompletableFuture put(Put put) { @Override public CompletableFuture delete(Delete delete) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(delete); - return tracedFuture( - () -> this. 
newCaller(delete, writeRpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, - stub, delete, RequestConverter::buildMutateRequest)) - .call(), - supplier); + final Supplier supplier = newTableOperationSpanBuilder().setOperation(delete); + return tracedFuture(() -> this. newCaller(delete, writeRpcTimeoutNs) + .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, + stub, delete, RequestConverter::buildMutateRequest)) + .call(), supplier); } @Override public CompletableFuture append(Append append) { checkHasFamilies(append); - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(append); + final Supplier supplier = newTableOperationSpanBuilder().setOperation(append); return tracedFuture(() -> { long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); @@ -280,8 +278,7 @@ public CompletableFuture append(Append append) { @Override public CompletableFuture increment(Increment increment) { checkHasFamilies(increment); - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(increment); + final Supplier supplier = newTableOperationSpanBuilder().setOperation(increment); return tracedFuture(() -> { long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); @@ -314,8 +311,8 @@ public CheckAndMutateBuilderImpl(byte[] row, byte[] family) { @Override public CheckAndMutateBuilder qualifier(byte[] qualifier) { - this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" + - " an empty byte array, or just do not call this method if you want a null qualifier"); + this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" + + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -340,8 +337,8 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) } private void preCheck() { - Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" + - " calling ifNotExists/ifEquals/ifMatches before executing the request"); + Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" + + " calling ifNotExists/ifEquals/ifMatches before executing the request"); } @Override @@ -384,16 +381,14 @@ public CompletableFuture thenMutate(RowMutations mutations) { final Supplier supplier = newTableOperationSpanBuilder() .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) .setContainerOperations(mutations); - return tracedFuture( - () -> RawAsyncTableImpl.this - . newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, - mutations, - (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, family, qualifier, op, value, - null, timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), - CheckAndMutateResult::isSuccess)) - .call(), - supplier); + return tracedFuture(() -> RawAsyncTableImpl.this + . 
newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) + .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, + mutations, + (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, family, qualifier, op, value, + null, timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), + CheckAndMutateResult::isSuccess)) + .call(), supplier); } } @@ -430,12 +425,11 @@ public CompletableFuture thenPut(Put put) { .setContainerOperations(put); return tracedFuture( () -> RawAsyncTableImpl.this. newCaller(row, put.getPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, - stub, put, - (rn, p) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null, - filter, timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE), - (c, r) -> r.getProcessed())) - .call(), + .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, put, + (rn, p) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null, filter, + timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE), + (c, r) -> r.getProcessed())) + .call(), supplier); } @@ -460,16 +454,14 @@ public CompletableFuture thenMutate(RowMutations mutations) { final Supplier supplier = newTableOperationSpanBuilder() .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) .setContainerOperations(mutations); - return tracedFuture( - () -> RawAsyncTableImpl.this - . newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, - mutations, - (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, null, null, null, null, filter, - timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), - CheckAndMutateResult::isSuccess)) - .call(), - supplier); + return tracedFuture(() -> RawAsyncTableImpl.this + . newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs) + .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub, + mutations, + (rn, rm) -> RequestConverter.buildMultiRequest(rn, row, null, null, null, null, filter, + timeRange, rm, HConstants.NO_NONCE, HConstants.NO_NONCE), + CheckAndMutateResult::isSuccess)) + .call(), supplier); } } @@ -480,14 +472,14 @@ public CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) @Override public CompletableFuture checkAndMutate(CheckAndMutate checkAndMutate) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(checkAndMutate) + final Supplier supplier = newTableOperationSpanBuilder().setOperation(checkAndMutate) .setContainerOperations(checkAndMutate.getAction()); return tracedFuture(() -> { - if (checkAndMutate.getAction() instanceof Put || - checkAndMutate.getAction() instanceof Delete || - checkAndMutate.getAction() instanceof Increment || - checkAndMutate.getAction() instanceof Append) { + if ( + checkAndMutate.getAction() instanceof Put || checkAndMutate.getAction() instanceof Delete + || checkAndMutate.getAction() instanceof Increment + || checkAndMutate.getAction() instanceof Append + ) { Mutation mutation = (Mutation) checkAndMutate.getAction(); if (mutation instanceof Put) { validatePut((Put) mutation, conn.connConf.getMaxKeyValueSize()); @@ -513,9 +505,8 @@ public CompletableFuture checkAndMutate(CheckAndMutate che return RawAsyncTableImpl.this . newCaller(checkAndMutate.getRow(), rowMutations.getMaxPriority(), rpcTimeoutNs) - .action((controller, loc, stub) -> RawAsyncTableImpl.this - . 
mutateRow(controller, loc, stub, - rowMutations, + .action((controller, loc, stub) -> RawAsyncTableImpl.this. mutateRow(controller, loc, stub, rowMutations, (rn, rm) -> RequestConverter.buildMultiRequest(rn, checkAndMutate.getRow(), checkAndMutate.getFamily(), checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), checkAndMutate.getValue(), @@ -534,13 +525,10 @@ public CompletableFuture checkAndMutate(CheckAndMutate che @Override public List> checkAndMutate(List checkAndMutates) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(checkAndMutates) + final Supplier supplier = newTableOperationSpanBuilder().setOperation(checkAndMutates) .setContainerOperations(checkAndMutates); - return tracedFutures( - () -> batch(checkAndMutates, rpcTimeoutNs).stream() - .map(f -> f.thenApply(r -> (CheckAndMutateResult) r)).collect(toList()), - supplier); + return tracedFutures(() -> batch(checkAndMutates, rpcTimeoutNs).stream() + .map(f -> f.thenApply(r -> (CheckAndMutateResult) r)).collect(toList()), supplier); } // We need the MultiRequest when constructing the org.apache.hadoop.hbase.client.MultiResponse, @@ -567,8 +555,9 @@ public void run(MultiResponse resp) { loc.getServerName(), multiResp); Throwable ex = multiResp.getException(regionName); if (ex != null) { - future.completeExceptionally(ex instanceof IOException ? ex : - new IOException( + future.completeExceptionally(ex instanceof IOException + ? ex + : new IOException( "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), ex)); } else { future.complete( @@ -591,9 +580,8 @@ public CompletableFuture mutateRow(RowMutations mutations) { validatePutsInRowMutations(mutations, conn.connConf.getMaxKeyValueSize()); long nonceGroup = conn.getNonceGenerator().getNonceGroup(); long nonce = conn.getNonceGenerator().newNonce(); - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(mutations) - .setContainerOperations(mutations); + final Supplier supplier = + newTableOperationSpanBuilder().setOperation(mutations).setContainerOperations(mutations); return tracedFuture( () -> this . 
newCaller(mutations.getRow(), mutations.getMaxPriority(), writeRpcTimeoutNs) @@ -620,8 +608,7 @@ private Scan setDefaultScanConfig(Scan scan) { public void scan(Scan scan, AdvancedScanResultConsumer consumer) { new AsyncClientScanner(setDefaultScanConfig(scan), consumer, tableName, conn, retryTimer, pauseNs, pauseNsForServerOverloaded, maxAttempts, scanTimeoutNs, readRpcTimeoutNs, - startLogErrorsCnt) - .start(); + startLogErrorsCnt).start(); } private long resultSize2CacheSize(long maxResultSize) { @@ -666,33 +653,29 @@ public void onComplete() { @Override public List> get(List gets) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(gets) + final Supplier supplier = newTableOperationSpanBuilder().setOperation(gets) .setContainerOperations(HBaseSemanticAttributes.Operation.GET); return tracedFutures(() -> batch(gets, readRpcTimeoutNs), supplier); } @Override public List> put(List puts) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(puts) + final Supplier supplier = newTableOperationSpanBuilder().setOperation(puts) .setContainerOperations(HBaseSemanticAttributes.Operation.PUT); return tracedFutures(() -> voidMutate(puts), supplier); } @Override public List> delete(List deletes) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(deletes) + final Supplier supplier = newTableOperationSpanBuilder().setOperation(deletes) .setContainerOperations(HBaseSemanticAttributes.Operation.DELETE); return tracedFutures(() -> voidMutate(deletes), supplier); } @Override public List> batch(List actions) { - final Supplier supplier = newTableOperationSpanBuilder() - .setOperation(actions) - .setContainerOperations(actions); + final Supplier supplier = + newTableOperationSpanBuilder().setOperation(actions).setContainerOperations(actions); return tracedFutures(() -> batch(actions, rpcTimeoutNs), supplier); } @@ -721,8 +704,7 @@ private List> batch(List actions, long r .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts) - .startLogErrorsCnt(startLogErrorsCnt).call(); + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).call(); } @Override @@ -814,10 +796,8 @@ private void onLocateComplete(Function stubMaker, if (locateFinished(region, endKey, endKeyInclusive)) { locateFinished.set(true); } else { - addListener( - conn.getLocator().getRegionLocation(tableName, region.getEndKey(), RegionLocateType.CURRENT, - operationTimeoutNs), - (l, e) -> { + addListener(conn.getLocator().getRegionLocation(tableName, region.getEndKey(), + RegionLocateType.CURRENT, operationTimeoutNs), (l, e) -> { try (Scope ignored = span.makeCurrent()) { onLocateComplete(stubMaker, callable, callback, locs, endKey, endKeyInclusive, locateFinished, unfinishedRequest, l, e); @@ -865,9 +845,9 @@ public CoprocessorServiceBuilderImpl(Function stubMaker, @Override public CoprocessorServiceBuilderImpl fromRow(byte[] startKey, boolean inclusive) { this.startKey = Preconditions.checkNotNull(startKey, - "startKey is null. Consider using" + - " an empty byte array, or just do not call this method if you want to start selection" + - " from the first region"); + "startKey is null. 
Consider using" + + " an empty byte array, or just do not call this method if you want to start selection" + + " from the first region"); this.startKeyInclusive = inclusive; return this; } @@ -875,9 +855,9 @@ public CoprocessorServiceBuilderImpl fromRow(byte[] startKey, boolean incl @Override public CoprocessorServiceBuilderImpl toRow(byte[] endKey, boolean inclusive) { this.endKey = Preconditions.checkNotNull(endKey, - "endKey is null. Consider using" + - " an empty byte array, or just do not call this method if you want to continue" + - " selection to the last region"); + "endKey is null. Consider using" + + " an empty byte array, or just do not call this method if you want to continue" + + " selection to the last region"); this.endKeyInclusive = inclusive; return this; } @@ -885,12 +865,10 @@ public CoprocessorServiceBuilderImpl toRow(byte[] endKey, boolean inclusiv @Override public void execute() { final Span span = newTableOperationSpanBuilder() - .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC) - .build(); + .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC).build(); try (Scope ignored = span.makeCurrent()) { - final RegionLocateType regionLocateType = startKeyInclusive - ? RegionLocateType.CURRENT - : RegionLocateType.AFTER; + final RegionLocateType regionLocateType = + startKeyInclusive ? RegionLocateType.CURRENT : RegionLocateType.AFTER; final CompletableFuture future = conn.getLocator() .getRegionLocation(tableName, startKey, regionLocateType, operationTimeoutNs); addListener(future, (loc, error) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java index 752aa29e7cf9..9489979b84d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; import java.io.IOException; @@ -31,14 +32,15 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; /** * The implementation of a region based coprocessor rpc channel. 
@@ -61,7 +63,7 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel { private byte[] lastRegion; RegionCoprocessorRpcChannelImpl(AsyncConnectionImpl conn, TableName tableName, RegionInfo region, - byte[] row, long rpcTimeoutNs, long operationTimeoutNs) { + byte[] row, long rpcTimeoutNs, long operationTimeoutNs) { this.conn = conn; this.tableName = tableName; this.region = region; @@ -71,13 +73,13 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel { } private CompletableFuture rpcCall(MethodDescriptor method, Message request, - Message responsePrototype, HBaseRpcController controller, HRegionLocation loc, - ClientService.Interface stub) { + Message responsePrototype, HBaseRpcController controller, HRegionLocation loc, + ClientService.Interface stub) { final Context context = Context.current(); CompletableFuture future = new CompletableFuture<>(); if (region != null && !Bytes.equals(loc.getRegion().getRegionName(), region.getRegionName())) { - future.completeExceptionally(new DoNotRetryIOException("Region name is changed, expected " + - region.getRegionNameAsString() + ", actual " + loc.getRegion().getRegionNameAsString())); + future.completeExceptionally(new DoNotRetryIOException("Region name is changed, expected " + + region.getRegionNameAsString() + ", actual " + loc.getRegion().getRegionNameAsString())); return future; } CoprocessorServiceRequest csr = CoprocessorRpcUtils.getCoprocessorServiceRequest(method, @@ -101,18 +103,15 @@ private CompletableFuture rpcCall(MethodDescriptor method, Message requ @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { final Context context = Context.current(); - addListener( - conn.callerFactory. single().table(tableName).row(row) - .locateType(RegionLocateType.CURRENT).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) - .action((c, l, s) -> { - try (Scope ignored = context.makeCurrent()) { - return rpcCall(method, request, responsePrototype, c, l, s); - } - }).call(), - (r, e) -> { + addListener(conn.callerFactory. single().table(tableName).row(row) + .locateType(RegionLocateType.CURRENT).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS).action((c, l, s) -> { + try (Scope ignored = context.makeCurrent()) { + return rpcCall(method, request, responsePrototype, c, l, s); + } + }).call(), (r, e) -> { try (Scope ignored = context.makeCurrent()) { if (e != null) { setCoprocessorError(controller, e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java index 711f97b503b5..4bf726079463 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.util.Bytes; @@ -26,14 +24,14 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; - /** - * Represents a coprocessor service method execution against a single region. While coprocessor - * service calls are performed against a region, this class implements {@link Row} in order to - * make use of the AsyncProcess framework for batching multi-region calls per region server. - * - *

- * Note: This class should not be instantiated directly. Use
- * HTable#batchCoprocessorService instead.
+ * Represents a coprocessor service method execution against a single region. While coprocessor
+ * service calls are performed against a region, this class implements {@link Row} in order to make
+ * use of the AsyncProcess framework for batching multi-region calls per region server.
+ * <p/>
+ * Note: This class should not be instantiated directly. Use HTable#batchCoprocessorService
+ * instead.
+ * <p/>

    */ @InterfaceAudience.Private public class RegionCoprocessorServiceExec implements Row { @@ -47,8 +45,8 @@ public class RegionCoprocessorServiceExec implements Row { private final MethodDescriptor method; private final Message request; - public RegionCoprocessorServiceExec(byte[] region, byte[] startKey, - MethodDescriptor method, Message request) { + public RegionCoprocessorServiceExec(byte[] region, byte[] startKey, MethodDescriptor method, + Message request) { this.region = region; this.startKey = startKey; this.method = method; @@ -103,14 +101,9 @@ public boolean equals(Object obj) { @Override public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("region:") - .append(Bytes.toStringBinary(region)) - .append(", startKey:") - .append(Bytes.toStringBinary(startKey)) - .append(", method:") - .append(method.getFullName()) - .append(", request:") - .append(request); + builder.append("region:").append(Bytes.toStringBinary(region)).append(", startKey:") + .append(Bytes.toStringBinary(startKey)).append(", method:").append(method.getFullName()) + .append(", request:").append(request); return builder.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 55a91db93979..ca1db64719e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,33 +36,30 @@ import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.IOUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Information about a region. A region is a range of keys in the whole keyspace - * of a table, an identifier (a timestamp) for differentiating between subset - * ranges (after region split) and a replicaId for differentiating the instance - * for the same range and some status information about the region. - * - * The region has a unique name which consists of the following fields: + * Information about a region. A region is a range of keys in the whole keyspace of a table, an + * identifier (a timestamp) for differentiating between subset ranges (after region split) and a + * replicaId for differentiating the instance for the same range and some status information about + * the region. The region has a unique name which consists of the following fields: *
- * <li>tableName : The name of the table</li>
- * <li>startKey : The startKey for the region.</li>
- * <li>regionId : A timestamp when the region is created.</li>
- * <li>replicaId : An id starting from 0 to differentiate replicas of the
- * same region range but hosted in separated servers. The same region range can
- * be hosted in multiple locations.</li>
- * <li>encodedName : An MD5 encoded string for the region name.</li>
+ * <li>tableName : The name of the table</li>
+ * <li>startKey : The startKey for the region.</li>
+ * <li>regionId : A timestamp when the region is created.</li>
+ * <li>replicaId : An id starting from 0 to differentiate replicas of the same region range but
+ * hosted in separated servers. The same region range can be hosted in multiple locations.</li>
+ * <li>encodedName : An MD5 encoded string for the region name.</li>
  * </ul>
- *
- * <br>Other than the fields in the region name, region info contains:
+ * <br>
+ * Other than the fields in the region name, region info contains:
  * <ul>
- * <li>endKey : the endKey for the region (exclusive)</li>
- * <li>split : Whether the region is split</li>
- * <li>offline : Whether the region is offline</li>
+ * <li>endKey : the endKey for the region (exclusive)</li>
+ * <li>split : Whether the region is split</li>
+ * <li>offline : Whether the region is offline</li>
  * </ul>
    - * */ @InterfaceAudience.Public public interface RegionInfo extends Comparable { @@ -73,12 +69,12 @@ public interface RegionInfo extends Comparable { @Deprecated @InterfaceAudience.Private // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896 - RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), - RegionInfo.DEFAULT_REPLICA_ID); + RegionInfo UNDEFINED = + new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), RegionInfo.DEFAULT_REPLICA_ID); /** - * Separator used to demarcate the encodedName in a region name - * in the new format. See description on new format above. + * Separator used to demarcate the encodedName in a region name in the new format. See description + * on new format above. */ @InterfaceAudience.Private int ENC_SEPARATOR = '.'; @@ -90,79 +86,73 @@ public interface RegionInfo extends Comparable { int DEFAULT_REPLICA_ID = 0; /** - * to keep appended int's sorted in string format. Only allows 2 bytes - * to be sorted for replicaId. + * to keep appended int's sorted in string format. Only allows 2 bytes to be sorted for replicaId. */ @InterfaceAudience.Private String REPLICA_ID_FORMAT = "%04X"; @InterfaceAudience.Private - byte REPLICA_ID_DELIMITER = (byte)'_'; + byte REPLICA_ID_DELIMITER = (byte) '_'; @InterfaceAudience.Private String INVALID_REGION_NAME_FORMAT_MESSAGE = "Invalid regionName format"; @InterfaceAudience.Private - Comparator COMPARATOR - = (RegionInfo lhs, RegionInfo rhs) -> { - if (rhs == null) { - return 1; - } + Comparator COMPARATOR = (RegionInfo lhs, RegionInfo rhs) -> { + if (rhs == null) { + return 1; + } - // Are regions of same table? - int result = lhs.getTable().compareTo(rhs.getTable()); - if (result != 0) { - return result; - } + // Are regions of same table? + int result = lhs.getTable().compareTo(rhs.getTable()); + if (result != 0) { + return result; + } - // Compare start keys. - result = Bytes.compareTo(lhs.getStartKey(), rhs.getStartKey()); - if (result != 0) { - return result; - } + // Compare start keys. + result = Bytes.compareTo(lhs.getStartKey(), rhs.getStartKey()); + if (result != 0) { + return result; + } - // Compare end keys. - result = Bytes.compareTo(lhs.getEndKey(), rhs.getEndKey()); + // Compare end keys. + result = Bytes.compareTo(lhs.getEndKey(), rhs.getEndKey()); - if (result != 0) { - if (lhs.getStartKey().length != 0 - && lhs.getEndKey().length == 0) { - return 1; // this is last region - } - if (rhs.getStartKey().length != 0 - && rhs.getEndKey().length == 0) { - return -1; // o is the last region - } - return result; + if (result != 0) { + if (lhs.getStartKey().length != 0 && lhs.getEndKey().length == 0) { + return 1; // this is last region } - - // regionId is usually milli timestamp -- this defines older stamps - // to be "smaller" than newer stamps in sort order. - if (lhs.getRegionId() > rhs.getRegionId()) { - return 1; - } else if (lhs.getRegionId() < rhs.getRegionId()) { - return -1; + if (rhs.getStartKey().length != 0 && rhs.getEndKey().length == 0) { + return -1; // o is the last region } + return result; + } - int replicaDiff = lhs.getReplicaId() - rhs.getReplicaId(); - if (replicaDiff != 0) { - return replicaDiff; - } + // regionId is usually milli timestamp -- this defines older stamps + // to be "smaller" than newer stamps in sort order. 
+ if (lhs.getRegionId() > rhs.getRegionId()) { + return 1; + } else if (lhs.getRegionId() < rhs.getRegionId()) { + return -1; + } - if (lhs.isOffline() == rhs.isOffline()) { - return 0; - } - if (lhs.isOffline()) { - return -1; - } + int replicaDiff = lhs.getReplicaId() - rhs.getReplicaId(); + if (replicaDiff != 0) { + return replicaDiff; + } - return 1; - }; + if (lhs.isOffline() == rhs.isOffline()) { + return 0; + } + if (lhs.isOffline()) { + return -1; + } + return 1; + }; /** - * @return Return a short, printable name for this region - * (usually encoded name) for us logging. + * @return Return a short, printable name for this region (usually encoded name) for us logging. */ String getShortNameToLog(); @@ -175,7 +165,7 @@ public interface RegionInfo extends Comparable { * @return the regionName as an array of bytes. * @see #getRegionNameAsString() */ - byte [] getRegionName(); + byte[] getRegionName(); /** * @return Region name as a String for use in logging, etc. @@ -190,17 +180,17 @@ public interface RegionInfo extends Comparable { /** * @return the encoded region name as an array of bytes. */ - byte [] getEncodedNameAsBytes(); + byte[] getEncodedNameAsBytes(); /** * @return the startKey. */ - byte [] getStartKey(); + byte[] getStartKey(); /** * @return the endKey. */ - byte [] getEndKey(); + byte[] getEndKey(); /** * @return current table name of the region @@ -239,10 +229,9 @@ public interface RegionInfo extends Comparable { boolean isMetaRegion(); /** - * @return true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * @return true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will + * return true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. end < start) */ boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); @@ -255,28 +244,26 @@ public interface RegionInfo extends Comparable { /** * Does region name contain its encoded name? * @param regionName region name - * @return boolean indicating if this a new format region - * name which contains its encoded name. + * @return boolean indicating if this a new format region name which contains its encoded name. */ @InterfaceAudience.Private static boolean hasEncodedName(final byte[] regionName) { // check if region name ends in ENC_SEPARATOR - return (regionName.length >= 1) && - (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); + return (regionName.length >= 1) + && (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); } /** * @return the encodedName */ @InterfaceAudience.Private - static String encodeRegionName(final byte [] regionName) { + static String encodeRegionName(final byte[] regionName) { String encodedName; if (hasEncodedName(regionName)) { // region is in new format: // ,,/encodedName/ - encodedName = Bytes.toString(regionName, - regionName.length - MD5_HEX_LENGTH - 1, - MD5_HEX_LENGTH); + encodedName = + Bytes.toString(regionName, regionName.length - MD5_HEX_LENGTH - 1, MD5_HEX_LENGTH); } else { // old format region name. First hbase:meta region also // use this format.EncodedName is the JenkinsHash value. 
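A short note on the RegionInfo.COMPARATOR body reindented above: the ordering it implements is table name first, then start key, then end key (an empty end key marks the last region of a table), then region id, replica id and finally the offline flag. The sketch below, which assumes the public RegionInfoBuilder setStartKey/setEndKey setters and is not part of this patch, shows how that ordering surfaces when sorting.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionOrderingSketch {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("demo");            // hypothetical table
    RegionInfo first = RegionInfoBuilder.newBuilder(tn)
      .setEndKey(Bytes.toBytes("m")).build();            // empty start key -> first region
    RegionInfo last = RegionInfoBuilder.newBuilder(tn)
      .setStartKey(Bytes.toBytes("m")).build();          // empty end key -> last region
    List<RegionInfo> regions = new ArrayList<>();
    regions.add(last);
    regions.add(first);
    regions.sort(RegionInfo.COMPARATOR);                 // yields [first, last]
    regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
  }
}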
@@ -308,16 +295,16 @@ static String getRegionNameAsString(@CheckForNull RegionInfo ri, byte[] regionNa } /** - * @return Return a String of short, printable names for hris - * (usually encoded name) for us logging. + * @return Return a String of short, printable names for hris (usually encoded name) + * for us logging. */ - static String getShortNameToLog(RegionInfo...hris) { + static String getShortNameToLog(RegionInfo... hris) { return getShortNameToLog(Arrays.asList(hris)); } /** * @return Return a String of short, printable names for hris (usually encoded name) - * for us logging. + * for us logging. */ static String getShortNameToLog(final List ris) { return ris.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()).toString(); @@ -332,7 +319,7 @@ static String getShortNameToLog(final List ris) { // This method should never be used. Its awful doing parse from bytes. // It is fallback in case we can't get the tablename any other way. Could try removing it. // Keeping it Audience Private so can remove at later date. - static TableName getTable(final byte [] regionName) { + static TableName getTable(final byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { @@ -343,7 +330,7 @@ static TableName getTable(final byte [] regionName) { if (offset <= 0) { throw new IllegalArgumentException("offset=" + offset); } - byte[] buff = new byte[offset]; + byte[] buff = new byte[offset]; System.arraycopy(regionName, 0, buff, 0, offset); return TableName.valueOf(buff); } @@ -375,7 +362,7 @@ public static boolean isEncodedRegionName(byte[] regionName) { Integer.parseInt(encodedName); // If this is a valid integer, it could be hbase:meta's encoded region name. return true; - } catch(NumberFormatException er) { + } catch (NumberFormatException er) { return false; } } @@ -384,21 +371,19 @@ public static boolean isEncodedRegionName(byte[] regionName) { } /** - * @return A deserialized {@link RegionInfo} - * or null if we failed deserialize or passed bytes null + * @return A deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null */ @InterfaceAudience.Private - static RegionInfo parseFromOrNull(final byte [] bytes) { + static RegionInfo parseFromOrNull(final byte[] bytes) { if (bytes == null) return null; return parseFromOrNull(bytes, 0, bytes.length); } /** - * @return A deserialized {@link RegionInfo} or null - * if we failed deserialize or passed bytes null + * @return A deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null */ @InterfaceAudience.Private - static RegionInfo parseFromOrNull(final byte [] bytes, int offset, int len) { + static RegionInfo parseFromOrNull(final byte[] bytes, int offset, int len) { if (bytes == null || len <= 0) return null; try { return parseFrom(bytes, offset, len); @@ -412,20 +397,20 @@ static RegionInfo parseFromOrNull(final byte [] bytes, int offset, int len) { * @return A deserialized {@link RegionInfo} */ @InterfaceAudience.Private - static RegionInfo parseFrom(final byte [] bytes) throws DeserializationException { + static RegionInfo parseFrom(final byte[] bytes) throws DeserializationException { if (bytes == null) return null; return parseFrom(bytes, 0, bytes.length); } /** - * @param bytes A pb RegionInfo serialized with a pb magic prefix. + * @param bytes A pb RegionInfo serialized with a pb magic prefix. 
* @param offset starting point in the byte array - * @param len length to read on the byte array + * @param len length to read on the byte array * @return A deserialized {@link RegionInfo} */ @InterfaceAudience.Private - static RegionInfo parseFrom(final byte [] bytes, int offset, int len) - throws DeserializationException { + static RegionInfo parseFrom(final byte[] bytes, int offset, int len) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes, offset, len)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -442,14 +427,12 @@ static RegionInfo parseFrom(final byte [] bytes, int offset, int len) } /** - * Check whether two regions are adjacent; i.e. lies just before or just - * after in a table. + * Check whether two regions are adjacent; i.e. lies just before or just after in a table. * @return true if two regions are adjacent */ static boolean areAdjacent(RegionInfo regionA, RegionInfo regionB) { if (regionA == null || regionB == null) { - throw new IllegalArgumentException( - "Can't check whether adjacent for null region"); + throw new IllegalArgumentException("Can't check whether adjacent for null region"); } if (!regionA.getTable().equals(regionB.getTable())) { return false; @@ -467,8 +450,8 @@ static boolean areAdjacent(RegionInfo regionA, RegionInfo regionB) { * @return This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) */ - static byte [] toByteArray(RegionInfo ri) { - byte [] bytes = ProtobufUtil.toRegionInfo(ri).toByteArray(); + static byte[] toByteArray(RegionInfo ri) { + byte[] bytes = ProtobufUtil.toRegionInfo(ri).toByteArray(); return ProtobufUtil.prependPBMagic(bytes); } @@ -476,7 +459,7 @@ static boolean areAdjacent(RegionInfo regionA, RegionInfo regionB) { * Use logging. * @param encodedRegionName The encoded regionname. * @return hbase:meta if passed 1028785192 else returns - * encodedRegionName + * encodedRegionName */ static String prettyPrint(final String encodedRegionName) { if (encodedRegionName.equals("1028785192")) { @@ -487,67 +470,67 @@ static String prettyPrint(final String encodedRegionName) { /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id */ - static byte [] createRegionName(final TableName tableName, final byte[] startKey, - final long regionid, boolean newFormat) { + static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, boolean newFormat) { return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). 
* @return Region name made of passed tableName, startKey and id */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final String id, boolean newFormat) { + static byte[] createRegionName(final TableName tableName, final byte[] startKey, final String id, + boolean newFormat) { return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey, id and replicaId */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final long regionid, int replicaId, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), - replicaId, newFormat); + static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, int replicaId, boolean newFormat) { + return createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), replicaId, + newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final byte[] id, boolean newFormat) { + static byte[] createRegionName(final TableName tableName, final byte[] startKey, final byte[] id, + boolean newFormat) { return createRegionName(tableName, startKey, id, DEFAULT_REPLICA_ID, newFormat); } /** * Make a region name of passed parameters. - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId */ - static byte [] createRegionName(final TableName tableName, - final byte[] startKey, final byte[] id, final int replicaId, boolean newFormat) { - int len = tableName.getName().length + 2 + id.length + (startKey == null? 0: startKey.length); + static byte[] createRegionName(final TableName tableName, final byte[] startKey, final byte[] id, + final int replicaId, boolean newFormat) { + int len = tableName.getName().length + 2 + id.length + (startKey == null ? 
0 : startKey.length); if (newFormat) { len += MD5_HEX_LENGTH + 2; } @@ -561,7 +544,7 @@ static String prettyPrint(final String encodedRegionName) { len += 1 + replicaIdBytes.length; } - byte [] b = new byte [len]; + byte[] b = new byte[len]; int offset = tableName.getName().length; System.arraycopy(tableName.getName(), 0, b, 0, offset); @@ -589,11 +572,11 @@ static String prettyPrint(final String encodedRegionName) { // it to the byte buffer. // String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); + byte[] md5HashBytes = Bytes.toBytes(md5Hash); if (md5HashBytes.length != MD5_HEX_LENGTH) { - System.out.println("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); + System.out.println( + "MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + "; Got=" + md5HashBytes.length); } // now append the bytes '..' to the end @@ -608,40 +591,38 @@ static String prettyPrint(final String encodedRegionName) { /** * Creates a RegionInfo object for MOB data. - * * @param tableName the name of the table * @return the MOB {@link RegionInfo}. */ static RegionInfo createMobRegionInfo(TableName tableName) { // Skipping reference to RegionInfoBuilder in this class. - return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), - HConstants.EMPTY_END_ROW, false, 0, DEFAULT_REPLICA_ID, false); + return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), HConstants.EMPTY_END_ROW, false, + 0, DEFAULT_REPLICA_ID, false); } /** * Separate elements of a regionName. - * @return Array of byte[] containing tableName, startKey and id OR null if - * not parseable as a region name. + * @return Array of byte[] containing tableName, startKey and id OR null if not parseable as a + * region name. * @throws IOException if not parseable as regionName. */ - static byte [][] parseRegionName(final byte[] regionName) throws IOException { - byte [][] result = parseRegionNameOrReturnNull(regionName); + static byte[][] parseRegionName(final byte[] regionName) throws IOException { + byte[][] result = parseRegionNameOrReturnNull(regionName); if (result == null) { - throw new IOException(INVALID_REGION_NAME_FORMAT_MESSAGE + ": " + Bytes.toStringBinary(regionName)); + throw new IOException( + INVALID_REGION_NAME_FORMAT_MESSAGE + ": " + Bytes.toStringBinary(regionName)); } return result; } /** - * Separate elements of a regionName. - * Region name is of the format: - * tablename,startkey,regionIdTimestamp[_replicaId][.encodedName.]. - * Startkey can contain the delimiter (',') so we parse from the start and then parse from - * the end. - * @return Array of byte[] containing tableName, startKey and id OR null if not parseable - * as a region name. - */ - static byte [][] parseRegionNameOrReturnNull(final byte[] regionName) { + * Separate elements of a regionName. Region name is of the format: + * tablename,startkey,regionIdTimestamp[_replicaId][.encodedName.]. Startkey can + * contain the delimiter (',') so we parse from the start and then parse from the end. + * @return Array of byte[] containing tableName, startKey and id OR null if not parseable as a + * region name. 
+ */ + static byte[][] parseRegionNameOrReturnNull(final byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { @@ -658,9 +639,10 @@ static RegionInfo createMobRegionInfo(TableName tableName) { int endOffset = regionName.length; // check whether regionName contains encodedName - if (regionName.length > MD5_HEX_LENGTH + 2 && - regionName[regionName.length-1] == ENC_SEPARATOR && - regionName[regionName.length-MD5_HEX_LENGTH-2] == ENC_SEPARATOR) { + if ( + regionName.length > MD5_HEX_LENGTH + 2 && regionName[regionName.length - 1] == ENC_SEPARATOR + && regionName[regionName.length - MD5_HEX_LENGTH - 2] == ENC_SEPARATOR + ) { endOffset = endOffset - MD5_HEX_LENGTH - 2; } @@ -668,10 +650,9 @@ static RegionInfo createMobRegionInfo(TableName tableName) { byte[] replicaId = null; int idEndOffset = endOffset; for (int i = endOffset - 1; i > 0; i--) { - if (regionName[i] == REPLICA_ID_DELIMITER) { //replicaId may or may not be present + if (regionName[i] == REPLICA_ID_DELIMITER) { // replicaId may or may not be present replicaId = new byte[endOffset - i - 1]; - System.arraycopy(regionName, i + 1, replicaId, 0, - endOffset - i - 1); + System.arraycopy(regionName, i + 1, replicaId, 0, endOffset - i - 1); idEndOffset = i; // do not break, continue to search for id } @@ -683,16 +664,15 @@ static RegionInfo createMobRegionInfo(TableName tableName) { if (offset == -1) { return null; } - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - if(offset != tableName.length + 1) { + byte[] startKey = HConstants.EMPTY_BYTE_ARRAY; + if (offset != tableName.length + 1) { startKey = new byte[offset - tableName.length - 1]; System.arraycopy(regionName, tableName.length + 1, startKey, 0, - offset - tableName.length - 1); + offset - tableName.length - 1); } - byte [] id = new byte[idEndOffset - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, - idEndOffset - offset - 1); - byte [][] elements = new byte[replicaId == null ? 3 : 4][]; + byte[] id = new byte[idEndOffset - offset - 1]; + System.arraycopy(regionName, offset + 1, id, 0, idEndOffset - offset - 1); + byte[][] elements = new byte[replicaId == null ? 3 : 4][]; elements[0] = tableName; elements[1] = startKey; elements[2] = id; @@ -704,10 +684,9 @@ static RegionInfo createMobRegionInfo(TableName tableName) { /** * Serializes given RegionInfo's as a byte array. Use this instead of - * {@link RegionInfo#toByteArray(RegionInfo)} when - * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads - * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can - * be used to read back the instances. + * {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you want to use the pb + * mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). + * {@link #parseDelimitedFrom(byte[], int, int)} can be used to read back the instances. * @param infos RegionInfo objects to serialize * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. */ @@ -729,30 +708,30 @@ static byte[] toDelimitedByteArray(RegionInfo... infos) throws IOException { } /** - * Use this instead of {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you want to use - * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). 
+ * Use this instead of {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you + * want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what + * you want). * @return This instance serialized as a delimied protobuf w/ a magic pb prefix. */ - static byte [] toDelimitedByteArray(RegionInfo ri) throws IOException { + static byte[] toDelimitedByteArray(RegionInfo ri) throws IOException { return ProtobufUtil.toDelimitedByteArray(ProtobufUtil.toRegionInfo(ri)); } /** - * Parses an RegionInfo instance from the passed in stream. - * Presumes the RegionInfo was serialized to the stream with - * {@link #toDelimitedByteArray(RegionInfo)}. + * Parses an RegionInfo instance from the passed in stream. Presumes the RegionInfo was serialized + * to the stream with {@link #toDelimitedByteArray(RegionInfo)}. * @return An instance of RegionInfo. */ static RegionInfo parseFrom(final DataInputStream in) throws IOException { // I need to be able to move back in the stream if this is not a pb // serialization so I can do the Writable decoding instead. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; - if (in.markSupported()) { //read it with mark() + byte[] pbuf = new byte[pblen]; + if (in.markSupported()) { // read it with mark() in.mark(pblen); } - //assumption: if Writable serialization, it should be longer than pblen. + // assumption: if Writable serialization, it should be longer than pblen. IOUtils.readFully(in, pbuf, 0, pblen); if (ProtobufUtil.isPBMagicPrefix(pbuf)) { return ProtobufUtil.toRegionInfo(HBaseProtos.RegionInfo.parseDelimitedFrom(in)); @@ -764,13 +743,13 @@ static RegionInfo parseFrom(final DataInputStream in) throws IOException { /** * Parses all the RegionInfo instances from the passed in stream until EOF. Presumes the * RegionInfo's were serialized to the stream with oDelimitedByteArray() - * @param bytes serialized bytes + * @param bytes serialized bytes * @param offset the start offset into the byte[] buffer * @param length how far we should read into the byte[] buffer * @return All the RegionInfos that are in the byte array. Keeps reading till we hit the end. */ - static List parseDelimitedFrom(final byte[] bytes, final int offset, - final int length) throws IOException { + static List parseDelimitedFrom(final byte[] bytes, final int offset, final int length) + throws IOException { if (bytes == null) { throw new IllegalArgumentException("Can't build an object with empty bytes array"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index cc42b96fb165..ef927fd3a55b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -28,18 +28,17 @@ public class RegionInfoBuilder { /** A non-capture group so that this can be embedded. */ public static final String ENCODED_REGION_NAME_REGEX = "(?:[a-f0-9]+)"; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. + // TODO: Move NO_HASH to HStoreFile which is really the only place it is used. public static final String NO_HASH = null; public static final RegionInfo UNDEFINED = RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build(); /** - * RegionInfo for first meta region - * You cannot use this builder to make an instance of the {@link #FIRST_META_REGIONINFO}. 
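Since the javadoc being rewrapped here is about writing several RegionInfo instances to one stream in delimited protobuf form, a small sketch of the round trip through toDelimitedByteArray and parseDelimitedFrom, built with the RegionInfoBuilder touched in this same patch; the table name, keys and region ids are placeholders:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DelimitedRegionInfoSketch {
      public static void main(String[] args) throws IOException {
        TableName tn = TableName.valueOf("demoTable");
        RegionInfo first = RegionInfoBuilder.newBuilder(tn)
            .setEndKey(Bytes.toBytes("m")).setRegionId(1L).build();
        RegionInfo second = RegionInfoBuilder.newBuilder(tn)
            .setStartKey(Bytes.toBytes("m")).setRegionId(2L).build();
        // Delimited form, so several instances can be appended to one stream and read back.
        byte[] serialized = RegionInfo.toDelimitedByteArray(first, second);
        List<RegionInfo> parsed = RegionInfo.parseDelimitedFrom(serialized, 0, serialized.length);
        for (RegionInfo ri : parsed) {
          System.out.println(ri.getRegionNameAsString());
        }
      }
    }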
- * Just refer to this instance. Also, while the instance is actually a MutableRI, its type is - * just RI so the mutable methods are not available (unless you go casting); it appears - * as immutable (I tried adding Immutable type but it just makes a mess). + * RegionInfo for first meta region You cannot use this builder to make an instance of the + * {@link #FIRST_META_REGIONINFO}. Just refer to this instance. Also, while the instance is + * actually a MutableRI, its type is just RI so the mutable methods are not available (unless you + * go casting); it appears as immutable (I tried adding Immutable type but it just makes a mess). */ // TODO: How come Meta regions still do not have encoded region names? Fix. // hbase:meta,,1.1588230740 should be the hbase:meta first region name. @@ -108,8 +107,7 @@ public RegionInfoBuilder setOffline(boolean offLine) { } public RegionInfo build() { - return new MutableRegionInfo(tableName, startKey, endKey, split, - regionId, replicaId, offLine); + return new MutableRegionInfo(tableName, startKey, endKey, split, regionId, replicaId, offLine); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java index 7ced1b3072a3..58163a2d74a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,17 +17,14 @@ */ package org.apache.hadoop.hbase.client; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; - /** * Utility used composing RegionInfo for 'display'; e.g. on the web UI */ @@ -39,17 +35,16 @@ public class RegionInfoDisplay { public final static byte[] HIDDEN_START_KEY = Bytes.toBytes("hidden-start-key"); /** - * Get the descriptive name as {@link RegionState} does it but with hidden - * startkey optionally + * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally * @return descriptive string */ public static String getDescriptiveNameFromRegionStateForDisplay(RegionState state, - Configuration conf) { + Configuration conf) { if (conf.getBoolean(DISPLAY_KEYS_KEY, true)) return state.toDescriptiveString(); String descriptiveStringFromState = state.toDescriptiveString(); int idx = descriptiveStringFromState.lastIndexOf(" state="); String regionName = getRegionNameAsStringForDisplay( - RegionInfoBuilder.newBuilder(state.getRegion()).build(), conf); + RegionInfoBuilder.newBuilder(state.getRegion()).build(), conf); return regionName + descriptiveStringFromState.substring(idx); } @@ -64,10 +59,7 @@ public static byte[] getEndKeyForDisplay(RegionInfo ri, Configuration conf) { } /** - * Get the start key for display. Optionally hide the real start key. - * @param ri - * @param conf - * @return the startkey + * Get the start key for display. Optionally hide the real start key. 
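RegionInfoDisplay, whose javadoc is being tightened here, swaps the real start key for a placeholder when key display is disabled. A minimal sketch of that behaviour, assuming the DISPLAY_KEYS_KEY flag and an in-memory RegionInfo built purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionInfoDisplay;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DisplayKeysSketch {
      public static void main(String[] args) {
        RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demoTable"))
            .setStartKey(Bytes.toBytes("user-123")).setEndKey(Bytes.toBytes("user-456"))
            .setRegionId(1L).build();
        Configuration conf = HBaseConfiguration.create();
        // With key display switched off, the real start key is replaced by HIDDEN_START_KEY.
        conf.setBoolean(RegionInfoDisplay.DISPLAY_KEYS_KEY, false);
        System.out.println(Bytes.toStringBinary(RegionInfoDisplay.getStartKeyForDisplay(ri, conf)));
        System.out.println(RegionInfoDisplay.getRegionNameAsStringForDisplay(ri, conf));
      }
    }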
nn * @return the startkey */ public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); @@ -76,20 +68,15 @@ public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) { } /** - * Get the region name for display. Optionally hide the start key. - * @param ri - * @param conf - * @return region name as String + * Get the region name for display. Optionally hide the start key. nn * @return region name as + * String */ public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) { return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf)); } /** - * Get the region name for display. Optionally hide the start key. - * @param ri - * @param conf - * @return region name bytes + * Get the region name for display. Optionally hide the start key. nn * @return region name bytes */ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); @@ -99,17 +86,16 @@ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) // create a modified regionname with the startkey replaced but preserving // the other parts including the encodedname. try { - byte[][]regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); - regionNameParts[1] = HIDDEN_START_KEY; //replace the real startkey + byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); + regionNameParts[1] = HIDDEN_START_KEY; // replace the real startkey int len = 0; // get the total length for (byte[] b : regionNameParts) { len += b.length; } - byte[] encodedRegionName = - Bytes.toBytes(RegionInfo.encodeRegionName(ri.getRegionName())); + byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(ri.getRegionName())); len += encodedRegionName.length; - //allocate some extra bytes for the delimiters and the last '.' + // allocate some extra bytes for the delimiters and the last '.' byte[] modifiedName = new byte[len + regionNameParts.length + 1]; int lengthSoFar = 0; int loopCount = 0; @@ -117,17 +103,16 @@ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) System.arraycopy(b, 0, modifiedName, lengthSoFar, b.length); lengthSoFar += b.length; if (loopCount++ == 2) modifiedName[lengthSoFar++] = RegionInfo.REPLICA_ID_DELIMITER; - else modifiedName[lengthSoFar++] = HConstants.DELIMITER; + else modifiedName[lengthSoFar++] = HConstants.DELIMITER; } // replace the last comma with '.' 
modifiedName[lengthSoFar - 1] = RegionInfo.ENC_SEPARATOR; - System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar, - encodedRegionName.length); + System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar, encodedRegionName.length); lengthSoFar += encodedRegionName.length; modifiedName[lengthSoFar] = RegionInfo.ENC_SEPARATOR; return modifiedName; } catch (IOException e) { - //LOG.warn("Encountered exception " + e); + // LOG.warn("Encountered exception " + e); throw new RuntimeException(e); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java index d1ff3bc1bda7..5d1d1b53c4b8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java index 950123cd0c6d..253836455f3d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,5 +29,7 @@ */ @InterfaceAudience.Private enum RegionLocateType { - BEFORE, CURRENT, AFTER + BEFORE, + CURRENT, + AFTER } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java index 7ea6e4ada36c..40f31b06f25f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,8 +23,8 @@ import java.util.stream.Collectors; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Used to view region location information for a single HBase table. Obtain an instance from an @@ -38,12 +37,14 @@ @InterfaceAudience.Public public interface RegionLocator extends Closeable { - /** Configuration for Region Locator's mode when meta replica is configured. - * Valid values are: HedgedRead, LoadBalance, None + /** + * Configuration for Region Locator's mode when meta replica is configured. Valid values are: + * HedgedRead, LoadBalance, None */ String LOCATOR_META_REPLICAS_MODE = "hbase.locator.meta.replicas.mode"; - /** Configuration for meta replica selector when Region Locator's LoadBalance mode is configured. + /** + * Configuration for meta replica selector when Region Locator's LoadBalance mode is configured. * The default value is org.apache.hadoop.hbase.client.CatalogReplicaLoadBalanceSimpleSelector. 
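For the locator mode constant documented above, a sketch of how a client might select the LoadBalance mode and then resolve a row's location; it assumes a reachable cluster, and the table and row names are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocatorModeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // One of the values listed above: HedgedRead, LoadBalance, None.
        conf.set(RegionLocator.LOCATOR_META_REPLICAS_MODE, "LoadBalance");
        try (Connection conn = ConnectionFactory.createConnection(conf);
            RegionLocator locator = conn.getRegionLocator(TableName.valueOf("demoTable"))) {
          // reload=true bypasses the cached location, per getRegionLocation(row, reload).
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-0001"), true);
          System.out.println(loc.getServerName() + " serves " + loc.getRegion().getRegionNameAsString());
        }
      }
    }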
*/ String LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR = @@ -61,7 +62,7 @@ default HRegionLocation getRegionLocation(byte[] row) throws IOException { /** * Finds the region on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Location of the row. * @throws IOException if a remote or network exception occurs @@ -72,7 +73,7 @@ default HRegionLocation getRegionLocation(byte[] row, boolean reload) throws IOE /** * Finds the region with the given replica id on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id * @return Location of the row. * @throws IOException if a remote or network exception occurs @@ -83,9 +84,9 @@ default HRegionLocation getRegionLocation(byte[] row, int replicaId) throws IOEx /** * Finds the region with the given replica id on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param replicaId the replica id - * @param reload true to reload information or false to use cached information + * @param reload true to reload information or false to use cached information * @return Location of the row. * @throws IOException if a remote or network exception occurs */ @@ -103,7 +104,7 @@ default List getRegionLocations(byte[] row) throws IOException /** * Find all the replicas for the region on which the given row is being served. - * @param row Row to find. + * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. * @throws IOException if a remote or network exception occurs diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java index 5e21e3b4eab8..0cf0e0b913ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public void close() { @Override public HRegionLocation getRegionLocation(byte[] row, int replicaId, boolean reload) - throws IOException { + throws IOException { return get(locator.getRegionLocation(row, replicaId, reload)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java index 4d4731a9e8a7..cdb596a4b132 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,6 +24,7 @@ @InterfaceAudience.Public public class RegionOfflineException extends RegionException { private static final long serialVersionUID = 466008402L; + /** default constructor */ public RegionOfflineException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java index 09150f123fec..ea0228209500 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -33,16 +31,16 @@ public class RegionReplicaUtil { /** - * Whether or not the secondary region will wait for observing a flush / region open event - * from the primary region via async wal replication before enabling read requests. Since replayed + * Whether or not the secondary region will wait for observing a flush / region open event from + * the primary region via async wal replication before enabling read requests. Since replayed * edits from async wal replication from primary is not persisted in WAL, the memstore of the * secondary region might be non-empty at the time of close or crash. For ensuring seqId's not * "going back in time" in the secondary region replica, this should be enabled. However, in some - * cases the above semantics might be ok for some application classes. - * See HBASE-11580 for more context. + * cases the above semantics might be ok for some application classes. See HBASE-11580 for more + * context. */ - public static final String REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY - = "hbase.region.replica.wait.for.primary.flush"; + public static final String REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = + "hbase.region.replica.wait.for.primary.flush"; protected static final boolean DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH = true; /** @@ -51,14 +49,11 @@ public class RegionReplicaUtil { static final int DEFAULT_REPLICA_ID = 0; /** - * Returns the RegionInfo for the given replicaId. - * RegionInfo's correspond to a range of a table, but more than one - * "instance" of the same range can be deployed which are differentiated by - * the replicaId. - * @param regionInfo - * @param replicaId the replicaId to use - * @return an RegionInfo object corresponding to the same range (table, start and - * end key), but for the given replicaId. + * Returns the RegionInfo for the given replicaId. RegionInfo's correspond to a range of a table, + * but more than one "instance" of the same range can be deployed which are differentiated by the + * replicaId. n * @param replicaId the replicaId to use + * @return an RegionInfo object corresponding to the same range (table, start and end key), but + * for the given replicaId. 
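The javadoc above describes deriving the RegionInfo of another replica of the same key range. A compact sketch using the public RegionReplicaUtil helpers; the RegionInfo is built in memory only for the example:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class ReplicaInfoSketch {
      public static void main(String[] args) {
        RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf("demoTable"))
            .setRegionId(1L).build();
        // Same table, start key and end key; only the replica id differs.
        RegionInfo secondary = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        System.out.println(RegionReplicaUtil.isDefaultReplica(primary));    // true
        System.out.println(RegionReplicaUtil.isDefaultReplica(secondary));  // false
        System.out.println(secondary.getReplicaId());                       // 1
        // And back again to the default replica (replica id 0).
        System.out.println(
            RegionReplicaUtil.getRegionInfoForDefaultReplica(secondary).getReplicaId()); // 0
      }
    }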
*/ public static RegionInfo getRegionInfoForReplica(RegionInfo regionInfo, int replicaId) { if (regionInfo.getReplicaId() == replicaId) { @@ -68,11 +63,11 @@ public static RegionInfo getRegionInfoForReplica(RegionInfo regionInfo, int repl } /** - * Returns the RegionInfo for the default replicaId (0). RegionInfo's correspond to - * a range of a table, but more than one "instance" of the same range can be - * deployed which are differentiated by the replicaId. - * @return an RegionInfo object corresponding to the same range (table, start and - * end key), but for the default replicaId. + * Returns the RegionInfo for the default replicaId (0). RegionInfo's correspond to a range of a + * table, but more than one "instance" of the same range can be deployed which are differentiated + * by the replicaId. + * @return an RegionInfo object corresponding to the same range (table, start and end key), but + * for the default replicaId. */ public static RegionInfo getRegionInfoForDefaultReplica(RegionInfo regionInfo) { return getRegionInfoForReplica(regionInfo, DEFAULT_REPLICA_ID); @@ -85,12 +80,11 @@ public static boolean isDefaultReplica(int replicaId) { /** @return true if this region is a default replica for the region */ public static boolean isDefaultReplica(RegionInfo hri) { - return hri.getReplicaId() == DEFAULT_REPLICA_ID; + return hri.getReplicaId() == DEFAULT_REPLICA_ID; } /** - * Removes the non-default replicas from the passed regions collection - * @param regions + * Removes the non-default replicas from the passed regions collection n */ public static void removeNonDefaultRegions(Collection regions) { Iterator iterator = regions.iterator(); @@ -107,7 +101,7 @@ public static boolean isReplicasForSameRegion(RegionInfo regionInfoA, RegionInfo } private static int compareRegionInfosWithoutReplicaId(RegionInfo regionInfoA, - RegionInfo regionInfoB) { + RegionInfo regionInfoB) { int result = regionInfoA.getTable().compareTo(regionInfoB.getTable()); if (result != 0) { return result; @@ -123,13 +117,11 @@ private static int compareRegionInfosWithoutReplicaId(RegionInfo regionInfoA, result = Bytes.compareTo(regionInfoA.getEndKey(), regionInfoB.getEndKey()); if (result != 0) { - if (regionInfoA.getStartKey().length != 0 - && regionInfoA.getEndKey().length == 0) { - return 1; // this is last region + if (regionInfoA.getStartKey().length != 0 && regionInfoA.getEndKey().length == 0) { + return 1; // this is last region } - if (regionInfoB.getStartKey().length != 0 - && regionInfoB.getEndKey().length == 0) { - return -1; // o is the last region + if (regionInfoB.getStartKey().length != 0 && regionInfoB.getEndKey().length == 0) { + return -1; // o is the last region } return result; } @@ -147,7 +139,7 @@ private static int compareRegionInfosWithoutReplicaId(RegionInfo regionInfoA, /** * Create any replicas for the regions (the default replicas that was already created is passed to * the method) - * @param regions existing regions + * @param regions existing regions * @param oldReplicaCount existing replica count * @param newReplicaCount updated replica count due to modify table * @return the combined list of default and non-default replicas @@ -159,9 +151,12 @@ public static List addReplicas(final List regions, int o } List hRegionInfos = new ArrayList<>((newReplicaCount) * regions.size()); for (RegionInfo ri : regions) { - if (RegionReplicaUtil.isDefaultReplica(ri) && - (!ri.isOffline() || (!ri.isSplit() && !ri.isSplitParent()))) { - // region level replica index starts from 0. 
So if oldReplicaCount was 2 then the max replicaId for + if ( + RegionReplicaUtil.isDefaultReplica(ri) + && (!ri.isOffline() || (!ri.isSplit() && !ri.isSplitParent())) + ) { + // region level replica index starts from 0. So if oldReplicaCount was 2 then the max + // replicaId for // the existing regions would be 1 for (int j = oldReplicaCount; j < newReplicaCount; j++) { hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(ri, j)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java index 0204528ca876..29b092cad883 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,14 +49,13 @@ public class RegionServerCoprocessorRpcChannelImpl implements RpcChannel { } private CompletableFuture rpcCall(MethodDescriptor method, Message request, - Message responsePrototype, HBaseRpcController controller, ClientService.Interface stub) { + Message responsePrototype, HBaseRpcController controller, ClientService.Interface stub) { CompletableFuture future = new CompletableFuture<>(); CoprocessorServiceRequest csr = - CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); - stub.execRegionServerService( - controller, - csr, - new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback() { + CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); + stub.execRegionServerService(controller, csr, + new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback< + CoprocessorServiceResponse>() { @Override public void run(CoprocessorServiceResponse resp) { @@ -76,7 +75,7 @@ public void run(CoprocessorServiceResponse resp) { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { addListener( callerBuilder.action((c, s) -> rpcCall(method, request, responsePrototype, c, s)).call(), ((r, e) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java index 1e1ce95113b9..582f492d3dec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -106,7 +104,7 @@ public RegionStatesCountBuilder setTotalRegions(int totalRegions) { } public RegionStatesCount build() { - RegionStatesCount regionStatesCount=new RegionStatesCount(); + RegionStatesCount regionStatesCount = new RegionStatesCount(); regionStatesCount.setOpenRegions(openRegions); regionStatesCount.setClosedRegions(closedRegions); regionStatesCount.setRegionsInTransition(regionsInTransition); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java index 2064021f714d..ac7cad275813 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java @@ -80,8 +80,8 @@ private void mainLoop() { } // if refreshNow is true, then we will wait until minTimeBetweenRefreshesMs elapsed, // otherwise wait until periodicRefreshMs elapsed - long waitTime = getRefreshIntervalMs(firstRefresh) - - (EnvironmentEdgeManager.currentTime() - lastRefreshTime); + long waitTime = getRefreshIntervalMs(firstRefresh) + - (EnvironmentEdgeManager.currentTime() - lastRefreshTime); if (waitTime <= 0) { // we are going to refresh, reset this flag firstRefresh = false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java index 4c63e4d08812..66d864be7d49 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.InterruptedIOException; @@ -63,10 +61,10 @@ public interface Checker { /** * Reset the state of the scheduler when completing the iteration of rows. - * @throws InterruptedIOException some controller may wait - * for some busy region or RS to complete the undealt request. + * @throws InterruptedIOException some controller may wait for some busy region or RS to + * complete the undealt request. */ - void reset() throws InterruptedIOException ; + void reset() throws InterruptedIOException; } /** @@ -77,14 +75,14 @@ public interface Checker { /** * Increment the counter if we build a valid task. * @param regions The destination of task - * @param sn The target server + * @param sn The target server */ void incTaskCounters(Collection regions, ServerName sn); /** * Decrement the counter if a task is accomplished. * @param regions The destination of task - * @param sn The target server + * @param sn The target server */ void decTaskCounters(Collection regions, ServerName sn); @@ -94,28 +92,27 @@ public interface Checker { long getNumberOfTasksInProgress(); /** - * Waits for the running tasks to complete. - * If there are specified threshold and trigger, the implementation should - * wake up once in a while for checking the threshold and calling trigger. 
- * @param max This method will return if the number of running tasks is - * less than or equal to max. - * @param id the caller's id - * @param periodToTrigger The period to invoke the trigger. This value is a - * hint. The real period depends on the implementation. - * @param trigger The object to call periodically. + * Waits for the running tasks to complete. If there are specified threshold and trigger, the + * implementation should wake up once in a while for checking the threshold and calling trigger. + * @param max This method will return if the number of running tasks is less than or + * equal to max. + * @param id the caller's id + * @param periodToTrigger The period to invoke the trigger. This value is a hint. The real period + * depends on the implementation. + * @param trigger The object to call periodically. * @throws java.io.InterruptedIOException If the waiting is interrupted */ - void waitForMaximumCurrentTasks(long max, long id, - int periodToTrigger, Consumer trigger) throws InterruptedIOException; + void waitForMaximumCurrentTasks(long max, long id, int periodToTrigger, Consumer trigger) + throws InterruptedIOException; /** * Wait until there is at least one slot for a new task. - * @param id the caller's id - * @param periodToTrigger The period to invoke the trigger. This value is a - * hint. The real period depends on the implementation. - * @param trigger The object to call periodically. + * @param id the caller's id + * @param periodToTrigger The period to invoke the trigger. This value is a hint. The real period + * depends on the implementation. + * @param trigger The object to call periodically. * @throws java.io.InterruptedIOException If the waiting is interrupted */ - void waitForFreeSlot(long id, int periodToTrigger, - Consumer trigger) throws InterruptedIOException; + void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger) + throws InterruptedIOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java index f03da448750c..706db916aef6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * A factory class that constructs an {@link org.apache.hadoop.hbase.client.RequestController}. */ @InterfaceAudience.Public public final class RequestControllerFactory { - public static final String REQUEST_CONTROLLER_IMPL_CONF_KEY = "hbase.client.request.controller.impl"; + public static final String REQUEST_CONTROLLER_IMPL_CONF_KEY = + "hbase.client.request.controller.impl"; + /** * Constructs a {@link org.apache.hadoop.hbase.client.RequestController}. * @param conf The {@link Configuration} to use. * @return A RequestController which is built according to the configuration. 
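The factory being reformatted here resolves the RequestController implementation from REQUEST_CONTROLLER_IMPL_CONF_KEY. A minimal sketch that leaves the key unset, so the built-in default implementation is returned:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.RequestController;
    import org.apache.hadoop.hbase.client.RequestControllerFactory;

    public class RequestControllerSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Nothing set under REQUEST_CONTROLLER_IMPL_CONF_KEY, so the default controller is used.
        RequestController controller = RequestControllerFactory.create(conf);
        System.out.println("controller = " + controller.getClass().getName());
        System.out.println("tasks in progress = " + controller.getNumberOfTasksInProgress());
      }
    }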
*/ public static RequestController create(Configuration conf) { - Class clazz= conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY, + Class clazz = conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class, RequestController.class); return ReflectionUtils.newInstance(clazz, conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index eb72ef82995f..46865380f757 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -32,52 +30,44 @@ import java.util.NavigableMap; import java.util.NoSuchElementException; import java.util.TreeMap; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Single row result of a {@link Get} or {@link Scan} query.

- * - * This class is NOT THREAD SAFE. - * - * Convenience methods are available that return various {@link Map} - * structures and values directly. - * - * To get a complete mapping of all cells in the Result, which can include - * multiple families and multiple versions, use {@link #getMap()}. - * - * To get a mapping of each family to its columns (qualifiers and values), - * including only the latest version of each, use {@link #getNoVersionMap()}. - * - * To get a mapping of qualifiers to latest values for an individual family use - * {@link #getFamilyMap(byte[])}. - * + * Single row result of a {@link Get} or {@link Scan} query. + * + * This class is NOT THREAD SAFE. + * + * Convenience methods are available that return various {@link Map} structures and values directly. + * + * To get a complete mapping of all cells in the Result, which can include multiple families and + * multiple versions, use {@link #getMap()}. + * + * To get a mapping of each family to its columns (qualifiers and values), including only the latest + * version of each, use {@link #getNoVersionMap()}. To get a mapping of qualifiers to latest values + * for an individual family use {@link #getFamilyMap(byte[])}. + * * To get the latest value for a specific family and qualifier use - * {@link #getValue(byte[], byte[])}. - * - * A Result is backed by an array of {@link Cell} objects, each representing - * an HBase cell defined by the row, family, qualifier, timestamp, and value. - * - * The underlying {@link Cell} objects can be accessed through the method {@link #listCells()}. - * This will create a List from the internal Cell []. Better is to exploit the fact that - * a new Result instance is a primed {@link CellScanner}; just call {@link #advance()} and - * {@link #current()} to iterate over Cells as you would any {@link CellScanner}. - * Call {@link #cellScanner()} to reset should you need to iterate the same Result over again - * ({@link CellScanner}s are one-shot). - * - * If you need to overwrite a Result with another Result instance -- as in the old 'mapred' - * RecordReader next invocations -- then create an empty Result with the null constructor and - * in then use {@link #copyFrom(Result)} + * {@link #getValue(byte[], byte[])}. A Result is backed by an array of {@link Cell} objects, each + * representing an HBase cell defined by the row, family, qualifier, timestamp, and value. + *
    + * The underlying {@link Cell} objects can be accessed through the method {@link #listCells()}. This + * will create a List from the internal Cell []. Better is to exploit the fact that a new Result + * instance is a primed {@link CellScanner}; just call {@link #advance()} and {@link #current()} to + * iterate over Cells as you would any {@link CellScanner}. Call {@link #cellScanner()} to reset + * should you need to iterate the same Result over again ({@link CellScanner}s are one-shot). If you + * need to overwrite a Result with another Result instance -- as in the old 'mapred' RecordReader + * next invocations -- then create an empty Result with the null constructor and in then use + * {@link #copyFrom(Result)} */ @InterfaceAudience.Public public class Result implements CellScannable, CellScanner { @@ -89,12 +79,12 @@ public class Result implements CellScannable, CellScanner { * See {@link #mayHaveMoreCellsInRow()}. */ private boolean mayHaveMoreCellsInRow = false; - // We're not using java serialization. Transient here is just a marker to say + // We're not using java serialization. Transient here is just a marker to say // that this is where we cache row if we're ever asked for it. - private transient byte [] row = null; - // Ditto for familyMap. It can be composed on fly from passed in kvs. - private transient NavigableMap>> - familyMap = null; + private transient byte[] row = null; + // Ditto for familyMap. It can be composed on fly from passed in kvs. + private transient NavigableMap>> familyMap = null; private static ThreadLocal localBuffer = new ThreadLocal<>(); private static final int PAD_WIDTH = 128; @@ -114,8 +104,8 @@ public class Result implements CellScannable, CellScanner { /** * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #rawCells()}. - * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed - * to 'mapreduce' package MapReduce where you need to overwrite a Result instance with a + * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed to + * 'mapreduce' package MapReduce where you need to overwrite a Result instance with a * {@link #copyFrom(Result)} call. */ public Result() { @@ -123,8 +113,7 @@ public Result() { } /** - * Allows to construct special purpose immutable Result objects, - * such as EMPTY_RESULT. + * Allows to construct special purpose immutable Result objects, such as EMPTY_RESULT. * @param readonly whether this Result instance is readonly */ private Result(boolean readonly) { @@ -132,8 +121,8 @@ private Result(boolean readonly) { } /** - * Instantiate a Result with the specified List of KeyValues. - *
Note: You must ensure that the keyvalues are already sorted. + * Instantiate a Result with the specified List of KeyValues. + * Note: You must ensure that the keyvalues are already sorted. * @param cells List of cells */ public static Result create(List cells) { @@ -149,16 +138,16 @@ public static Result create(List cells, Boolean exists, boolean stale) { } public static Result create(List cells, Boolean exists, boolean stale, - boolean mayHaveMoreCellsInRow) { - if (exists != null){ + boolean mayHaveMoreCellsInRow) { + if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); } return new Result(cells.toArray(new Cell[cells.size()]), null, stale, mayHaveMoreCellsInRow); } /** - * Instantiate a Result with the specified array of KeyValues. - * Note: You must ensure that the keyvalues are already sorted. + * Instantiate a Result with the specified array of KeyValues.
    + * Note: You must ensure that the keyvalues are already sorted. * @param cells array of cells */ public static Result create(Cell[] cells) { @@ -170,7 +159,7 @@ public static Result create(Cell[] cells, Boolean exists, boolean stale) { } public static Result create(Cell[] cells, Boolean exists, boolean stale, - boolean mayHaveMoreCellsInRow) { + boolean mayHaveMoreCellsInRow) { if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); } @@ -196,37 +185,27 @@ private Result(Cell[] cells, Boolean exists, boolean stale, boolean mayHaveMoreC } /** - * Method for retrieving the row key that corresponds to - * the row from which this Result was created. - * @return row + * Method for retrieving the row key that corresponds to the row from which this Result was + * created. n */ - public byte [] getRow() { + public byte[] getRow() { if (this.row == null) { - this.row = (this.cells == null || this.cells.length == 0) ? - null : - CellUtil.cloneRow(this.cells[0]); + this.row = + (this.cells == null || this.cells.length == 0) ? null : CellUtil.cloneRow(this.cells[0]); } return this.row; } /** - * Return the array of Cells backing this Result instance. - * - * The array is sorted from smallest -> largest using the - * {@link CellComparator}. - * - * The array only contains what your Get or Scan specifies and no more. - * For example if you request column "A" 1 version you will have at most 1 - * Cell in the array. If you request column "A" with 2 version you will - * have at most 2 Cells, with the first one being the newer timestamp and - * the second being the older timestamp (this is the sort order defined by - * {@link CellComparator}). If columns don't exist, they won't be - * present in the result. Therefore if you ask for 1 version all columns, - * it is safe to iterate over this array and expect to see 1 Cell for - * each column and no more. - * - * This API is faster than using getFamilyMap() and getMap() - * + * Return the array of Cells backing this Result instance. The array is sorted from smallest -> + * largest using the {@link CellComparator}. The array only contains what your Get or Scan + * specifies and no more. For example if you request column "A" 1 version you will have at most 1 + * Cell in the array. If you request column "A" with 2 version you will have at most 2 Cells, with + * the first one being the newer timestamp and the second being the older timestamp (this is the + * sort order defined by {@link CellComparator}). If columns don't exist, they won't be present in + * the result. Therefore if you ask for 1 version all columns, it is safe to iterate over this + * array and expect to see 1 Cell for each column and no more. This API is faster than using + * getFamilyMap() and getMap() * @return array of Cells; can be null if nothing in the result */ public Cell[] rawCells() { @@ -234,35 +213,27 @@ public Cell[] rawCells() { } /** - * Create a sorted list of the Cell's in this result. - * - * Since HBase 0.20.5 this is equivalent to raw(). - * + * Create a sorted list of the Cell's in this result. Since HBase 0.20.5 this is equivalent to + * raw(). * @return sorted List of Cells; can be null if no cells in the result */ public List listCells() { - return isEmpty()? null: Arrays.asList(rawCells()); + return isEmpty() ? null : Arrays.asList(rawCells()); } /** - * Return the Cells for the specific column. The Cells are sorted in - * the {@link CellComparator} order. That implies the first entry in - * the list is the most recent column. 
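These hunks only rewrap the Result javadoc, but the behaviour it describes is easy to show in isolation: a Result built from pre-sorted KeyValues, read back with getValue and getColumnCells. The row, family and values are invented for the sketch:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultReadSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-1");
        byte[] cf = Bytes.toBytes("cf");
        // Two versions of cf:a plus one cf:b, already in sort order as create() requires.
        List<Cell> cells = Arrays.asList(
            new KeyValue(row, cf, Bytes.toBytes("a"), 2L, Bytes.toBytes("newer")),
            new KeyValue(row, cf, Bytes.toBytes("a"), 1L, Bytes.toBytes("older")),
            new KeyValue(row, cf, Bytes.toBytes("b"), 1L, Bytes.toBytes("other")));
        Result result = Result.create(cells);
        // getValue returns the newest version of cf:a.
        System.out.println(Bytes.toString(result.getValue(cf, Bytes.toBytes("a"))));
        // getColumnCells returns both versions of cf:a, newest first.
        System.out.println(result.getColumnCells(cf, Bytes.toBytes("a")).size());
      }
    }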
If the query (Scan or Get) only - * requested 1 version the list will contain at most 1 entry. If the column - * did not exist in the result set (either the column does not exist - * or the column was not selected in the query) the list will be empty. - * - * Also see getColumnLatest which returns just a Cell - * - * @param family the family - * @param qualifier - * @return a list of Cells for this column or empty list if the column - * did not exist in the result set + * Return the Cells for the specific column. The Cells are sorted in the {@link CellComparator} + * order. That implies the first entry in the list is the most recent column. If the query (Scan + * or Get) only requested 1 version the list will contain at most 1 entry. If the column did not + * exist in the result set (either the column does not exist or the column was not selected in the + * query) the list will be empty. Also see getColumnLatest which returns just a Cell + * @param family the family n * @return a list of Cells for this column or empty list if the + * column did not exist in the result set */ - public List getColumnCells(byte [] family, byte [] qualifier) { + public List getColumnCells(byte[] family, byte[] qualifier) { List result = new ArrayList<>(); - Cell [] kvs = rawCells(); + Cell[] kvs = rawCells(); if (kvs == null || kvs.length == 0) { return result; @@ -273,7 +244,7 @@ public List getColumnCells(byte [] family, byte [] qualifier) { } for (int i = pos; i < kvs.length; i++) { - if (CellUtil.matchingColumn(kvs[i], family,qualifier)) { + if (CellUtil.matchingColumn(kvs[i], family, qualifier)) { result.add(kvs[i]); } else { break; @@ -291,22 +262,18 @@ private byte[] notNullBytes(final byte[] bytes) { } } - protected int binarySearch(final Cell [] kvs, - final byte [] family, - final byte [] qualifier) { + protected int binarySearch(final Cell[] kvs, final byte[] family, final byte[] qualifier) { byte[] familyNotNull = notNullBytes(family); byte[] qualifierNotNull = notNullBytes(qualifier); - Cell searchTerm = - PrivateCellUtil.createFirstOnRow(kvs[0].getRowArray(), - kvs[0].getRowOffset(), kvs[0].getRowLength(), - familyNotNull, 0, (byte)familyNotNull.length, - qualifierNotNull, 0, qualifierNotNull.length); + Cell searchTerm = PrivateCellUtil.createFirstOnRow(kvs[0].getRowArray(), kvs[0].getRowOffset(), + kvs[0].getRowLength(), familyNotNull, 0, (byte) familyNotNull.length, qualifierNotNull, 0, + qualifierNotNull.length); // pos === ( -(insertion point) - 1) int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance()); // never will exact match if (pos < 0) { - pos = (pos+1) * -1; + pos = (pos + 1) * -1; // pos is now insertion point } if (pos == kvs.length) { @@ -317,23 +284,20 @@ protected int binarySearch(final Cell [] kvs, /** * Searches for the latest value for the specified column. 
- * - * @param kvs the array to search - * @param family family name - * @param foffset family offset - * @param flength family length + * @param kvs the array to search + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return the index where the value was found, or -1 otherwise */ - protected int binarySearch(final Cell [] kvs, - final byte [] family, final int foffset, final int flength, - final byte [] qualifier, final int qoffset, final int qlength) { + protected int binarySearch(final Cell[] kvs, final byte[] family, final int foffset, + final int flength, final byte[] qualifier, final int qoffset, final int qlength) { - double keyValueSize = (double) - KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0); + double keyValueSize = + (double) KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0); byte[] buffer = localBuffer.get(); if (buffer == null || keyValueSize > buffer.length) { @@ -342,16 +306,15 @@ protected int binarySearch(final Cell [] kvs, localBuffer.set(buffer); } - Cell searchTerm = KeyValueUtil.createFirstOnRow(buffer, 0, - kvs[0].getRowArray(), kvs[0].getRowOffset(), kvs[0].getRowLength(), - family, foffset, flength, - qualifier, qoffset, qlength); + Cell searchTerm = + KeyValueUtil.createFirstOnRow(buffer, 0, kvs[0].getRowArray(), kvs[0].getRowOffset(), + kvs[0].getRowLength(), family, foffset, flength, qualifier, qoffset, qlength); // pos === ( -(insertion point) - 1) int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance()); // never will exact match if (pos < 0) { - pos = (pos+1) * -1; + pos = (pos + 1) * -1; // pos is now insertion point } if (pos == kvs.length) { @@ -361,16 +324,12 @@ protected int binarySearch(final Cell [] kvs, } /** - * The Cell for the most recent timestamp for a given column. - * - * @param family - * @param qualifier - * + * The Cell for the most recent timestamp for a given column. nn * * @return the Cell for the column, or null if no value exists in the row or none have been - * selected in the query (Get/Scan) + * selected in the query (Get/Scan) */ - public Cell getColumnLatestCell(byte [] family, byte [] qualifier) { - Cell [] kvs = rawCells(); // side effect possibly. + public Cell getColumnLatestCell(byte[] family, byte[] qualifier) { + Cell[] kvs = rawCells(); // side effect possibly. if (kvs == null || kvs.length == 0) { return null; } @@ -386,21 +345,19 @@ public Cell getColumnLatestCell(byte [] family, byte [] qualifier) { /** * The Cell for the most recent timestamp for a given column. 
- * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return the Cell for the column, or null if no value exists in the row or none have been - * selected in the query (Get/Scan) + * selected in the query (Get/Scan) */ - public Cell getColumnLatestCell(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public Cell getColumnLatestCell(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { - Cell [] kvs = rawCells(); // side effect possibly. + Cell[] kvs = rawCells(); // side effect possibly. if (kvs == null || kvs.length == 0) { return null; } @@ -408,23 +365,24 @@ public Cell getColumnLatestCell(byte [] family, int foffset, int flength, if (pos == -1) { return null; } - if (PrivateCellUtil.matchingColumn(kvs[pos], family, foffset, flength, qualifier, qoffset, - qlength)) { + if ( + PrivateCellUtil.matchingColumn(kvs[pos], family, foffset, flength, qualifier, qoffset, + qlength) + ) { return kvs[pos]; } return null; } /** - * Get the latest version of the specified column. - * Note: this call clones the value content of the hosting Cell. See - * {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()} if you would - * avoid the cloning. - * @param family family name + * Get the latest version of the specified column. Note: this call clones the value content of the + * hosting Cell. See {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()} + * if you would avoid the cloning. + * @param family family name * @param qualifier column qualifier * @return value of latest version of column, null if none found */ - public byte[] getValue(byte [] family, byte [] qualifier) { + public byte[] getValue(byte[] family, byte[] qualifier) { Cell kv = getColumnLatestCell(family, qualifier); if (kv == null) { return null; @@ -434,62 +392,55 @@ public byte[] getValue(byte [] family, byte [] qualifier) { /** * Returns the value wrapped in a new ByteBuffer. - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return the latest version of the column, or null if none found */ - public ByteBuffer getValueAsByteBuffer(byte [] family, byte [] qualifier) { + public ByteBuffer getValueAsByteBuffer(byte[] family, byte[] qualifier) { Cell kv = getColumnLatestCell(family, 0, family.length, qualifier, 0, qualifier.length); if (kv == null) { return null; } - return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()). - asReadOnlyBuffer(); + return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()) + .asReadOnlyBuffer(); } /** * Returns the value wrapped in a new ByteBuffer. 
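For the buffer-oriented accessors documented above, a brief sketch of reading a value without copying it into a fresh byte[]; again the Result is hand-built and the data is illustrative only:

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultBufferSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-1");
        byte[] cf = Bytes.toBytes("cf");
        byte[] q = Bytes.toBytes("a");
        List<Cell> cells = Arrays.asList(new KeyValue(row, cf, q, 1L, Bytes.toBytes("payload")));
        Result result = Result.create(cells);
        // Read-only view over the stored value; the value bytes are not copied.
        ByteBuffer view = result.getValueAsByteBuffer(cf, q);
        System.out.println(view.remaining()); // 7
        // Or load the value into a caller-owned buffer; loadValue neither clears nor flips dst.
        ByteBuffer dst = ByteBuffer.allocate(32);
        if (result.loadValue(cf, q, dst)) {
          dst.flip();
          byte[] copy = new byte[dst.remaining()];
          dst.get(copy);
          System.out.println(Bytes.toString(copy)); // payload
        }
      }
    }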
- * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return the latest version of the column, or null if none found */ - public ByteBuffer getValueAsByteBuffer(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public ByteBuffer getValueAsByteBuffer(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); if (kv == null) { return null; } - return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()). - asReadOnlyBuffer(); + return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()) + .asReadOnlyBuffer(); } /** * Loads the latest version of the specified column into the provided ByteBuffer. *

    * Does not clear or flip the buffer. - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * @param dst the buffer where to write the value - * + * @param dst the buffer where to write the value * @return true if a value was found, false otherwise - * * @throws BufferOverflowException there is insufficient space remaining in the buffer */ - public boolean loadValue(byte [] family, byte [] qualifier, ByteBuffer dst) - throws BufferOverflowException { + public boolean loadValue(byte[] family, byte[] qualifier, ByteBuffer dst) + throws BufferOverflowException { return loadValue(family, 0, family.length, qualifier, 0, qualifier.length, dst); } @@ -497,22 +448,18 @@ public boolean loadValue(byte [] family, byte [] qualifier, ByteBuffer dst) * Loads the latest version of the specified column into the provided ByteBuffer. *

    * Does not clear or flip the buffer. - * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * @param dst the buffer where to write the value - * + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param dst the buffer where to write the value * @return true if a value was found, false otherwise - * * @throws BufferOverflowException there is insufficient space remaining in the buffer */ - public boolean loadValue(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength, ByteBuffer dst) - throws BufferOverflowException { + public boolean loadValue(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, + int qlength, ByteBuffer dst) throws BufferOverflowException { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); if (kv == null) { @@ -524,31 +471,27 @@ public boolean loadValue(byte [] family, int foffset, int flength, /** * Checks if the specified column contains a non-empty value (not a zero-length byte array). - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return whether or not a latest value exists and is not empty */ - public boolean containsNonEmptyColumn(byte [] family, byte [] qualifier) { + public boolean containsNonEmptyColumn(byte[] family, byte[] qualifier) { return containsNonEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); } /** * Checks if the specified column contains a non-empty value (not a zero-length byte array). - * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return whether or not a latest value exists and is not empty */ - public boolean containsNonEmptyColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public boolean containsNonEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); @@ -557,31 +500,27 @@ public boolean containsNonEmptyColumn(byte [] family, int foffset, int flength, /** * Checks if the specified column contains an empty value (a zero-length byte array). - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return whether or not a latest value exists and is empty */ - public boolean containsEmptyColumn(byte [] family, byte [] qualifier) { + public boolean containsEmptyColumn(byte[] family, byte[] qualifier) { return containsEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); } /** * Checks if the specified column contains an empty value (a zero-length byte array). 
- * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return whether or not a latest value exists and is empty */ - public boolean containsEmptyColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public boolean containsEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength); return (kv != null) && (kv.getValueLength() == 0); @@ -589,31 +528,27 @@ public boolean containsEmptyColumn(byte [] family, int foffset, int flength, /** * Checks for existence of a value for the specified column (empty or not). - * - * @param family family name + * @param family family name * @param qualifier column qualifier - * * @return true if at least one value exists in the result, false if not */ - public boolean containsColumn(byte [] family, byte [] qualifier) { + public boolean containsColumn(byte[] family, byte[] qualifier) { Cell kv = getColumnLatestCell(family, qualifier); return kv != null; } /** * Checks for existence of a value for the specified column (empty or not). - * - * @param family family name - * @param foffset family offset - * @param flength family length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * + * @param qoffset qualifier offset + * @param qlength qualifier length * @return true if at least one value exists in the result, false if not */ - public boolean containsColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { + public boolean containsColumn(byte[] family, int foffset, int flength, byte[] qualifier, + int qoffset, int qlength) { return getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength) != null; } @@ -631,20 +566,20 @@ public NavigableMap>> ge if (this.familyMap != null) { return this.familyMap; } - if(isEmpty()) { + if (isEmpty()) { return null; } this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(Cell kv : this.cells) { - byte [] family = CellUtil.cloneFamily(kv); + for (Cell kv : this.cells) { + byte[] family = CellUtil.cloneFamily(kv); NavigableMap> columnMap = familyMap.get(family); - if(columnMap == null) { + if (columnMap == null) { columnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, columnMap); } - byte [] qualifier = CellUtil.cloneQualifier(kv); + byte[] qualifier = CellUtil.cloneQualifier(kv); NavigableMap versionMap = columnMap.get(qualifier); - if(versionMap == null) { + if (versionMap == null) { versionMap = new TreeMap<>(new Comparator() { @Override public int compare(Long l1, Long l2) { @@ -654,7 +589,7 @@ public int compare(Long l1, Long l2) { columnMap.put(qualifier, versionMap); } Long timestamp = kv.getTimestamp(); - byte [] value = CellUtil.cloneValue(kv); + byte[] value = CellUtil.cloneValue(kv); versionMap.put(timestamp, value); } @@ -670,20 +605,20 @@ public int compare(Long l1, Long l2) { * @return map from families to qualifiers and value */ public NavigableMap> getNoVersionMap() { - if(this.familyMap 
== null) { + if (this.familyMap == null) { getMap(); } - if(isEmpty()) { + if (isEmpty()) { return null; } - NavigableMap> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(Map.Entry>> - familyEntry : familyMap.entrySet()) { + NavigableMap> returnMap = + new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Map.Entry>> familyEntry : familyMap + .entrySet()) { NavigableMap qualifierMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(Map.Entry> qualifierEntry : - familyEntry.getValue().entrySet()) { - byte [] value = - qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); + for (Map.Entry> qualifierEntry : familyEntry.getValue() + .entrySet()) { + byte[] value = qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); qualifierMap.put(qualifierEntry.getKey(), value); } returnMap.put(familyEntry.getKey(), qualifierMap); @@ -698,23 +633,20 @@ public NavigableMap> getNoVersionMap() { * @param family column family to get * @return map of qualifiers to values */ - public NavigableMap getFamilyMap(byte [] family) { - if(this.familyMap == null) { + public NavigableMap getFamilyMap(byte[] family) { + if (this.familyMap == null) { getMap(); } - if(isEmpty()) { + if (isEmpty()) { return null; } NavigableMap returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - NavigableMap> qualifierMap = - familyMap.get(family); - if(qualifierMap == null) { + NavigableMap> qualifierMap = familyMap.get(family); + if (qualifierMap == null) { return returnMap; } - for(Map.Entry> entry : - qualifierMap.entrySet()) { - byte [] value = - entry.getValue().get(entry.getValue().firstKey()); + for (Map.Entry> entry : qualifierMap.entrySet()) { + byte[] value = entry.getValue().get(entry.getValue().firstKey()); returnMap.put(entry.getKey(), value); } return returnMap; @@ -724,7 +656,7 @@ public NavigableMap getFamilyMap(byte [] family) { * Returns the value of the first column in the Result. * @return value of the first column */ - public byte [] value() { + public byte[] value() { if (isEmpty()) { return null; } @@ -743,24 +675,24 @@ public boolean isEmpty() { * @return the size of the underlying Cell [] */ public int size() { - return this.cells == null? 0: this.cells.length; + return this.cells == null ? 0 : this.cells.length; } /** - * @return String + * n */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("keyvalues="); - if(isEmpty()) { + if (isEmpty()) { sb.append("NONE"); return sb.toString(); } sb.append("{"); boolean moreThanOne = false; - for(Cell kv : this.cells) { - if(moreThanOne) { + for (Cell kv : this.cells) { + if (moreThanOne) { sb.append(", "); } else { moreThanOne = true; @@ -777,49 +709,45 @@ public String toString() { * @param res2 second result to compare * @throws Exception Every difference is throwing an exception */ - public static void compareResults(Result res1, Result res2) - throws Exception{ + public static void compareResults(Result res1, Result res2) throws Exception { compareResults(res1, res2, true); } /** * Does a deep comparison of two Results, down to the byte arrays. 
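[Editor's note, not part of the patch] The map views reformatted above (getMap, getNoVersionMap, getFamilyMap) are easiest to see in a small sketch; it assumes an open Table handle and uses hypothetical names.

    import java.io.IOException;
    import java.util.Map;
    import java.util.NavigableMap;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyMapSketch {
      // Prints the newest value of every column in one family of a row.
      static void dumpFamily(Table table, byte[] row, byte[] family) throws IOException {
        Result result = table.get(new Get(row).addFamily(family));
        if (result.isEmpty()) {
          return; // getFamilyMap() returns null for an empty Result
        }
        NavigableMap<byte[], byte[]> columns = result.getFamilyMap(family);
        for (Map.Entry<byte[], byte[]> e : columns.entrySet()) {
          System.out.println(Bytes.toStringBinary(e.getKey()) + " = " + Bytes.toStringBinary(e.getValue()));
        }
      }
    }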
- * @param res1 first result to compare - * @param res2 second result to compare - * @param verbose includes string representation for all cells in the exception if true; - * otherwise include rowkey only + * @param res1 first result to compare + * @param res2 second result to compare + * @param verbose includes string representation for all cells in the exception if true; otherwise + * include rowkey only * @throws Exception Every difference is throwing an exception */ - public static void compareResults(Result res1, Result res2, boolean verbose) - throws Exception { + public static void compareResults(Result res1, Result res2, boolean verbose) throws Exception { if (res2 == null) { - throw new Exception("There wasn't enough rows, we stopped at " - + Bytes.toStringBinary(res1.getRow())); + throw new Exception( + "There wasn't enough rows, we stopped at " + Bytes.toStringBinary(res1.getRow())); } if (res1.size() != res2.size()) { if (verbose) { throw new Exception( - "This row doesn't have the same number of KVs: " - + res1 + " compared to " + res2); + "This row doesn't have the same number of KVs: " + res1 + " compared to " + res2); } else { throw new Exception( - "This row doesn't have the same number of KVs: row=" - + Bytes.toStringBinary(res1.getRow()) + "This row doesn't have the same number of KVs: row=" + Bytes.toStringBinary(res1.getRow()) + ", " + res1.size() + " cells are compared to " + res2.size() + " cells"); } } Cell[] ourKVs = res1.rawCells(); Cell[] replicatedKVs = res2.rawCells(); for (int i = 0; i < res1.size(); i++) { - if (!ourKVs[i].equals(replicatedKVs[i]) || - !CellUtil.matchingValue(ourKVs[i], replicatedKVs[i]) || - !CellUtil.matchingTags(ourKVs[i], replicatedKVs[i])) { + if ( + !ourKVs[i].equals(replicatedKVs[i]) || !CellUtil.matchingValue(ourKVs[i], replicatedKVs[i]) + || !CellUtil.matchingTags(ourKVs[i], replicatedKVs[i]) + ) { if (verbose) { - throw new Exception("This result was different: " - + res1 + " compared to " + res2); + throw new Exception("This result was different: " + res1 + " compared to " + res2); } else { - throw new Exception("This result was different: row=" - + Bytes.toStringBinary(res1.getRow())); + throw new Exception( + "This result was different: row=" + Bytes.toStringBinary(res1.getRow())); } } } @@ -831,10 +759,9 @@ public static void compareResults(Result res1, Result res2, boolean verbose) * @param partialResults list of partial results * @return The complete result that is formed by combining all of the partial results together * @throws IOException A complete result cannot be formed because the results in the partial list - * come from different rows + * come from different rows */ - public static Result createCompleteResult(Iterable partialResults) - throws IOException { + public static Result createCompleteResult(Iterable partialResults) throws IOException { if (partialResults == null) { return Result.create(Collections.emptyList(), null, false); } @@ -846,9 +773,8 @@ public static Result createCompleteResult(Iterable partialResults) Result r = iter.next(); currentRow = r.getRow(); if (prevRow != null && !Bytes.equals(prevRow, currentRow)) { - throw new IOException( - "Cannot form complete result. Rows of partial results do not match." + - " Partial Results: " + partialResults); + throw new IOException("Cannot form complete result. Rows of partial results do not match." + + " Partial Results: " + partialResults); } // Ensure that all Results except the last one are marked as partials. 
The last result // may not be marked as a partial because Results are only marked as partials when @@ -862,8 +788,8 @@ public static Result createCompleteResult(Iterable partialResults) // Result2: -3- -4- (2 cells, size limit reached, mark as partial) // Result3: -5- (1 cell, size limit NOT reached, NOT marked as partial) if (iter.hasNext() && !r.mayHaveMoreCellsInRow()) { - throw new IOException("Cannot form complete result. Result is missing partial flag. " + - "Partial Results: " + partialResults); + throw new IOException("Cannot form complete result. Result is missing partial flag. " + + "Partial Results: " + partialResults); } prevRow = currentRow; stale = stale || r.isStale(); @@ -876,9 +802,7 @@ public static Result createCompleteResult(Iterable partialResults) } /** - * Get total size of raw cells - * @param result - * @return Total size. + * Get total size of raw cells n * @return Total size. */ public static long getTotalSizeOfCells(Result result) { long size = 0; @@ -893,9 +817,8 @@ public static long getTotalSizeOfCells(Result result) { /** * Copy another Result into this one. Needed for the old Mapred framework - * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT - * (which is supposed to be immutable). - * @param other + * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed + * to be immutable). n */ public void copyFrom(Result other) { checkReadonly(); @@ -913,10 +836,9 @@ public CellScanner cellScanner() { @Override public Cell current() { - if (isEmpty() - || cellScannerIndex == INITIAL_CELLSCANNER_INDEX - || cellScannerIndex >= cells.length) - return null; + if ( + isEmpty() || cellScannerIndex == INITIAL_CELLSCANNER_INDEX || cellScannerIndex >= cells.length + ) return null; return this.cells[cellScannerIndex]; } @@ -944,8 +866,8 @@ public void setExists(Boolean exists) { } /** - * Whether or not the results are coming from possibly stale data. Stale results - * might be returned if {@link Consistency} is not STRONG for the query. + * Whether or not the results are coming from possibly stale data. Stale results might be returned + * if {@link Consistency} is not STRONG for the query. * @return Whether or not the results are coming from possibly stale data. */ public boolean isStale() { @@ -953,12 +875,12 @@ public boolean isStale() { } /** - * For scanning large rows, the RS may choose to return the cells chunk by chunk to prevent OOM - * or timeout. This flag is used to tell you if the current Result is the last one of the current + * For scanning large rows, the RS may choose to return the cells chunk by chunk to prevent OOM or + * timeout. This flag is used to tell you if the current Result is the last one of the current * row. False means this Result is the last one. True means there MAY be more cells belonging to - * the current row. - * If you don't use {@link Scan#setAllowPartialResults(boolean)} or {@link Scan#setBatch(int)}, - * this method will always return false because the Result must contains all cells in one Row. + * the current row. If you don't use {@link Scan#setAllowPartialResults(boolean)} or + * {@link Scan#setBatch(int)}, this method will always return false because the Result must + * contains all cells in one Row. */ public boolean mayHaveMoreCellsInRow() { return mayHaveMoreCellsInRow; @@ -975,15 +897,15 @@ public void setStatistics(RegionLoadStats loadStats) { /** * @return the associated statistics about the region from which this was returned. 
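[Editor's note, not part of the patch] Result.createCompleteResult() and mayHaveMoreCellsInRow() are meant to be used together when partial results are allowed; the sketch below shows one way to do that, assuming an open Table and illustrative names.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PartialResultSketch {
      // Re-assembles one logical row from the partial Results a size-limited scan may emit.
      static void scanWideRows(Table table) throws IOException {
        Scan scan = new Scan().setAllowPartialResults(true).setMaxResultSize(1024 * 1024);
        try (ResultScanner scanner = table.getScanner(scan)) {
          List<Result> partsOfRow = new ArrayList<>();
          for (Result r; (r = scanner.next()) != null;) {
            partsOfRow.add(r);
            if (!r.mayHaveMoreCellsInRow()) { // last chunk of the current row
              Result whole = Result.createCompleteResult(partsOfRow);
              System.out.println(Bytes.toStringBinary(whole.getRow()) + ": " + whole.size() + " cells");
              partsOfRow.clear();
            }
          }
        }
      }
    }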
Can be - * null if stats are disabled. + * null if stats are disabled. */ public RegionLoadStats getStats() { return stats; } /** - * All methods modifying state of Result object must call this method - * to ensure that special purpose immutable Results can't be accidentally modified. + * All methods modifying state of Result object must call this method to ensure that special + * purpose immutable Results can't be accidentally modified. */ private void checkReadonly() { if (readonly == true) { @@ -992,36 +914,23 @@ private void checkReadonly() { } /** - * Return true if this Result is a cursor to tell users where the server has scanned. - * In this Result the only meaningful method is {@link #getCursor()}. - * - * {@code + * Return true if this Result is a cursor to tell users where the server has scanned. In this + * Result the only meaningful method is {@link #getCursor()}. {@code * while (r = scanner.next() && r != null) { * if(r.isCursor()){ * // scanning is not end, it is a cursor, save its row key and close scanner if you want, or - * // just continue the loop to call next(). - * } else { - * // just like before - * } - * } - * // scanning is end - * - * } - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Cursor} - * {@link #getCursor()} + * // just continue the loop to call next(). } else { // just like before } } // scanning is end } + * {@link Scan#setNeedCursorResult(boolean)} {@link Cursor} {@link #getCursor()} */ public boolean isCursor() { - return cursor != null ; + return cursor != null; } /** - * Return the cursor if this Result is a cursor result. - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Cursor} - * {@link #isCursor()} + * Return the cursor if this Result is a cursor result. {@link Scan#setNeedCursorResult(boolean)} + * {@link Cursor} {@link #isCursor()} */ - public Cursor getCursor(){ + public Cursor getCursor() { return cursor; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java index 0881dd253d74..d6017a1e23fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,9 +23,8 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface for client-side scanning. Go to {@link Table} to obtain instances. @@ -88,8 +86,7 @@ public Result next() { * setting (or hbase.client.scanner.caching in hbase-site.xml). * @param nbRows number of rows to return * @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length - * (We never return null). - * @throws IOException + * (We never return null). 
n */ default Result[] next(int nbRows) throws IOException { List resultSets = new ArrayList<>(nbRows); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java index 793d10875cb3..b0bb97476aed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,16 +26,15 @@ @InterfaceAudience.Private public final class ResultStatsUtil { private ResultStatsUtil() { - //private ctor for util class + // private ctor for util class } /** * Update the statistics for the specified region. - * - * @param tracker tracker to update - * @param server server from which the result was obtained + * @param tracker tracker to update + * @param server server from which the result was obtained * @param regionName full region name for the statistics - * @param stats statistics to update for the specified region + * @param stats statistics to update for the specified region */ public static void updateStats(StatisticTrackable tracker, ServerName server, byte[] regionName, RegionLoadStats stats) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index f0df7e09cf91..a8493b979c62 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; @@ -23,13 +22,12 @@ import java.time.format.DateTimeFormatter; import java.util.List; import java.util.StringJoiner; - import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; /** - * Exception thrown by HTable methods when an attempt to do something (like - * commit changes) fails after a bunch of retries. + * Exception thrown by HTable methods when an attempt to do something (like commit changes) fails + * after a bunch of retries. 
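[Editor's note, not part of the patch] A minimal sketch of handling the exception documented here, assuming an open Table; whether to fall back or rethrow is application policy, not something this patch prescribes.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryHandlingSketch {
      // Distinguishes "client gave up after all retries" from other I/O failures.
      static Result getWithFallback(Table table, byte[] row) throws IOException {
        try {
          return table.get(new Get(row));
        } catch (RetriesExhaustedException e) {
          // The accumulated per-attempt causes are summarized in the exception message.
          System.err.println("gave up on " + Bytes.toStringBinary(row) + ": " + e.getMessage());
          return Result.EMPTY_RESULT;
        }
      }
    }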
*/ @InterfaceAudience.Public public class RetriesExhaustedException extends IOException { @@ -53,7 +51,7 @@ public static class ThrowableWithExtraContext { private final String extras; public ThrowableWithExtraContext(final Throwable throwable, final long whenAsEpochMilli, - final String extras) { + final String extras) { this.throwable = throwable; this.whenAsEpochMilli = whenAsEpochMilli; this.extras = extras; @@ -77,13 +75,12 @@ public String toString() { /** * Create a new RetriesExhaustedException from the list of prior failures. - * @param callableVitals Details from the Callable we were using - * when we got this exception. - * @param numTries The number of tries we made - * @param exceptions List of exceptions that failed before giving up + * @param callableVitals Details from the Callable we were using when we got this exception. + * @param numTries The number of tries we made + * @param exceptions List of exceptions that failed before giving up */ public RetriesExhaustedException(final String callableVitals, int numTries, - List exceptions) { + List exceptions) { super(getMessage(callableVitals, numTries, exceptions)); } @@ -94,13 +91,13 @@ public RetriesExhaustedException(final String callableVitals, int numTries, */ @InterfaceAudience.Private public RetriesExhaustedException(final int numRetries, - final List exceptions) { + final List exceptions) { super(getMessage(numRetries, exceptions), - exceptions.isEmpty()? null: exceptions.get(exceptions.size() - 1).throwable); + exceptions.isEmpty() ? null : exceptions.get(exceptions.size() - 1).throwable); } private static String getMessage(String callableVitals, int numTries, - List exceptions) { + List exceptions) { StringBuilder buffer = new StringBuilder("Failed contacting "); buffer.append(callableVitals); buffer.append(" after "); @@ -114,7 +111,7 @@ private static String getMessage(String callableVitals, int numTries, } private static String getMessage(final int numRetries, - final List exceptions) { + final List exceptions) { StringBuilder buffer = new StringBuilder("Failed after attempts="); buffer.append(numRetries + 1); buffer.append(", exceptions:\n"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java index ecbada95c2a1..8850cbe69404 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -27,25 +26,22 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException} - * is thrown when we have more information about which rows were causing which - * exceptions on what servers. You can call {@link #mayHaveClusterIssues()} - * and if the result is false, you have input error problems, otherwise you - * may have cluster issues. 
You can iterate over the causes, rows and last - * known server addresses via {@link #getNumExceptions()} and - * {@link #getCause(int)}, {@link #getRow(int)} and {@link #getHostnamePort(int)}. + * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException} is thrown when + * we have more information about which rows were causing which exceptions on what servers. You can + * call {@link #mayHaveClusterIssues()} and if the result is false, you have input error problems, + * otherwise you may have cluster issues. You can iterate over the causes, rows and last known + * server addresses via {@link #getNumExceptions()} and {@link #getCause(int)}, {@link #getRow(int)} + * and {@link #getHostnamePort(int)}. */ @SuppressWarnings("serial") @InterfaceAudience.Public -public class RetriesExhaustedWithDetailsException -extends RetriesExhaustedException { +public class RetriesExhaustedWithDetailsException extends RetriesExhaustedException { List exceptions; List actions; List hostnameAndPort; @@ -58,12 +54,10 @@ public RetriesExhaustedWithDetailsException(final String msg, final IOException super(msg, e); } - public RetriesExhaustedWithDetailsException(List exceptions, - List actions, - List hostnameAndPort) { - super("Failed " + exceptions.size() + " action" + - pluralize(exceptions) + ": " + - getDesc(exceptions, actions, hostnameAndPort)); + public RetriesExhaustedWithDetailsException(List exceptions, List actions, + List hostnameAndPort) { + super("Failed " + exceptions.size() + " action" + pluralize(exceptions) + ": " + + getDesc(exceptions, actions, hostnameAndPort)); this.exceptions = exceptions; this.actions = actions; @@ -102,7 +96,6 @@ public boolean mayHaveClusterIssues() { return res; } - public static String pluralize(Collection c) { return pluralize(c.size()); } @@ -111,9 +104,8 @@ public static String pluralize(int c) { return c > 1 ? "s" : ""; } - public static String getDesc(List exceptions, - List actions, - List hostnamePort) { + public static String getDesc(List exceptions, List actions, + List hostnamePort) { String s = getDesc(classifyExs(exceptions)); StringBuilder addrs = new StringBuilder(s); addrs.append("servers with issues: "); @@ -147,14 +139,12 @@ public String getExhaustiveDescription() { return errorWriter.toString(); } - public static Map classifyExs(List ths) { Map cls = new HashMap<>(); for (Throwable t : ths) { if (t == null) continue; String name = ""; - if (t instanceof DoNotRetryIOException || - t instanceof RegionTooBusyException) { + if (t instanceof DoNotRetryIOException || t instanceof RegionTooBusyException) { // If RegionTooBusyException, print message since it has Region name in it. // RegionTooBusyException message was edited to remove variance. Has regionname, server, // and why the exception; no longer has duration it waited on lock nor current memsize. 
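[Editor's note, not part of the patch] The per-row accessors named in the class javadoc above can be walked as sketched below; it assumes a batch put against an already-open Table, with illustrative names.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchFailureSketch {
      // Walks the per-row detail carried by the exception after a failed batch of Puts.
      static void putBatch(Table table, List<Put> puts) throws Exception {
        try {
          table.put(puts);
        } catch (RetriesExhaustedWithDetailsException e) {
          for (int i = 0; i < e.getNumExceptions(); i++) {
            System.err.println("row " + Bytes.toStringBinary(e.getRow(i).getRow()) + " on "
              + e.getHostnamePort(i) + " failed: " + e.getCause(i));
          }
          if (!e.mayHaveClusterIssues()) {
            System.err.println("looks like bad input rather than a cluster problem");
          }
        }
      }
    }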
@@ -172,8 +162,8 @@ public static Map classifyExs(List ths) { return cls; } - public static String getDesc(Map classificaton) { - StringBuilder classificatons =new StringBuilder(11); + public static String getDesc(Map classificaton) { + StringBuilder classificatons = new StringBuilder(11); for (Map.Entry e : classificaton.entrySet()) { classificatons.append(e.getKey()); classificatons.append(": "); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java index c6eb4cc5e684..179903706cae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +27,9 @@ @InterfaceAudience.Public public interface Row { Comparator COMPARATOR = (v1, v2) -> Bytes.compareTo(v1.getRow(), v2.getRow()); + /** * @return The row. */ - byte [] getRow(); + byte[] getRow(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java index 9f92c66d317f..ba613bb17733 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Provide a way to access the inner buffer. - * The purpose is to reduce the elapsed time to move a large number - * of elements between collections. + * Provide a way to access the inner buffer. The purpose is to reduce the elapsed time to move a + * large number of elements between collections. * @param */ @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java index 0f8b429959de..9e9d0f1754e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -21,50 +21,46 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** - * Performs multiple mutations atomically on a single row. - * - * The mutations are performed in the order in which they - * were added. - * - *

    We compare and equate mutations based off their row so be careful putting RowMutations - * into Sets or using them as keys in Maps. + * Performs multiple mutations atomically on a single row. The mutations are performed in the order + * in which they were added. + *

    + * We compare and equate mutations based off their row so be careful putting RowMutations into Sets + * or using them as keys in Maps. */ @InterfaceAudience.Public public class RowMutations implements Row { /** * Create a {@link RowMutations} with the specified mutations. - * @param mutations the mutations to send - * @return RowMutations - * @throws IOException if any row in mutations is different to another + * @param mutations the mutations to send n * @throws IOException if any row in mutations is + * different to another */ public static RowMutations of(List mutations) throws IOException { if (CollectionUtils.isEmpty(mutations)) { throw new IllegalArgumentException("Cannot instantiate a RowMutations by empty list"); } - return new RowMutations(mutations.get(0).getRow(), mutations.size()) - .add(mutations); + return new RowMutations(mutations.get(0).getRow(), mutations.size()).add(mutations); } private final List mutations; - private final byte [] row; + private final byte[] row; - public RowMutations(byte [] row) { + public RowMutations(byte[] row) { this(row, -1); } + /** * Create an atomic mutation for the specified row. - * @param row row key + * @param row row key * @param initialCapacity the initial capacity of the RowMutations */ - public RowMutations(byte [] row, int initialCapacity) { + public RowMutations(byte[] row, int initialCapacity) { this.row = Bytes.copy(Mutation.checkRow(row)); if (initialCapacity <= 0) { this.mutations = new ArrayList<>(); @@ -88,9 +84,9 @@ public RowMutations add(Mutation mutation) throws IOException { public RowMutations add(List mutations) throws IOException { for (Mutation mutation : mutations) { if (!Bytes.equals(row, mutation.getRow())) { - throw new WrongRowIOException("The row in the recently added Mutation <" + - Bytes.toStringBinary(mutation.getRow()) + "> doesn't match the original one <" + - Bytes.toStringBinary(this.row) + ">"); + throw new WrongRowIOException( + "The row in the recently added Mutation <" + Bytes.toStringBinary(mutation.getRow()) + + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">"); } } this.mutations.addAll(mutations); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java index ef5ea05e1139..02268451c7ba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +17,11 @@ */ package org.apache.hadoop.hbase.client; - import org.apache.yetus.audience.InterfaceAudience; /** - * Gets or Scans throw this exception if running without in-row scan flag - * set and row size appears to exceed max configured size (configurable via - * hbase.table.max.rowsize). + * Gets or Scans throw this exception if running without in-row scan flag set and row size appears + * to exceed max configured size (configurable via hbase.table.max.rowsize). 
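[Editor's note, not part of the patch] A usage sketch for the RowMutations API reformatted above: one Put and one Delete applied atomically to the same row. It assumes an open Table handle; names and column values are illustrative.

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowMutationsSketch {
      // Applies a Put and a Delete to the same row as one atomic unit.
      static void swapColumns(Table table, byte[] row, byte[] family) throws IOException {
        Put put = new Put(row).addColumn(family, Bytes.toBytes("new"), Bytes.toBytes("v"));
        Delete delete = new Delete(row).addColumns(family, Bytes.toBytes("old"));
        // of() rejects an empty list; add() rejects mutations whose row differs from the first one.
        RowMutations mutations = RowMutations.of(Arrays.asList(put, delete));
        table.mutateRow(mutations);
      }
    }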
*/ @InterfaceAudience.Public public class RowTooBigException extends DoNotRetryRegionException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java index 660d74e74c28..2c320d3a9d1d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java @@ -86,9 +86,7 @@ private String buildConnectionString(Configuration conf) throws UnknownHostExcep if (StringUtils.isBlank(configuredBootstrapNodes)) { return MasterRegistry.getConnectionString(conf); } - return Splitter.on(ADDRS_CONF_SEPARATOR) - .trimResults() - .splitToStream(configuredBootstrapNodes) + return Splitter.on(ADDRS_CONF_SEPARATOR).trimResults().splitToStream(configuredBootstrapNodes) .collect(Collectors.joining(String.valueOf(ADDRS_CONF_SEPARATOR))); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index dbbc4e6d70ce..51ac7e8d3140 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -89,7 +87,7 @@ public class Scan extends Query { private byte[] startRow = HConstants.EMPTY_START_ROW; private boolean includeStartRow = true; - private byte[] stopRow = HConstants.EMPTY_END_ROW; + private byte[] stopRow = HConstants.EMPTY_END_ROW; private boolean includeStopRow = false; private int maxVersions = 1; private int batch = -1; @@ -126,18 +124,17 @@ public class Scan extends Query { private boolean cacheBlocks = true; private boolean reversed = false; private TimeRange tr = TimeRange.allTime(); - private Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); private Boolean asyncPrefetch = null; /** - * Parameter name for client scanner sync/async prefetch toggle. - * When using async scanner, prefetching data from the server is done at the background. - * The parameter currently won't have any effect in the case that the user has set - * Scan#setSmall or Scan#setReversed + * Parameter name for client scanner sync/async prefetch toggle. When using async scanner, + * prefetching data from the server is done at the background. The parameter currently won't have + * any effect in the case that the user has set Scan#setSmall or Scan#setReversed */ public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = - "hbase.client.scanner.async.prefetch"; + "hbase.client.scanner.async.prefetch"; /** * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}. @@ -166,18 +163,18 @@ public class Scan extends Query { /** * Create a Scan operation across all rows. */ - public Scan() {} + public Scan() { + } /** * Creates a new instance of this class while copying all values. - * - * @param scan The scan instance to copy from. + * @param scan The scan instance to copy from. * @throws IOException When copying the values fails. 
*/ public Scan(Scan scan) throws IOException { startRow = scan.getStartRow(); includeStartRow = scan.includeStartRow(); - stopRow = scan.getStopRow(); + stopRow = scan.getStopRow(); includeStopRow = scan.includeStopRow(); maxVersions = scan.getMaxVersions(); batch = scan.getBatch(); @@ -195,8 +192,8 @@ public Scan(Scan scan) throws IOException { allowPartialResults = scan.getAllowPartialResults(); tr = scan.getTimeRange(); // TimeRange is immutable Map> fams = scan.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); + for (Map.Entry> entry : fams.entrySet()) { + byte[] fam = entry.getKey(); NavigableSet cols = entry.getValue(); if (cols != null && cols.size() > 0) { for (byte[] col : cols) { @@ -255,17 +252,16 @@ public Scan(Get get) { public boolean isGetScan() { return includeStartRow && includeStopRow - && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow); + && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow); } /** * Get all columns from the specified family. *

    * Overrides previous calls to addColumn for this family. - * @param family family name - * @return this + * @param family family name n */ - public Scan addFamily(byte [] family) { + public Scan addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; @@ -275,13 +271,12 @@ public Scan addFamily(byte [] family) { * Get the column from the specified family with the specified qualifier. *

    * Overrides previous calls to addFamily for this family. - * @param family family name - * @param qualifier column qualifier - * @return this + * @param family family name + * @param qualifier column qualifier n */ - public Scan addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { + public Scan addColumn(byte[] family, byte[] qualifier) { + NavigableSet set = familyMap.get(family); + if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } @@ -293,15 +288,13 @@ public Scan addColumn(byte [] family, byte [] qualifier) { } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the default. + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note, + * default maximum versions to return is 1. If your time range spans more than one version and you + * want all versions returned, up the number of versions beyond the default. * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @see #readAllVersions() - * @see #readVersions(int) - * @return this + * @see #readVersions(int) n */ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { tr = TimeRange.between(minStamp, maxStamp); @@ -309,19 +302,17 @@ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { } /** - * Get versions of columns with the specified timestamp. Note, default maximum - * versions to return is 1. If your time range spans more than one version - * and you want all versions returned, up the number of versions beyond the - * defaut. + * Get versions of columns with the specified timestamp. Note, default maximum versions to return + * is 1. If your time range spans more than one version and you want all versions returned, up the + * number of versions beyond the defaut. * @param timestamp version timestamp * @see #readAllVersions() - * @see #readVersions(int) - * @return this + * @see #readVersions(int) n */ public Scan setTimestamp(long timestamp) { try { tr = TimeRange.at(timestamp); - } catch(Exception e) { + } catch (Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; @@ -330,7 +321,8 @@ public Scan setTimestamp(long timestamp) { return this; } - @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } @@ -340,14 +332,13 @@ public Scan setTimestamp(long timestamp) { * If the specified row does not exist, the Scanner will start from the next closest row after the * specified row. *
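[Editor's note, not part of the patch] A short sketch combining the addColumn and setTimeRange calls covered by this hunk; it assumes an open Table handle, and the class name is illustrative.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnScanSketch {
      // Scans a single column, restricted to the timestamp window [from, to).
      static void scanColumn(Table table, byte[] family, byte[] qualifier, long from, long to)
          throws IOException {
        Scan scan = new Scan().addColumn(family, qualifier).setTimeRange(from, to);
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow()));
          }
        }
      }
    }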

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param startRow row to start scanner at or after - * @return this - * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if + * startRow does not meet criteria for a row key (when length exceeds + * {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStartRow(byte[] startRow) { return withStartRow(startRow, true); @@ -359,20 +350,19 @@ public Scan withStartRow(byte[] startRow) { * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner * will start from the next closest row after the specified row. *

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param startRow row to start scanner at or after - * @param inclusive whether we should include the start row when scan - * @return this - * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param startRow row to start scanner at or after + * @param inclusive whether we should include the start row when scan n * @throws + * IllegalArgumentException if startRow does not meet criteria for a row key + * (when length exceeds {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStartRow(byte[] startRow, boolean inclusive) { if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) { throw new IllegalArgumentException("startRow's length must be less than or equal to " - + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); + + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); } this.startRow = startRow; this.includeStartRow = inclusive; @@ -384,14 +374,13 @@ public Scan withStartRow(byte[] startRow, boolean inclusive) { *

    * The scan will include rows that are lexicographically less than the provided stopRow. *

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param stopRow row to end at (exclusive) - * @return this - * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does + * not meet criteria for a row key (when length exceeds + * {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStopRow(byte[] stopRow) { return withStopRow(stopRow, false); @@ -403,20 +392,19 @@ public Scan withStopRow(byte[] stopRow) { * The scan will include rows that are lexicographically less than (or equal to if * {@code inclusive} is {@code true}) the provided stopRow. *
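[Editor's note, not part of the patch] The withStartRow/withStopRow pair reformatted here defines a half-open range by default; a minimal sketch, assuming an open Table and illustrative names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class RangeScanSketch {
      // Scans [startRow, stopRow): start inclusive by default, stop exclusive by default.
      static long countRange(Table table, byte[] startRow, byte[] stopRow) throws IOException {
        Scan scan = new Scan().withStartRow(startRow).withStopRow(stopRow);
        long rows = 0;
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r = scanner.next(); r != null; r = scanner.next()) {
            rows++;
          }
        }
        return rows;
      }
    }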

    - * Note: Do NOT use this in combination with - * {@link #setRowPrefixFilter(byte[])} or {@link #setStartStopRowForPrefixScan(byte[])}. - * Doing so will make the scan result unexpected or even undefined. + * Note: Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or + * {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result + * unexpected or even undefined. *

    - * @param stopRow row to end at - * @param inclusive whether we should include the stop row when scan - * @return this - * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length - * exceeds {@link HConstants#MAX_ROW_LENGTH}) + * @param stopRow row to end at + * @param inclusive whether we should include the stop row when scan n * @throws + * IllegalArgumentException if stopRow does not meet criteria for a row key (when + * length exceeds {@link HConstants#MAX_ROW_LENGTH}) */ public Scan withStopRow(byte[] stopRow, boolean inclusive) { if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) { throw new IllegalArgumentException("stopRow's length must be less than or equal to " - + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); + + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); } this.stopRow = stopRow; this.includeStopRow = inclusive; @@ -424,19 +412,26 @@ public Scan withStopRow(byte[] stopRow, boolean inclusive) { } /** - *

Set a filter (using stopRow and startRow) so the result set only contains rows where the
- * rowKey starts with the specified prefix.
- * This is a utility method that converts the desired rowPrefix into the appropriate values
- * for the startRow and stopRow to achieve the desired result.
- * This can safely be used in combination with setFilter.
- * This CANNOT be used in combination with withStartRow and/or withStopRow.
- * Such a combination will yield unexpected and even undefined results.
- * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.)
- * @return this
- * @deprecated since 2.5.0, will be removed in 4.0.0.
- * The name of this method is considered to be confusing as it does not
- * use a {@link Filter} but uses setting the startRow and stopRow instead.
- * Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
+ *
+ * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
+ * starts with the specified prefix.
+ *
+ *
+ * This is a utility method that converts the desired rowPrefix into the appropriate values for
+ * the startRow and stopRow to achieve the desired result.
+ *
+ *
+ * This can safely be used in combination with setFilter.
+ *
+ *
+ * This CANNOT be used in combination with withStartRow and/or withStopRow. Such
+ * a combination will yield unexpected and even undefined results.
+ *
+ * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n
+ * * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method
+ * is considered to be confusing as it does not use a {@link Filter} but uses
+ * setting the startRow and stopRow instead. Use
+ * {@link #setStartStopRowForPrefixScan(byte[])} instead.
  */
 @Deprecated
 public Scan setRowPrefixFilter(byte[] rowPrefix) {
@@ -444,15 +439,22 @@
 }

 /**
- * Set a filter (using stopRow and startRow) so the result set only contains rows where the
- * rowKey starts with the specified prefix.
- * This is a utility method that converts the desired rowPrefix into the appropriate values
- * for the startRow and stopRow to achieve the desired result.
- * This can safely be used in combination with setFilter.
- * This CANNOT be used in combination with withStartRow and/or withStopRow.
- * Such a combination will yield unexpected and even undefined results.
- * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.)
- * @return this
+ *
+ * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
+ * starts with the specified prefix.
+ *
+ *
+ * This is a utility method that converts the desired rowPrefix into the appropriate values for
+ * the startRow and stopRow to achieve the desired result.
+ *
+ *
+ * This can safely be used in combination with setFilter.
+ *
+ *
+ * This CANNOT be used in combination with withStartRow and/or withStopRow. Such
+ * a combination will yield unexpected and even undefined results.
+ *
+ *
    + * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n */ public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) { if (rowPrefix == null) { @@ -466,8 +468,7 @@ public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) { } /** - * Get all available versions. - * @return this + * Get all available versions. n */ public Scan readAllVersions() { this.maxVersions = Integer.MAX_VALUE; @@ -476,8 +477,7 @@ public Scan readAllVersions() { /** * Get up to the specified number of versions of each column. - * @param versions specified number of versions for each column - * @return this + * @param versions specified number of versions for each column n */ public Scan readVersions(int versions) { this.maxVersions = versions; @@ -485,19 +485,18 @@ public Scan readVersions(int versions) { } /** - * Set the maximum number of cells to return for each call to next(). Callers should be aware - * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. - * If you don't allow partial results, the number of cells in each Result must equal to your - * batch setting unless it is the last Result for current row. So this method is helpful in paging - * queries. If you just want to prevent OOM at client, use setAllowPartialResults(true) is better. + * Set the maximum number of cells to return for each call to next(). Callers should be aware that + * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow + * partial results, the number of cells in each Result must equal to your batch setting unless it + * is the last Result for current row. So this method is helpful in paging queries. If you just + * want to prevent OOM at client, use setAllowPartialResults(true) is better. * @param batch the maximum number of values * @see Result#mayHaveMoreCellsInRow() */ public Scan setBatch(int batch) { if (this.hasFilter() && this.filter.hasFilterRow()) { throw new IncompatibleFilterException( - "Cannot set batch on a scan using a filter" + - " that returns true for filter.hasFilterRow"); + "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow"); } this.batch = batch; return this; @@ -522,10 +521,9 @@ public Scan setRowOffsetPerColumnFamily(int offset) { } /** - * Set the number of rows for caching that will be passed to scanners. - * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will - * apply. - * Higher caching values will enable faster scanners but will use more memory. + * Set the number of rows for caching that will be passed to scanners. If not set, the + * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher + * caching values will enable faster scanners but will use more memory. * @param caching the number of rows for caching */ public Scan setCaching(int caching) { @@ -541,10 +539,9 @@ public long getMaxResultSize() { } /** - * Set the maximum result size. The default is -1; this means that no specific - * maximum result size will be set for this scan, and the global configured - * value will be used instead. (Defaults to unlimited). - * + * Set the maximum result size. The default is -1; this means that no specific maximum result size + * will be set for this scan, and the global configured value will be used instead. (Defaults to + * unlimited). * @param maxResultSize The maximum result size in bytes. 
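[Editor's note, not part of the patch] A prefix-scan sketch tying together setStartStopRowForPrefixScan, setCaching and setMaxResultSize from this hunk; it assumes an open Table, and the specific caching and size values are only illustrative.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixScanSketch {
      // Derives start/stop rows from a prefix instead of combining withStartRow/withStopRow by hand.
      static void scanPrefix(Table table, String prefix) throws IOException {
        Scan scan = new Scan()
          .setStartStopRowForPrefixScan(Bytes.toBytes(prefix))
          .setCaching(100)                    // rows fetched per RPC
          .setMaxResultSize(2 * 1024 * 1024); // soft size cap per response, in bytes
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow()));
          }
        }
      }
    }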
*/ public Scan setMaxResultSize(long maxResultSize) { @@ -560,19 +557,17 @@ public Scan setFilter(Filter filter) { /** * Setting the familyMap - * @param familyMap map of family to qualifier - * @return this + * @param familyMap map of family to qualifier n */ - public Scan setFamilyMap(Map> familyMap) { + public Scan setFamilyMap(Map> familyMap) { this.familyMap = familyMap; return this; } /** - * Getting the familyMap - * @return familyMap + * Getting the familyMap n */ - public Map> getFamilyMap() { + public Map> getFamilyMap() { return this.familyMap; } @@ -580,7 +575,7 @@ public Scan setFamilyMap(Map> familyMap) { * @return the number of families in familyMap */ public int numFamilies() { - if(hasFamilies()) { + if (hasFamilies()) { return this.familyMap.size(); } return 0; @@ -597,7 +592,7 @@ public boolean hasFamilies() { * @return the keys of the familyMap */ public byte[][] getFamilies() { - if(hasFamilies()) { + if (hasFamilies()) { return this.familyMap.keySet().toArray(new byte[0][0]); } return null; @@ -606,7 +601,7 @@ public byte[][] getFamilies() { /** * @return the startrow */ - public byte [] getStartRow() { + public byte[] getStartRow() { return this.startRow; } @@ -653,8 +648,7 @@ public int getMaxResultsPerColumnFamily() { } /** - * Method for retrieving the scan's offset per row per column - * family (#kvs to be skipped) + * Method for retrieving the scan's offset per row per column family (#kvs to be skipped) * @return row offset */ public int getRowOffsetPerColumnFamily() { @@ -669,14 +663,14 @@ public int getCaching() { } /** - * @return TimeRange + * n */ public TimeRange getTimeRange() { return this.tr; } /** - * @return RowFilter + * n */ @Override public Filter getFilter() { @@ -693,12 +687,9 @@ public boolean hasFilter() { /** * Set whether blocks should be cached for this Scan. *

    - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached + * This is true by default. When true, default settings of the table and family are used (this + * will never override caching blocks if the block cache is disabled for that family or entirely). + * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Scan setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; @@ -707,8 +698,7 @@ public Scan setCacheBlocks(boolean cacheBlocks) { /** * Get whether blocks should be cached for this Scan. - * @return true if default caching should be used, false if blocks should not - * be cached + * @return true if default caching should be used, false if blocks should not be cached */ public boolean getCacheBlocks() { return cacheBlocks; @@ -718,9 +708,7 @@ public boolean getCacheBlocks() { * Set whether this scan is a reversed one *

    * This is false by default which means forward(normal) scan. - * - * @param reversed if true, scan will be backward order - * @return this + * @param reversed if true, scan will be backward order n */ public Scan setReversed(boolean reversed) { this.reversed = reversed; @@ -737,12 +725,9 @@ public boolean isReversed() { /** * Setting whether the caller wants to see the partial results when server returns - * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. - * By default this value is false and the complete results will be assembled client side - * before being delivered to the caller. - * @param allowPartialResults - * @return this - * @see Result#mayHaveMoreCellsInRow() + * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By + * default this value is false and the complete results will be assembled client side before being + * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow() * @see #setBatch(int) */ public Scan setAllowPartialResults(final boolean allowPartialResults) { @@ -765,34 +750,30 @@ public Scan setLoadColumnFamiliesOnDemand(boolean value) { } /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map + * Compile the table and column family (i.e. schema) information into a String. Useful for parsing + * and aggregation by debugging, logging, and administration tools. n */ @Override public Map getFingerprint() { Map map = new HashMap<>(); List families = new ArrayList<>(); - if(this.familyMap.isEmpty()) { + if (this.familyMap.isEmpty()) { map.put("families", "ALL"); return map; } else { map.put("families", families); } - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. - * @param maxCols a limit on the number of columns output prior to truncation - * @return Map + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. + * @param maxCols a limit on the number of columns output prior to truncation n */ @Override public Map toMap(int maxCols) { @@ -816,11 +797,10 @@ public Map toMap(int maxCols) { map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and list out up to maxCols columns - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { List columns = new ArrayList<>(); familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); - if(entry.getValue() == null) { + if (entry.getValue() == null) { colCount++; --maxCols; columns.add("ALL"); @@ -829,7 +809,7 @@ public Map toMap(int maxCols) { if (maxCols <= 0) { continue; } - for (byte [] column : entry.getValue()) { + for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -849,13 +829,10 @@ public Map toMap(int maxCols) { } /** - * Enable/disable "raw" mode for this scan. - * If "raw" is enabled the scan will return all - * delete marker and deleted rows that have not - * been collected, yet. 
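[Editor's note, not part of the patch] A reversed-scan sketch using setReversed and setCacheBlocks from the hunks above; it assumes an open Table and an already-known upper bound row, with illustrative names.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReversedScanSketch {
      // Walks rows backwards; for a reversed scan the start row is the lexicographically larger bound.
      static void newestFirst(Table table, byte[] upperBound) throws IOException {
        Scan scan = new Scan()
          .withStartRow(upperBound)
          .setReversed(true)
          .setCacheBlocks(false); // a one-off reporting scan need not pollute the block cache
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow()));
          }
        }
      }
    }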
- * This is mostly useful for Scan on column families - * that have KEEP_DELETED_ROWS enabled. - * It is an error to specify any column when "raw" is set. + * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete + * marker and deleted rows that have not been collected, yet. This is mostly useful for Scan on + * column families that have KEEP_DELETED_ROWS enabled. It is an error to specify any column when + * "raw" is set. * @param raw True/False to enable/disable "raw" mode. */ public Scan setRaw(boolean raw) { @@ -959,8 +936,7 @@ public int getLimit() { * reaches this value. *
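Illustrative sketch, not part of this change: how the Scan options documented in the hunks above (block caching, partial results, raw mode) are typically combined by a caller. The connection, the table name "t1" and the family "cf" are assumptions, as are the usual org.apache.hadoop.hbase.client imports.

static void scanExamples(Connection connection) throws IOException {
  // One-off scan that skips the block cache and allows partial Results.
  Scan scan = new Scan()
    .addFamily(Bytes.toBytes("cf"))
    .setCacheBlocks(false)
    .setAllowPartialResults(true);
  // Raw mode additionally returns delete markers and not-yet-collected deleted cells; it is an
  // error to specify any column when raw is set.
  Scan rawScan = new Scan().setRaw(true).setCacheBlocks(false);
  try (Table table = connection.getTable(TableName.valueOf("t1"));
    ResultScanner scanner = table.getScanner(scan)) {
    for (Result partial : scanner) {
      // With partial results allowed, check partial.mayHaveMoreCellsInRow() before treating the
      // Result as a complete row.
    }
  }
}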

    * This condition will be tested at last, after all other conditions such as stopRow, filter, etc. - * @param limit the limit of rows for this scan - * @return this + * @param limit the limit of rows for this scan n */ public Scan setLimit(int limit) { this.limit = limit; @@ -969,8 +945,7 @@ public Scan setLimit(int limit) { /** * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also - * set {@code readType} to {@link ReadType#PREAD}. - * @return this + * set {@code readType} to {@link ReadType#PREAD}. n */ public Scan setOneRowLimit() { return setLimit(1).setReadType(ReadType.PREAD); @@ -978,7 +953,9 @@ public Scan setOneRowLimit() { @InterfaceAudience.Public public enum ReadType { - DEFAULT, STREAM, PREAD + DEFAULT, + STREAM, + PREAD } /** @@ -992,8 +969,7 @@ public ReadType getReadType() { * Set the read type for this scan. *
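Illustrative sketch, not part of this change: the limit and read-type settings documented above, under the same assumed imports.

static Result firstRowAtOrAfter(Table table, byte[] startRow) throws IOException {
  // setOneRowLimit() is shorthand for setLimit(1) plus setReadType(ReadType.PREAD).
  Scan scan = new Scan().withStartRow(startRow).setOneRowLimit();
  try (ResultScanner scanner = table.getScanner(scan)) {
    return scanner.next(); // null when no row exists at or after startRow
  }
}

static Scan boundedStreamingScan() {
  // Explicit limit with streaming reads; as noted above, the server may still choose pread,
  // for example when the scan is really a get.
  return new Scan().setLimit(100).setReadType(Scan.ReadType.STREAM);
}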

    * Notice that we may choose to use pread even if you specific {@link ReadType#STREAM} here. For - * example, we will always use pread if this is a get scan. - * @return this + * example, we will always use pread if this is a get scan. n */ public Scan setReadType(ReadType readType) { this.readType = readType; @@ -1026,20 +1002,14 @@ Scan resetMvccReadPoint() { * When the server is slow or we scan a table with many deleted data or we use a sparse filter, * the server will response heartbeat to prevent timeout. However the scanner will return a Result * only when client can do it. So if there are many heartbeats, the blocking time on - * ResultScanner#next() may be very long, which is not friendly to online services. - * - * Set this to true then you can get a special Result whose #isCursor() returns true and is not - * contains any real data. It only tells you where the server has scanned. You can call next - * to continue scanning or open a new scanner with this row key as start row whenever you want. - * - * Users can get a cursor when and only when there is a response from the server but we can not - * return a Result to users, for example, this response is a heartbeat or there are partial cells - * but users do not allow partial result. - * - * Now the cursor is in row level which means the special Result will only contains a row key. - * {@link Result#isCursor()} - * {@link Result#getCursor()} - * {@link Cursor} + * ResultScanner#next() may be very long, which is not friendly to online services. Set this to + * true then you can get a special Result whose #isCursor() returns true and is not contains any + * real data. It only tells you where the server has scanned. You can call next to continue + * scanning or open a new scanner with this row key as start row whenever you want. Users can get + * a cursor when and only when there is a response from the server but we can not return a Result + * to users, for example, this response is a heartbeat or there are partial cells but users do not + * allow partial result. Now the cursor is in row level which means the special Result will only + * contains a row key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} */ public Scan setNeedCursorResult(boolean needCursorResult) { this.needCursorResult = needCursorResult; @@ -1051,11 +1021,9 @@ public boolean isNeedCursorResult() { } /** - * Create a new Scan with a cursor. It only set the position information like start row key. - * The others (like cfs, stop row, limit) should still be filled in by the user. - * {@link Result#isCursor()} - * {@link Result#getCursor()} - * {@link Cursor} + * Create a new Scan with a cursor. It only set the position information like start row key. The + * others (like cfs, stop row, limit) should still be filled in by the user. + * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} */ public static Scan createScanFromCursor(Cursor cursor) { return new Scan().withStartRow(cursor.getRow()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java index 583c45593124..7c6a9f27ad33 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
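Illustrative sketch, not part of this change: consuming cursor Results as described by the setNeedCursorResult and createScanFromCursor javadoc above; the table handle and imports are assumed.

static void scanWithCursors(Table table) throws IOException {
  Scan scan = new Scan().setNeedCursorResult(true);
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result result : scanner) {
      if (result.isCursor()) {
        // Heartbeat-style answer: no cells, only the row key the server has reached. A new scan
        // can be opened from here, but cfs, stop row, limit etc. must be filled in again.
        Scan resumeFromHere = Scan.createScanFromCursor(result.getCursor());
        continue;
      }
      // A normal Result carrying cells.
    }
  }
}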
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,7 +38,7 @@ interface ScanResultCache { /** * Add the given results to cache and get valid results back. - * @param results the results of a scan next. Must not be null. + * @param results the results of a scan next. Must not be null. * @param isHeartbeatMessage indicate whether the results is gotten from a heartbeat response. * @return valid results, never null. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java index be3108bd34cb..b574b2c2bd5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java index 538cf9d96383..e16348b0dc90 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java index 12e3e3bd990f..8eeb2d55ef66 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,10 @@ public ServerStatistics getStats(ServerName server) { } public static ServerStatisticTracker create(Configuration conf) { - if (!conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, - HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE)) { + if ( + !conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, + HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE) + ) { return null; } return new ServerStatisticTracker(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java index 1d1bf6e3c6d7..0f73e06b95a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerType.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Select server type i.e destination for RPC request associated with ring buffer. - * e.g slow/large log records are maintained by HRegionServer, whereas balancer decisions - * are maintained by HMaster. + * Select server type i.e destination for RPC request associated with ring buffer. e.g slow/large + * log records are maintained by HRegionServer, whereas balancer decisions are maintained by + * HMaster. */ @InterfaceAudience.Public public enum ServerType { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java index d246a93abb7d..501f412bc575 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,8 +53,8 @@ public interface ServiceCaller { /** * Represent the actual protobuf rpc call. - * @param stub the asynchronous stub - * @param controller the rpc controller, has already been prepared for you + * @param stub the asynchronous stub + * @param controller the rpc controller, has already been prepared for you * @param rpcCallback the rpc callback, has already been prepared for you */ void call(S stub, RpcController controller, RpcCallback rpcCallback); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java index 1a184da86e45..a28b98a7673f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +17,8 @@ */ package org.apache.hadoop.hbase.client; +import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; + import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Collection; @@ -33,19 +34,17 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdge; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; -import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; -import org.apache.hadoop.hbase.util.EnvironmentEdge; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Holds back the requests if they reach any thresholds. @@ -57,7 +56,8 @@ class SimpleRequestController implements RequestController { /** * The maximum heap size for each request. 
*/ - public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = "hbase.client.max.perrequest.heapsize"; + public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = + "hbase.client.max.perrequest.heapsize"; /** * Default value of {@link #HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE}. @@ -80,10 +80,11 @@ class SimpleRequestController implements RequestController { /** * Default value of {@link #HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE}. */ - static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE; + static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = + DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE; final AtomicLong tasksInProgress = new AtomicLong(0); - final ConcurrentMap taskCounterPerRegion - = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); + final ConcurrentMap taskCounterPerRegion = + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); final ConcurrentMap taskCounterPerServer = new ConcurrentHashMap<>(); /** * The number of tasks simultaneously executed on the cluster. @@ -100,10 +101,9 @@ class SimpleRequestController implements RequestController { private final long maxRowsPerRequest; private final long maxHeapSizeSubmit; /** - * The number of tasks we run in parallel on a single region. With 1 (the - * default) , we ensure that the ordering of the queries is respected: we - * don't start a set of operations on a region before the previous one is - * done. As well, this limits the pressure we put on the region server. + * The number of tasks we run in parallel on a single region. With 1 (the default) , we ensure + * that the ordering of the queries is respected: we don't start a set of operations on a region + * before the previous one is done. As well, this limits the pressure we put on the region server. 
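Illustrative sketch, not part of this change: the client-side limits consumed by SimpleRequestController. The three HConstants keys are the ones read in the constructor below; the heapsize key is the constant defined above. The values are arbitrary examples, not recommendations.

static Configuration throttledClientConf() {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, 100);
  conf.setInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, 5);
  // 1 (the default) keeps per-region ordering of operations, as the javadoc above explains.
  conf.setInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, 1);
  conf.setLong("hbase.client.max.perrequest.heapsize", 4L * 1024 * 1024);
  return conf;
}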
*/ final int maxConcurrentTasksPerRegion; @@ -113,37 +113,32 @@ class SimpleRequestController implements RequestController { final int maxConcurrentTasksPerServer; private final int thresholdToLogUndoneTaskDetails; public static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = - "hbase.client.threshold.log.details"; + "hbase.client.threshold.log.details"; private static final int DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = 10; public static final String THRESHOLD_TO_LOG_REGION_DETAILS = - "hbase.client.threshold.log.region.details"; + "hbase.client.threshold.log.region.details"; private static final int DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS = 2; private final int thresholdToLogRegionDetails; + SimpleRequestController(final Configuration conf) { - this.maxTotalConcurrentTasks = checkAndGet(conf, - HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); - this.maxConcurrentTasksPerServer = checkAndGet(conf, - HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS); - this.maxConcurrentTasksPerRegion = checkAndGet(conf, - HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS); - this.maxHeapSizePerRequest = checkAndGet(conf, - HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); - this.maxRowsPerRequest = checkAndGet(conf, - HBASE_CLIENT_MAX_PERREQUEST_ROWS, - DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_ROWS); - this.maxHeapSizeSubmit = checkAndGet(conf, - HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, - DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE); - this.thresholdToLogUndoneTaskDetails = conf.getInt( - THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS, - DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS); - this.thresholdToLogRegionDetails = conf.getInt( - THRESHOLD_TO_LOG_REGION_DETAILS, - DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS); + this.maxTotalConcurrentTasks = checkAndGet(conf, HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); + this.maxConcurrentTasksPerServer = + checkAndGet(conf, HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS); + this.maxConcurrentTasksPerRegion = + checkAndGet(conf, HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS); + this.maxHeapSizePerRequest = checkAndGet(conf, HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, + DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); + this.maxRowsPerRequest = + checkAndGet(conf, HBASE_CLIENT_MAX_PERREQUEST_ROWS, DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_ROWS); + this.maxHeapSizeSubmit = + checkAndGet(conf, HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE); + this.thresholdToLogUndoneTaskDetails = conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS, + DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS); + this.thresholdToLogRegionDetails = + conf.getInt(THRESHOLD_TO_LOG_REGION_DETAILS, DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS); } private static int checkAndGet(Configuration conf, String key, int defaultValue) { @@ -217,12 +212,8 @@ public void reset() throws InterruptedIOException { @Override public Checker newChecker() { List checkers = new ArrayList<>(4); - checkers.add(new TaskCountChecker(maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, - taskCounterPerServer, - taskCounterPerRegion)); + checkers.add(new TaskCountChecker(maxTotalConcurrentTasks, maxConcurrentTasksPerServer, + maxConcurrentTasksPerRegion, 
tasksInProgress, taskCounterPerServer, taskCounterPerRegion)); checkers.add(new RequestHeapSizeChecker(maxHeapSizePerRequest)); checkers.add(new SubmittedSizeChecker(maxHeapSizeSubmit)); checkers.add(new RequestRowsChecker(maxRowsPerRequest)); @@ -235,9 +226,9 @@ public void incTaskCounters(Collection regions, ServerName sn) { computeIfAbsent(taskCounterPerServer, sn, AtomicInteger::new).incrementAndGet(); - regions.forEach((regBytes) - -> computeIfAbsent(taskCounterPerRegion, regBytes, AtomicInteger::new).incrementAndGet() - ); + regions + .forEach((regBytes) -> computeIfAbsent(taskCounterPerRegion, regBytes, AtomicInteger::new) + .incrementAndGet()); } @Override @@ -260,8 +251,8 @@ public long getNumberOfTasksInProgress() { } @Override - public void waitForMaximumCurrentTasks(long max, long id, - int periodToTrigger, Consumer trigger) throws InterruptedIOException { + public void waitForMaximumCurrentTasks(long max, long id, int periodToTrigger, + Consumer trigger) throws InterruptedIOException { assert max >= 0; long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress, oldInProgress = Long.MAX_VALUE; @@ -284,8 +275,8 @@ public void waitForMaximumCurrentTasks(long max, long id, } } } catch (InterruptedException e) { - throw new InterruptedIOException("#" + id + ", interrupted." + - " currentNumberOfTask=" + currentInProgress); + throw new InterruptedIOException( + "#" + id + ", interrupted." + " currentNumberOfTask=" + currentInProgress); } } } @@ -313,13 +304,14 @@ private void logDetailsOfUndoneTasks(long taskInProgress) { } @Override - public void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger) throws InterruptedIOException { + public void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger) + throws InterruptedIOException { waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1, id, periodToTrigger, trigger); } /** - * limit the heapsize of total submitted data. Reduce the limit of heapsize - * for submitting quickly if there is no running task. + * limit the heapsize of total submitted data. Reduce the limit of heapsize for submitting quickly + * if there is no running task. 
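Illustrative sketch, not part of this change: a toy checker in the spirit of the submitted-heapsize limit documented just above, written against the RowChecker contract (canTakeOperation / notifyFinal) that appears later in this file; the reset() method is assumed from the checker implementations shown here.

static class TotalHeapSizeChecker implements RowChecker {
  private final long maxHeapSize;
  private long committedSize = 0;

  TotalHeapSizeChecker(long maxHeapSize) {
    this.maxHeapSize = maxHeapSize;
  }

  @Override
  public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) {
    // Accept at least one row, then stop once the submitted heap size would exceed the cap.
    return committedSize == 0 || committedSize + heapSizeOfRow <= maxHeapSize
      ? ReturnCode.INCLUDE
      : ReturnCode.SKIP;
  }

  @Override
  public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow) {
    // Only the final decision updates the state; an INCLUDE vote may have been reversed.
    if (code == ReturnCode.INCLUDE) {
      committedSize += heapSizeOfRow;
    }
  }

  @Override
  public void reset() {
    committedSize = 0;
  }
}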
*/ static class SubmittedSizeChecker implements RowChecker { @@ -356,7 +348,7 @@ public void reset() { */ static class TaskCountChecker implements RowChecker { - private static final long MAX_WAITING_TIME = 1000; //ms + private static final long MAX_WAITING_TIME = 1000; // ms private final Set regionsIncluded = new HashSet<>(); private final Set serversIncluded = new HashSet<>(); private final int maxConcurrentTasksPerRegion; @@ -367,12 +359,10 @@ static class TaskCountChecker implements RowChecker { private final Set busyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR); private final AtomicLong tasksInProgress; - TaskCountChecker(final int maxTotalConcurrentTasks, - final int maxConcurrentTasksPerServer, - final int maxConcurrentTasksPerRegion, - final AtomicLong tasksInProgress, - final Map taskCounterPerServer, - final Map taskCounterPerRegion) { + TaskCountChecker(final int maxTotalConcurrentTasks, final int maxConcurrentTasksPerServer, + final int maxConcurrentTasksPerRegion, final AtomicLong tasksInProgress, + final Map taskCounterPerServer, + final Map taskCounterPerRegion) { this.maxTotalConcurrentTasks = maxTotalConcurrentTasks; this.maxConcurrentTasksPerRegion = maxConcurrentTasksPerRegion; this.maxConcurrentTasksPerServer = maxConcurrentTasksPerServer; @@ -408,18 +398,15 @@ private void waitForRegion() throws InterruptedIOException { tasksInProgress.wait(10); } } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted." - + " tasksInProgress=" + tasksInProgress); + throw new InterruptedIOException("Interrupted." + " tasksInProgress=" + tasksInProgress); } } } /** - * 1) check the regions is allowed. 2) check the concurrent tasks for - * regions. 3) check the total concurrent tasks. 4) check the concurrent - * tasks for server. - * - * @param loc the destination of data + * 1) check the regions is allowed. 2) check the concurrent tasks for regions. 3) check the + * total concurrent tasks. 4) check the concurrent tasks for server. + * @param loc the destination of data * @param heapSizeOfRow the data size * @return either Include {@link RequestController.ReturnCode} or skip * {@link RequestController.ReturnCode} @@ -436,8 +423,8 @@ public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { // Too many tasks on this region already. return ReturnCode.SKIP; } - int newServers = serversIncluded.size() - + (serversIncluded.contains(loc.getServerName()) ? 0 : 1); + int newServers = + serversIncluded.size() + (serversIncluded.contains(loc.getServerName()) ? 0 : 1); if ((newServers + tasksInProgress.get()) > maxTotalConcurrentTasks) { // Too many tasks. return ReturnCode.SKIP; @@ -479,8 +466,8 @@ public void reset() { @Override public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { - long currentRows = serverRows.containsKey(loc.getServerName()) - ? serverRows.get(loc.getServerName()) : 0L; + long currentRows = + serverRows.containsKey(loc.getServerName()) ? serverRows.get(loc.getServerName()) : 0L; // accept at least one row if (currentRows == 0 || currentRows < maxRowsPerRequest) { return ReturnCode.INCLUDE; @@ -491,8 +478,8 @@ public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { @Override public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow) { if (code == ReturnCode.INCLUDE) { - long currentRows = serverRows.containsKey(loc.getServerName()) - ? serverRows.get(loc.getServerName()) : 0L; + long currentRows = + serverRows.containsKey(loc.getServerName()) ? 
serverRows.get(loc.getServerName()) : 0L; serverRows.put(loc.getServerName(), currentRows + 1); } } @@ -519,7 +506,8 @@ public void reset() { public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { // Is it ok for limit of request size? long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName()) - ? serverRequestSizes.get(loc.getServerName()) : 0L; + ? serverRequestSizes.get(loc.getServerName()) + : 0L; // accept at least one request if (currentRequestSize == 0 || currentRequestSize + heapSizeOfRow <= maxHeapSizePerRequest) { return ReturnCode.INCLUDE; @@ -531,7 +519,8 @@ public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow) { if (code == ReturnCode.INCLUDE) { long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName()) - ? serverRequestSizes.get(loc.getServerName()) : 0L; + ? serverRequestSizes.get(loc.getServerName()) + : 0L; serverRequestSizes.put(loc.getServerName(), currentRequestSize + heapSizeOfRow); } } @@ -545,11 +534,10 @@ interface RowChecker { ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow); /** - * Add the final ReturnCode to the checker. The ReturnCode may be reversed, - * so the checker need the final decision to update the inner state. - * - * @param code The final decision - * @param loc the destination of data + * Add the final ReturnCode to the checker. The ReturnCode may be reversed, so the checker need + * the final decision to update the inner state. + * @param code The final decision + * @param loc the destination of data * @param heapSizeOfRow the data size */ void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java index 252142ac80d7..17c4b38b3556 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SingleResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -58,6 +56,7 @@ public Entry getEntry() { public void setEntry(Entry entry) { this.entry = entry; } + @Override public ResponseType type() { return ResponseType.SINGLE; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java index 86df9fda2076..3311539c2614 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.StringUtils; @@ -26,8 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * SlowLog params object that contains detailed info as params and region name : to be used - * for filter purpose + * SlowLog params object that contains detailed info as params and region name : to be used for + * filter purpose */ @InterfaceAudience.Private public class SlowLogParams { @@ -55,9 +53,7 @@ public String getParams() { @Override public String toString() { - return new ToStringBuilder(this) - .append("regionName", regionName) - .append("params", params) + return new ToStringBuilder(this).append("regionName", regionName).append("params", params) .toString(); } @@ -73,17 +69,12 @@ public boolean equals(Object o) { SlowLogParams that = (SlowLogParams) o; - return new EqualsBuilder() - .append(regionName, that.regionName) - .append(params, that.params) + return new EqualsBuilder().append(regionName, that.regionName).append(params, that.params) .isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(regionName) - .append(params) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(regionName).append(params).toHashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java index bbc6a503a3e3..2dfb2fa7a199 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Map; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -58,25 +57,23 @@ public SnapshotDescription(String name, TableName table, SnapshotType type, Stri /** * SnapshotDescription Parameterized Constructor - * - * @param name Name of the snapshot - * @param table TableName associated with the snapshot - * @param type Type of the snapshot - enum SnapshotType - * @param owner Snapshot Owner + * @param name Name of the snapshot + * @param table TableName associated with the snapshot + * @param type Type of the snapshot - enum SnapshotType + * @param owner Snapshot Owner * @param creationTime Creation time for Snapshot - * @param version Snapshot Version + * @param version Snapshot Version * @deprecated since 2.3.0 and will be removed in 4.0.0. Use - * {@link #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map)} + * {@link #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map)} */ @Deprecated public SnapshotDescription(String name, TableName table, SnapshotType type, String owner, - long creationTime, int version) { + long creationTime, int version) { this(name, table, type, owner, creationTime, version, null); } /** * SnapshotDescription Parameterized Constructor - * * @param name Name of the snapshot * @param table TableName associated with the snapshot * @param type Type of the snapshot - enum SnapshotType @@ -86,7 +83,7 @@ public SnapshotDescription(String name, TableName table, SnapshotType type, Stri * @param snapshotProps Additional properties for snapshot e.g. 
TTL */ public SnapshotDescription(String name, TableName table, SnapshotType type, String owner, - long creationTime, int version, Map snapshotProps) { + long creationTime, int version, Map snapshotProps) { this.name = name; this.table = table; this.snapShotType = type; @@ -101,18 +98,15 @@ private long getLongFromSnapshotProps(Map snapshotProps, String return MapUtils.getLongValue(snapshotProps, property, -1); } - - /** * SnapshotDescription Parameterized Constructor - * * @param snapshotName Name of the snapshot * @param tableName TableName associated with the snapshot * @param type Type of the snapshot - enum SnapshotType * @param snapshotProps Additional properties for snapshot e.g. TTL */ public SnapshotDescription(String snapshotName, TableName tableName, SnapshotType type, - Map snapshotProps) { + Map snapshotProps) { this(snapshotName, tableName, type, null, -1, -1, snapshotProps); } @@ -149,19 +143,15 @@ public int getVersion() { return this.version; } - public long getMaxFileSize() { return maxFileSize; } + public long getMaxFileSize() { + return maxFileSize; + } @Override public String toString() { - return new ToStringBuilder(this) - .append("name", name) - .append("table", table) - .append("snapShotType", snapShotType) - .append("owner", owner) - .append("creationTime", creationTime) - .append("ttl", ttl) - .append("version", version) - .append("maxFileSize", maxFileSize) - .toString(); + return new ToStringBuilder(this).append("name", name).append("table", table) + .append("snapShotType", snapShotType).append("owner", owner) + .append("creationTime", creationTime).append("ttl", ttl).append("version", version) + .append("maxFileSize", maxFileSize).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java index a4e4cc08be7e..1c453aa24be9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,5 +24,7 @@ */ @InterfaceAudience.Public public enum SnapshotType { - DISABLED, FLUSH, SKIPFLUSH + DISABLED, + FLUSH, + SKIPFLUSH } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java index b73dee1e44fa..625271177a70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
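Illustrative sketch, not part of this change: building a SnapshotDescription through the snapshotProps constructor shown above. The "TTL" property name and the retention value are assumptions for illustration; the javadoc only says the map carries additional properties such as TTL.

static SnapshotDescription flushSnapshotOf(TableName table) {
  Map<String, Object> props = new HashMap<>();
  props.put("TTL", 86400L); // assumed property name; retention in seconds
  return new SnapshotDescription("snap-" + table.getNameAsString(), table, SnapshotType.FLUSH,
    props);
}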
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +26,7 @@ @InterfaceAudience.Private public interface StatisticTrackable { /** - * Update stats per region. - * */ + * Update stats per region. + */ void updateRegionStats(ServerName server, byte[] region, RegionLoadStats stats); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java index 4d4f15386699..a982a1c320dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,10 +31,9 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; /** - * Base class which provides clients with an RPC connection to - * call coprocessor endpoint {@link com.google.protobuf.Service}s. - * Note that clients should not use this class directly, except through - * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. + * Base class which provides clients with an RPC connection to call coprocessor endpoint + * {@link com.google.protobuf.Service}s. Note that clients should not use this class directly, + * except through {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. * @deprecated Please stop using this class again, as it is too low level, which is part of the rpc * framework for HBase. Will be deleted in 4.0.0. */ @@ -71,6 +70,6 @@ public Message callBlockingMethod(Descriptors.MethodDescriptor method, RpcContro } protected abstract Message callExecService(RpcController controller, - Descriptors.MethodDescriptor method, Message request, Message responsePrototype) - throws IOException; + Descriptors.MethodDescriptor method, Message request, Message responsePrototype) + throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index 53da0cfb9120..0f93ab21a2c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,10 +43,10 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; /** - * Used to communicate with a single HBase table. - * Obtain an instance from a {@link Connection} and call {@link #close()} afterwards. - * - *

    Table can be used to get, put, delete or scan data from a table. + * Used to communicate with a single HBase table. Obtain an instance from a {@link Connection} and + * call {@link #close()} afterwards. + *

    + * Table can be used to get, put, delete or scan data from a table. * @see ConnectionFactory * @see Connection * @see Admin @@ -64,13 +63,13 @@ public interface Table extends Closeable { /** * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance. *

    - * The reference returned is not a copy, so any change made to it will - * affect this instance. + * The reference returned is not a copy, so any change made to it will affect this instance. */ Configuration getConfiguration(); /** - * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table. + * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this + * table. * @throws java.io.IOException if a remote or network exception occurs. */ TableDescriptor getDescriptor() throws IOException; @@ -83,13 +82,9 @@ public interface Table extends Closeable { /** * Test for the existence of columns in the table, as specified by the Get. *

    - * * This will return true if the Get matches one or more keys, false if not. *

    - * - * This is a server-side call so it prevents any data from being transfered to - * the client. - * + * This is a server-side call so it prevents any data from being transfered to the client. * @param get the Get * @return true if the specified Get matches one or more keys, false if not * @throws IOException e @@ -101,16 +96,12 @@ default boolean exists(Get get) throws IOException { /** * Test for the existence of columns in the table, as specified by the Gets. *

    - * - * This will return an array of booleans. Each value will be true if the related Get matches - * one or more keys, false if not. + * This will return an array of booleans. Each value will be true if the related Get matches one + * or more keys, false if not. *

    - * - * This is a server-side call so it prevents any data from being transferred to - * the client. - * + * This is a server-side call so it prevents any data from being transferred to the client. * @param gets the Gets - * @return Array of boolean. True if the specified Get matches one or more keys, false if not. + * @return Array of boolean. True if the specified Get matches one or more keys, false if not. * @throws IOException e */ default boolean[] exists(List gets) throws IOException { @@ -118,21 +109,18 @@ default boolean[] exists(List gets) throws IOException { } /** - * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. - * The ordering of execution of the actions is not defined. Meaning if you do a Put and a - * Get in the same {@link #batch} call, you will not necessarily be - * guaranteed that the Get returns what the Put had put. - * + * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The + * ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the + * same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the + * Put had put. * @param actions list of Get, Put, Delete, Increment, Append, RowMutations. - * @param results Empty Object[], same size as actions. Provides access to partial - * results, in case an exception is thrown. A null in the result array means that - * the call for that action failed, even after retries. The order of the objects - * in the results array corresponds to the order of actions in the request list. - * @throws IOException - * @since 0.90.0 + * @param results Empty Object[], same size as actions. Provides access to partial results, in + * case an exception is thrown. A null in the result array means that the call for + * that action failed, even after retries. The order of the objects in the results + * array corresponds to the order of actions in the request list. n * @since 0.90.0 */ - default void batch(final List actions, final Object[] results) throws IOException, - InterruptedException { + default void batch(final List actions, final Object[] results) + throws IOException, InterruptedException { throw new NotImplementedException("Add an implementation!"); } @@ -147,17 +135,16 @@ default void batch(final List actions, final Object[] results) th */ @Deprecated default void batchCallback(final List actions, final Object[] results, - final Batch.Callback callback) throws IOException, InterruptedException { + final Batch.Callback callback) throws IOException, InterruptedException { throw new NotImplementedException("Add an implementation!"); } /** * Extracts certain cells from a given row. * @param get The object that specifies what data to fetch and from which row. - * @return The data coming from the specified row, if it exists. If the row - * specified doesn't exist, the {@link Result} instance returned won't - * contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by - * {@link Result#isEmpty()}. + * @return The data coming from the specified row, if it exists. If the row specified doesn't + * exist, the {@link Result} instance returned won't contain any + * {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 */ @@ -167,31 +154,27 @@ default Result get(Get get) throws IOException { /** * Extracts specified cells from the given rows, as a batch. 
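Illustrative sketch, not part of this change: the multi-Get entry points documented above. The family "cf" and qualifier "q" are assumptions.

static void multiGet(Table table, List<byte[]> rows) throws IOException, InterruptedException {
  List<Get> gets = new ArrayList<>();
  for (byte[] row : rows) {
    gets.add(new Get(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")));
  }
  // Server-side existence check; no cell data is transferred to the client.
  boolean[] present = table.exists(gets);
  // Results come back in the order of the Gets; a null slot plus a thrown exception means that
  // Get failed even after retries.
  Result[] results = table.get(gets);
  // batch() accepts mixed operation types; here it simply re-runs the same Gets.
  Object[] batchResults = new Object[gets.size()];
  table.batch(new ArrayList<Row>(gets), batchResults);
}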
- * * @param gets The objects that specify what data to fetch and from which rows. - * @return The data coming from the specified rows, if it exists. If the row specified doesn't - * exist, the {@link Result} instance returned won't contain any - * {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If there - * are any failures even after retries, there will be a null in the results' array - * for those Gets, AND an exception will be thrown. The ordering of the Result array - * corresponds to the order of the list of passed in Gets. + * @return The data coming from the specified rows, if it exists. If the row specified doesn't + * exist, the {@link Result} instance returned won't contain any + * {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If + * there are any failures even after retries, there will be a null in the + * results' array for those Gets, AND an exception will be thrown. The ordering of the + * Result array corresponds to the order of the list of passed in Gets. * @throws IOException if a remote or network exception occurs. * @since 0.90.0 - * @apiNote {@link #put(List)} runs pre-flight validations on the input list on client. - * Currently {@link #get(List)} doesn't run any validations on the client-side, - * currently there is no need, but this may change in the future. An - * {@link IllegalArgumentException} will be thrown in this case. + * @apiNote {@link #put(List)} runs pre-flight validations on the input list on client. Currently + * {@link #get(List)} doesn't run any validations on the client-side, currently there is + * no need, but this may change in the future. An {@link IllegalArgumentException} will + * be thrown in this case. */ default Result[] get(List gets) throws IOException { throw new NotImplementedException("Add an implementation!"); } /** - * Returns a scanner on the current table as specified by the {@link Scan} - * object. - * Note that the passed {@link Scan}'s start row and caching properties - * maybe changed. - * + * Returns a scanner on the current table as specified by the {@link Scan} object. Note that the + * passed {@link Scan}'s start row and caching properties maybe changed. * @param scan A configured {@link Scan} object. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -203,7 +186,6 @@ default ResultScanner getScanner(Scan scan) throws IOException { /** * Gets a scanner on the current table for the given family. - * * @param family The column family to scan. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -215,8 +197,7 @@ default ResultScanner getScanner(byte[] family) throws IOException { /** * Gets a scanner on the current table for the given family and qualifier. - * - * @param family The column family to scan. + * @param family The column family to scan. * @param qualifier The column qualifier to scan. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -228,7 +209,6 @@ default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOExcep /** * Puts some data in the table. - * * @param put The data to put. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -240,15 +220,14 @@ default void put(Put put) throws IOException { /** * Batch puts the specified data into the table. *

    - * This can be used for group commit, or for submitting user defined batches. Before sending - * a batch of mutations to the server, the client runs a few validations on the input list. If an + * This can be used for group commit, or for submitting user defined batches. Before sending a + * batch of mutations to the server, the client runs a few validations on the input list. If an * error is found, for example, a mutation was supplied but was missing it's column an - * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there - * are any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be - * thrown. RetriesExhaustedWithDetailsException contains lists of failed mutations and - * corresponding remote exceptions. The ordering of mutations and exceptions in the - * encapsulating exception corresponds to the order of the input list of Put requests. - * + * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are + * any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown. + * RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding + * remote exceptions. The ordering of mutations and exceptions in the encapsulating exception + * corresponds to the order of the input list of Put requests. * @param puts The list of mutations to apply. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -259,7 +238,6 @@ default void put(List puts) throws IOException { /** * Deletes the specified cells/row. - * * @param delete The object that specifies what to delete. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -271,19 +249,18 @@ default void delete(Delete delete) throws IOException { /** * Batch Deletes the specified cells/rows from the table. *
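Illustrative sketch, not part of this change: batched puts as documented above, plus the batched delete whose contract is documented in the hunk that follows. Row, family and qualifier names are assumptions.

static void writeThenDelete(Table table) throws IOException {
  byte[] family = Bytes.toBytes("cf");
  byte[] qualifier = Bytes.toBytes("q");
  List<Put> puts = new ArrayList<>();
  puts.add(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("v1")));
  puts.add(new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, Bytes.toBytes("v2")));
  // Client-side validation runs first; failures after retries surface as
  // RetriesExhaustedWithDetailsException.
  table.put(puts);

  List<Delete> deletes = new ArrayList<>();
  deletes.add(new Delete(Bytes.toBytes("row1")));
  table.delete(deletes);
  // Before 3.0.0, successfully applied Deletes are removed from the input list, so anything still
  // left in "deletes" was not applied.
}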

    - * If a specified row does not exist, {@link Delete} will report as though sucessful - * delete; no exception will be thrown. If there are any failures even after retries, - * a {@link RetriesExhaustedWithDetailsException} will be thrown. - * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and - * corresponding remote exceptions. - * - * @param deletes List of things to delete. The input list gets modified by this - * method. All successfully applied {@link Delete}s in the list are removed (in particular it - * gets re-ordered, so the order in which the elements are inserted in the list gives no - * guarantee as to the order in which the {@link Delete}s are executed). - * @throws IOException if a remote or network exception occurs. In that case - * the {@code deletes} argument will contain the {@link Delete} instances - * that have not be successfully applied. + * If a specified row does not exist, {@link Delete} will report as though sucessful delete; no + * exception will be thrown. If there are any failures even after retries, a + * {@link RetriesExhaustedWithDetailsException} will be thrown. + * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding + * remote exceptions. + * @param deletes List of things to delete. The input list gets modified by this method. All + * successfully applied {@link Delete}s in the list are removed (in particular it + * gets re-ordered, so the order in which the elements are inserted in the list + * gives no guarantee as to the order in which the {@link Delete}s are executed). + * @throws IOException if a remote or network exception occurs. In that case the {@code deletes} + * argument will contain the {@link Delete} instances that have not be + * successfully applied. * @since 0.20.1 * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also, * {@link #put(List)} runs pre-flight validations on the input list on client. Currently @@ -309,7 +286,7 @@ default void delete(List deletes) throws IOException { * * * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { @@ -318,9 +295,8 @@ default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { /** * A helper class for sending checkAndMutate request. - * * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateBuilder { @@ -350,7 +326,7 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { /** * @param compareOp comparison operator to use - * @param value the expected value + * @param value the expected value */ CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value); @@ -387,7 +363,7 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { * * * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) { @@ -396,9 +372,8 @@ default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter /** * A helper class for sending checkAndMutate request with a filter. - * * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. 
*/ @Deprecated interface CheckAndMutateWithFilterBuilder { @@ -428,9 +403,8 @@ interface CheckAndMutateWithFilterBuilder { } /** - * checkAndMutate that atomically checks if a row matches the specified condition. If it does, - * it performs the specified action. - * + * checkAndMutate that atomically checks if a row matches the specified condition. If it does, it + * performs the specified action. * @param checkAndMutate The CheckAndMutate object. * @return A CheckAndMutateResult object that represents the result for the CheckAndMutate. * @throws IOException if a remote or network exception occurs. @@ -443,10 +417,9 @@ default CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throw * Batch version of checkAndMutate. The specified CheckAndMutates are batched only in the sense * that they are sent to a RS in one RPC, but each CheckAndMutate operation is still executed * atomically (and thus, each may fail independently of others). - * * @param checkAndMutates The list of CheckAndMutate. * @return A list of CheckAndMutateResult objects that represents the result for each - * CheckAndMutate. + * CheckAndMutate. * @throws IOException if a remote or network exception occurs. */ default List checkAndMutate(List checkAndMutates) @@ -455,9 +428,8 @@ default List checkAndMutate(List checkAndM } /** - * Performs multiple mutations atomically on a single row. Currently - * {@link Put} and {@link Delete} are supported. - * + * Performs multiple mutations atomically on a single row. Currently {@link Put} and + * {@link Delete} are supported. * @param rm object that specifies the set of mutations to perform atomically * @return results of Increment/Append operations * @throws IOException if a remote or network exception occurs. @@ -469,10 +441,9 @@ default Result mutateRow(final RowMutations rm) throws IOException { /** * Appends values to one or more columns within a single row. *
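Illustrative sketch, not part of this change: the CheckAndMutate and RowMutations entry points documented above. Row, family and qualifier names are assumptions.

static void conditionalAndAtomicUpdates(Table table) throws IOException {
  byte[] row = Bytes.toBytes("row1");
  byte[] family = Bytes.toBytes("cf");
  byte[] qualifier = Bytes.toBytes("q");

  // Atomically check a cell value and apply a Put only when it matches.
  CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
    .ifEquals(family, qualifier, Bytes.toBytes("expected"))
    .build(new Put(row).addColumn(family, qualifier, Bytes.toBytes("updated")));
  CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);

  // Apply several mutations to the same row as one atomic unit.
  RowMutations mutations = new RowMutations(row);
  mutations.add(new Put(row).addColumn(family, qualifier, Bytes.toBytes("v")));
  mutations.add(new Delete(row).addColumns(family, Bytes.toBytes("old-q")));
  table.mutateRow(mutations);
}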

    - * This operation guaranteed atomicity to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. - * + * This operation guaranteed atomicity to readers. Appends are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. * @param append object that specifies the columns and values to be appended * @throws IOException e * @return values of columns after the append operation (maybe null) @@ -484,12 +455,11 @@ default Result append(final Append append) throws IOException { /** * Increments one or more columns within a single row. *
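Illustrative sketch, not part of this change: the Append documented above together with the Increment / incrementColumnValue calls documented in the hunks that follow. Row, family and qualifier names are assumptions.

static void appendAndIncrement(Table table) throws IOException {
  byte[] row = Bytes.toBytes("row1");
  byte[] family = Bytes.toBytes("cf");

  // Appends run under the row lock, so readers see the whole operation or none of it.
  Append append = new Append(row).addColumn(family, Bytes.toBytes("log"), Bytes.toBytes("|event"));
  Result afterAppend = table.append(append);

  // Increment a counter cell; the stored value must be a big-endian long.
  long newValue = table.incrementColumnValue(row, family, Bytes.toBytes("counter"), 1);
  // Skipping the WAL trades durability (lost increments on failure) for speed.
  table.incrementColumnValue(row, family, Bytes.toBytes("counter"), 1, Durability.SKIP_WAL);
}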

    - * This operation ensures atomicity to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. - * - * @param increment object that specifies the columns and amounts to be used - * for the increment operations + * This operation ensures atomicity to readers. Increments are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. + * @param increment object that specifies the columns and amounts to be used for the increment + * operations * @throws IOException e * @return values of columns after the increment */ @@ -501,50 +471,47 @@ default Result increment(final Increment increment) throws IOException { * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} *

    * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the - * amount is negative). + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) - throws IOException { + throws IOException { Increment increment = new Increment(row).addColumn(family, qualifier, amount); Cell cell = increment(increment).getColumnLatestCell(family, qualifier); return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } /** - * Atomically increments a column value. If the column value already exists - * and is not a big-endian long, this could throw an exception. If the column - * value does not yet exist it is initialized to amount and - * written to the specified column. - * - *

    Setting durability to {@link Durability#SKIP_WAL} means that in a fail - * scenario you will lose any increments that have not been flushed. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. - * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the - * amount is negative). + * Atomically increments a column value. If the column value already exists and is not a + * big-endian long, this could throw an exception. If the column value does not yet exist it is + * initialized to amount and written to the specified column. + *

    + * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose + * any increments that have not been flushed. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the amount is + * negative). * @param durability The persistence guarantee for this increment. * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ - default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, Durability durability) throws IOException { - Increment increment = new Increment(row) - .addColumn(family, qualifier, amount) - .setDurability(durability); + default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, + Durability durability) throws IOException { + Increment increment = + new Increment(row).addColumn(family, qualifier, amount).setDurability(durability); Cell cell = increment(increment).getColumnLatestCell(family, qualifier); return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } /** * Releases any resources held or pending changes in internal buffers. - * * @throws IOException if a remote or network exception occurs. */ @Override @@ -565,6 +532,7 @@ default void close() throws IOException { * invocations: *

    *

    + * *
        * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    @@ -573,8 +541,8 @@ default void close() throws IOException {
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
        * 
    - *
    - *
    + * + * * @param row The row key used to identify the remote region location * @return A CoprocessorRpcChannel instance * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any @@ -591,18 +559,18 @@ default CoprocessorRpcChannel coprocessorService(byte[] row) { * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each * {@link Service} instance. - * @param service the protocol buffer {@code Service} implementation to call + * @param service the protocol buffer {@code Service} implementation to call * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. If - * {@code null}, selection will continue through the last table region. + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param callable this instance's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be - * invoked once per table region, using the {@link Service} instance connected to that - * region. - * @param the {@link Service} subclass to connect to - * @param Return type for the {@code callable} parameter's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will + * be invoked once per table region, using the {@link Service} instance connected + * to that region. + * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method * @return a map of result values keyed by region name * @deprecated since 3.0.0, will removed in 4.0.0. The batch call here references the blocking * interface for of a protobuf stub, so it is not possible to do it in an asynchronous @@ -613,8 +581,8 @@ default CoprocessorRpcChannel coprocessorService(byte[] row) { */ @Deprecated default Map coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable) - throws ServiceException, Throwable { + byte[] startKey, byte[] endKey, final Batch.Call callable) + throws ServiceException, Throwable { Map results = Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); coprocessorService(service, startKey, endKey, callable, new Batch.Callback() { @@ -638,18 +606,18 @@ public void update(byte[] region, byte[] row, R value) { * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)} * method will be called with the return value from each region's * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. - * @param service the protocol buffer {@code Service} implementation to call + * @param service the protocol buffer {@code Service} implementation to call * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. If - * {@code null}, selection will continue through the last table region. 
+ * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param callable this instance's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be - * invoked once per table region, using the {@link Service} instance connected to that - * region. - * @param the {@link Service} subclass to connect to - * @param Return type for the {@code callable} parameter's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will + * be invoked once per table region, using the {@link Service} instance connected + * to that region. + * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method * @deprecated since 3.0.0, will removed in 4.0.0. The batch call here references the blocking * interface for of a protobuf stub, so it is not possible to do it in an asynchronous * way, even if now we are building the {@link Table} implementation based on the @@ -659,8 +627,8 @@ public void update(byte[] region, byte[] row, R value) { */ @Deprecated default void coprocessorService(final Class service, byte[] startKey, - byte[] endKey, final Batch.Call callable, final Batch.Callback callback) - throws ServiceException, Throwable { + byte[] endKey, final Batch.Call callable, final Batch.Callback callback) + throws ServiceException, Throwable { throw new NotImplementedException("Add an implementation!"); } @@ -669,14 +637,14 @@ default void coprocessorService(final Class service, b * range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to * the same region server will be batched into one call. The coprocessor service is invoked * according to the service instance, method name and parameters. - * @param methodDescriptor the descriptor for the protobuf service method to call. - * @param request the method call parameters - * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. If - * {@code null}, selection will continue through the last table region. + * @param methodDescriptor the descriptor for the protobuf service method to call. + * @param request the method call parameters + * @param startKey start region selection with region containing this row. If + * {@code null}, the selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param responsePrototype the proto type of the response of the method in Service. - * @param the response type for the coprocessor Service method + * @param the response type for the coprocessor Service method * @return a map of result values keyed by region name * @deprecated since 3.0.0, will removed in 4.0.0. 
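For the deprecated range variant of coprocessorService documented above, a minimal sketch of how a caller might fan an endpoint call out across every region between two row keys. MyService, MyRequest and MyResponse are hypothetical protoc-generated endpoint classes (they are not part of HBase or of this patch); only the Table#coprocessorService and Batch.Call usage reflects the API shown in the javadoc.

// Hedged sketch: one Batch.Call invocation per region between the two keys (inclusive).
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorRangeCallSketch {
  static Map<byte[], Long> callOverRange(Table table) throws Throwable {
    final MyRequest request = MyRequest.newBuilder().build(); // hypothetical request message
    return table.coprocessorService(MyService.class,
      Bytes.toBytes("startRow"), Bytes.toBytes("stopRow"),
      new Batch.Call<MyService, Long>() {
        @Override
        public Long call(MyService instance) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          CoprocessorRpcUtils.BlockingRpcCallback<MyResponse> callback =
            new CoprocessorRpcUtils.BlockingRpcCallback<>();
          // myCall is the hypothetical endpoint method generated by protoc.
          instance.myCall(controller, request, callback);
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return callback.get().getCount();
        }
      });
  }
}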
The batch call here references the blocking * interface for of a protobuf stub, so it is not possible to do it in an asynchronous @@ -687,8 +655,8 @@ default void coprocessorService(final Class service, b */ @Deprecated default Map batchCoprocessorService( - Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, - byte[] endKey, R responsePrototype) throws ServiceException, Throwable { + Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { final Map results = Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, @@ -712,15 +680,15 @@ public void update(byte[] region, byte[] row, R result) { * The given * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)} * method will be called with the return value from each region's invocation. - * @param methodDescriptor the descriptor for the protobuf service method to call. - * @param request the method call parameters - * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. If - * {@code null}, selection will continue through the last table region. + * @param methodDescriptor the descriptor for the protobuf service method to call. + * @param request the method call parameters + * @param startKey start region selection with region containing this row. If + * {@code null}, the selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. If + * {@code null}, selection will continue through the last table region. * @param responsePrototype the proto type of the response of the method in Service. - * @param callback callback to invoke with the response for each region - * @param the response type for the coprocessor Service method + * @param callback callback to invoke with the response for each region + * @param the response type for the coprocessor Service method * @deprecated since 3.0.0, will removed in 4.0.0. 
The batch call here references the blocking * interface for of a protobuf stub, so it is not possible to do it in an asynchronous * way, even if now we are building the {@link Table} implementation based on the @@ -730,9 +698,8 @@ public void update(byte[] region, byte[] row, R result) { */ @Deprecated default void batchCoprocessorService( - Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, - byte[] endKey, R responsePrototype, Batch.Callback callback) - throws ServiceException, Throwable { + Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype, Batch.Callback callback) throws ServiceException, Throwable { throw new NotImplementedException("Add an implementation!"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java index d71a07e80eb4..75e16e89a5de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +23,7 @@ * For creating {@link Table} instance. *

    * The implementation should have default configurations set before returning the builder to user. - * So users are free to only set the configurations they care about to create a new - * Table instance. + * So users are free to only set the configurations they care about to create a new Table instance. */ @InterfaceAudience.Public public interface TableBuilder { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java index fa543c062445..796fd6496f7b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,8 +41,9 @@ abstract class TableBuilderBase implements TableBuilder { throw new IllegalArgumentException("Given table name is null"); } this.tableName = tableName; - this.operationTimeout = tableName.isSystemTable() ? connConf.getMetaOperationTimeout() - : connConf.getOperationTimeout(); + this.operationTimeout = tableName.isSystemTable() + ? connConf.getMetaOperationTimeout() + : connConf.getOperationTimeout(); this.rpcTimeout = connConf.getRpcTimeout(); this.readRpcTimeout = connConf.getReadRpcTimeout(); this.writeRpcTimeout = connConf.getWriteRpcTimeout(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index dd106244abd1..ea60b07d63eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,10 +31,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * TableDescriptor contains the details about an HBase table such as the descriptors of - * all the column families, is the table a catalog table, hbase:meta , - * if the table is read only, the maximum size of the memstore, - * when the region split should occur, coprocessors associated with it etc... + * TableDescriptor contains the details about an HBase table such as the descriptors of all the + * column families, is the table a catalog table, hbase:meta , if the table is read + * only, the maximum size of the memstore, when the region split should occur, coprocessors + * associated with it etc... 
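The TableBuilder hunks above are formatting-only; as a reminder of how the builder is meant to be used, here is a small hedged sketch. The table name, timeout values and thread pool are made-up; the getTableBuilder/setOperationTimeout/setReadRpcTimeout/setWriteRpcTimeout calls are the existing client API.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class TableBuilderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ExecutorService pool = Executors.newFixedThreadPool(4);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Defaults come from the connection configuration; override only what you need.
      Table table = connection.getTableBuilder(TableName.valueOf("example_table"), pool)
        .setOperationTimeout(60_000)   // ms, overall budget for one operation
        .setReadRpcTimeout(10_000)     // ms, per read RPC
        .setWriteRpcTimeout(10_000)    // ms, per write RPC
        .build();
      try {
        // use the table ...
      } finally {
        table.close();
      }
    } finally {
      pool.shutdown();
    }
  }
}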
*/ @InterfaceAudience.Public public interface TableDescriptor { @@ -45,10 +44,10 @@ public interface TableDescriptor { @InterfaceAudience.Private Comparator COMPARATOR_IGNORE_REPLICATION = - getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION); + getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION); static Comparator - getComparator(Comparator cfComparator) { + getComparator(Comparator cfComparator) { return (TableDescriptor lhs, TableDescriptor rhs) -> { int result = lhs.getTableName().compareTo(rhs.getTableName()); if (result != 0) { @@ -61,8 +60,8 @@ public interface TableDescriptor { return result; } - for (Iterator it = lhsFamilies.iterator(), it2 = - rhsFamilies.iterator(); it.hasNext();) { + for (Iterator it = lhsFamilies.iterator(), + it2 = rhsFamilies.iterator(); it.hasNext();) { result = cfComparator.compare(it.next(), it2.next()); if (result != 0) { return result; @@ -75,77 +74,63 @@ public interface TableDescriptor { /** * Returns the count of the column families of the table. - * * @return Count of column families of the table */ int getColumnFamilyCount(); /** * Return the list of attached co-processor represented - * * @return The list of CoprocessorDescriptor */ Collection getCoprocessorDescriptors(); /** * Returns the durability setting for the table. - * * @return durability setting for the table. */ Durability getDurability(); /** - * Returns an unmodifiable collection of all the {@link ColumnFamilyDescriptor} of - * all the column families of the table. - * - * @return An array of {@link ColumnFamilyDescriptor} of all the column - * families. + * Returns an unmodifiable collection of all the {@link ColumnFamilyDescriptor} of all the column + * families of the table. + * @return An array of {@link ColumnFamilyDescriptor} of all the column families. */ ColumnFamilyDescriptor[] getColumnFamilies(); /** - * Returns all the column family names of the current table. The map of - * TableDescriptor contains mapping of family name to ColumnDescriptor. - * This returns all the keys of the family map which represents the column - * family names of the table. - * + * Returns all the column family names of the current table. The map of TableDescriptor contains + * mapping of family name to ColumnDescriptor. This returns all the keys of the family map which + * represents the column family names of the table. * @return Immutable sorted set of the keys of the families. */ Set getColumnFamilyNames(); /** - * Returns the ColumnDescriptor for a specific column family with name as - * specified by the parameter column. - * + * Returns the ColumnDescriptor for a specific column family with name as specified by the + * parameter column. * @param name Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. */ ColumnFamilyDescriptor getColumnFamily(final byte[] name); /** - * This gets the class associated with the flush policy which determines the - * stores need to be flushed when flushing a region. The class used by default - * is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy. - * - * @return the class name of the flush policy for this table. If this returns - * null, the default flush policy is used. + * This gets the class associated with the flush policy which determines the stores need to be + * flushed when flushing a region. 
The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.FlushPolicy. + * @return the class name of the flush policy for this table. If this returns null, the default + * flush policy is used. */ String getFlushPolicyClassName(); /** - * Returns the maximum size upto which a region can grow to after which a - * region split is triggered. The region size is represented by the size of - * the biggest store file in that region. - * + * Returns the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that region. * @return max hregion size for table, -1 if not set. */ long getMaxFileSize(); /** - * Returns the size of the memstore after which a flush to filesystem is - * triggered. - * + * Returns the size of the memstore after which a flush to filesystem is triggered. * @return memory cache flush size for each hregion, -1 if not set. */ long getMemStoreFlushSize(); @@ -161,19 +146,16 @@ public interface TableDescriptor { int getRegionReplication(); /** - * This gets the class associated with the region split policy which - * determines when a region split should occur. The class used by default is - * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * - * @return the class name of the region split policy for this table. If this - * returns null, the default split policy is used. + * This gets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy + * @return the class name of the region split policy for this table. If this returns null, the + * default split policy is used. */ String getRegionSplitPolicyClassName(); /** - * Get the name of the table - * - * @return TableName + * Get the name of the table n */ TableName getTableName(); @@ -186,7 +168,6 @@ public interface TableDescriptor { /** * Getter for accessing the metadata associated with the key. - * * @param key The key. * @return A clone value. Null if no mapping for the key */ @@ -194,7 +175,6 @@ public interface TableDescriptor { /** * Getter for accessing the metadata associated with the key. - * * @param key The key. * @return A clone value. Null if no mapping for the key */ @@ -202,7 +182,6 @@ public interface TableDescriptor { /** * Getter for accessing the metadata associated with the key. - * * @param key The key. * @return Null if no mapping for the key */ @@ -214,9 +193,7 @@ public interface TableDescriptor { Map getValues(); /** - * Check if the table has an attached co-processor represented by the name - * className - * + * Check if the table has an attached co-processor represented by the name className * @param classNameToMatch - Class name of the co-processor * @return true of the table has a co-processor className */ @@ -224,7 +201,6 @@ public interface TableDescriptor { /** * Checks to see if this table contains the given column family - * * @param name Family name or column name. * @return true if the table contains the specified family name */ @@ -236,71 +212,62 @@ public interface TableDescriptor { boolean hasRegionMemStoreReplication(); /** - * Check if the compaction enable flag of the table is true. If flag is false - * then no minor/major compactions will be done in real. - * + * Check if the compaction enable flag of the table is true. If flag is false then no minor/major + * compactions will be done in real. 
* @return true if table compaction enabled */ boolean isCompactionEnabled(); /** - * Check if the split enable flag of the table is true. If flag is false - * then no region split will be done. - * + * Check if the split enable flag of the table is true. If flag is false then no region split will + * be done. * @return true if table region split enabled */ boolean isSplitEnabled(); /** - * Check if the merge enable flag of the table is true. If flag is false - * then no region merge will be done. - * + * Check if the merge enable flag of the table is true. If flag is false then no region merge will + * be done. * @return true if table region merge enabled */ boolean isMergeEnabled(); /** * Checks if this table is hbase:meta region. - * * @return true if this table is hbase:meta region */ boolean isMetaRegion(); /** * Checks if the table is a hbase:meta table - * * @return true if table is hbase:meta region. */ boolean isMetaTable(); /** - * Check if normalization enable flag of the table is true. If flag is false - * then region normalizer won't attempt to normalize this table. - * + * Check if normalization enable flag of the table is true. If flag is false then region + * normalizer won't attempt to normalize this table. * @return true if region normalization is enabled for this table */ boolean isNormalizationEnabled(); /** - * Check if there is the target region count. If so, the normalize plan will - * be calculated based on the target region count. - * + * Check if there is the target region count. If so, the normalize plan will be calculated based + * on the target region count. * @return target region count after normalize done */ int getNormalizerTargetRegionCount(); /** - * Check if there is the target region size. If so, the normalize plan will - * be calculated based on the target region size. - * + * Check if there is the target region size. If so, the normalize plan will be calculated based on + * the target region size. * @return target region size after normalize done */ long getNormalizerTargetRegionSize(); /** - * Check if the readOnly flag of the table is set. If the readOnly flag is set - * then the contents of the table can only be read from but not modified. - * + * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents + * of the table can only be read from but not modified. * @return true if all columns in the table should be read only */ boolean isReadOnly(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 7dbd98650af3..cf12923326c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,139 +62,126 @@ public class TableDescriptorBuilder { public static final String SPLIT_POLICY = "SPLIT_POLICY"; private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes the maximum size of the store file after which a - * region split occurs. 
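The TableDescriptor accessors touched above are read-only; a typical consumer fetches the descriptor through Admin and inspects it. A hedged sketch, assuming a placeholder table name:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorInspectionSketch {
  static void describe(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      TableDescriptor td = admin.getDescriptor(TableName.valueOf("example_table"));
      System.out.println("read only:          " + td.isReadOnly());
      System.out.println("compaction enabled: " + td.isCompactionEnabled());
      System.out.println("normalization:      " + td.isNormalizationEnabled());
      System.out.println("max file size:      " + td.getMaxFileSize());       // -1 if unset
      System.out.println("memstore flush:     " + td.getMemStoreFlushSize()); // -1 if unset
      for (ColumnFamilyDescriptor cf : td.getColumnFamilies()) {
        System.out.println("family: " + cf.getNameAsString());
      }
    }
  }
}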
+ * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size + * of the store file after which a region split occurs. */ @InterfaceAudience.Private public static final String MAX_FILESIZE = "MAX_FILESIZE"; - private static final Bytes MAX_FILESIZE_KEY - = new Bytes(Bytes.toBytes(MAX_FILESIZE)); + private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE)); /** - * Used by rest interface to access this metadata attribute - * which denotes if the table is Read Only. + * Used by rest interface to access this metadata attribute which denotes if the table is Read + * Only. */ @InterfaceAudience.Private public static final String READONLY = "READONLY"; - private static final Bytes READONLY_KEY - = new Bytes(Bytes.toBytes(READONLY)); + private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes if the table is compaction enabled. + * Used by HBase Shell interface to access this metadata attribute which denotes if the table is + * compaction enabled. */ @InterfaceAudience.Private public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED"; - private static final Bytes COMPACTION_ENABLED_KEY - = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); + private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes if the table is split enabled. + * Used by HBase Shell interface to access this metadata attribute which denotes if the table is + * split enabled. */ @InterfaceAudience.Private public static final String SPLIT_ENABLED = "SPLIT_ENABLED"; private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED)); /** - * Used by HBase Shell interface to access this metadata - * attribute which denotes if the table is merge enabled. + * Used by HBase Shell interface to access this metadata attribute which denotes if the table is + * merge enabled. */ @InterfaceAudience.Private public static final String MERGE_ENABLED = "MERGE_ENABLED"; private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED)); /** - * Used by HBase Shell interface to access this metadata - * attribute which represents the maximum size of the memstore after which its - * contents are flushed onto the disk. + * Used by HBase Shell interface to access this metadata attribute which represents the maximum + * size of the memstore after which its contents are flushed onto the disk. */ @InterfaceAudience.Private public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; - private static final Bytes MEMSTORE_FLUSHSIZE_KEY - = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); + private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); @InterfaceAudience.Private public static final String FLUSH_POLICY = "FLUSH_POLICY"; private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY)); /** - * Used by rest interface to access this metadata attribute - * which denotes if it is a catalog table, either hbase:meta . + * Used by rest interface to access this metadata attribute which denotes if it is a catalog + * table, either hbase:meta . 
*/ @InterfaceAudience.Private public static final String IS_META = "IS_META"; - private static final Bytes IS_META_KEY - = new Bytes(Bytes.toBytes(IS_META)); + private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META)); /** * {@link Durability} setting for the table. */ @InterfaceAudience.Private public static final String DURABILITY = "DURABILITY"; - private static final Bytes DURABILITY_KEY - = new Bytes(Bytes.toBytes("DURABILITY")); + private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes("DURABILITY")); /** * The number of region replicas for the table. */ @InterfaceAudience.Private public static final String REGION_REPLICATION = "REGION_REPLICATION"; - private static final Bytes REGION_REPLICATION_KEY - = new Bytes(Bytes.toBytes(REGION_REPLICATION)); + private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION)); /** - * The flag to indicate whether or not the memstore should be - * replicated for read-replicas (CONSISTENCY => TIMELINE). + * The flag to indicate whether or not the memstore should be replicated for read-replicas + * (CONSISTENCY => TIMELINE). */ @InterfaceAudience.Private public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; - private static final Bytes REGION_MEMSTORE_REPLICATION_KEY - = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); + private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = + new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); - private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY - = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); + private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = + new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); /** - * Used by shell/rest interface to access this metadata - * attribute which denotes if the table should be treated by region - * normalizer. + * Used by shell/rest interface to access this metadata attribute which denotes if the table + * should be treated by region normalizer. */ @InterfaceAudience.Private public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED"; - private static final Bytes NORMALIZATION_ENABLED_KEY - = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); + private static final Bytes NORMALIZATION_ENABLED_KEY = + new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); @InterfaceAudience.Private - public static final String NORMALIZER_TARGET_REGION_COUNT = - "NORMALIZER_TARGET_REGION_COUNT"; + public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT"; private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY = - new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); + new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); @InterfaceAudience.Private public static final String NORMALIZER_TARGET_REGION_SIZE_MB = "NORMALIZER_TARGET_REGION_SIZE_MB"; private static final Bytes NORMALIZER_TARGET_REGION_SIZE_MB_KEY = - new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB)); + new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB)); // TODO: Keeping backward compatability with HBASE-25651 change. 
Can be removed in later version @InterfaceAudience.Private @Deprecated public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE"; @Deprecated private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY = - new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); + new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); /** - * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global - * default value + * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */ private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT; @InterfaceAudience.Private public static final String PRIORITY = "PRIORITY"; - private static final Bytes PRIORITY_KEY - = new Bytes(Bytes.toBytes(PRIORITY)); + private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY)); private static final Bytes RSGROUP_KEY = - new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); + new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); /** * Relative priority of the table used for rpc scheduling @@ -223,8 +209,8 @@ public class TableDescriptorBuilder { public static final boolean DEFAULT_MERGE_ENABLED = true; /** - * Constant that denotes the maximum default size of the memstore in bytes after which - * the contents are flushed to the store files. + * Constant that denotes the maximum default size of the memstore in bytes after which the + * contents are flushed to the store files. */ public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L; @@ -236,16 +222,14 @@ public class TableDescriptorBuilder { private final static Set RESERVED_KEYWORDS = new HashSet<>(); static { - DEFAULT_VALUES.put(MAX_FILESIZE, - String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); + DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); - DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, - String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); - DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name + DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); + DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); - DEFAULT_VALUES.keySet().stream() - .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add); + DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s))) + .forEach(RESERVED_KEYWORDS::add); RESERVED_KEYWORDS.add(IS_META_KEY); } @@ -296,16 +280,15 @@ public static PrettyPrinter.Unit getUnit(String key) { private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; - private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile( - "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + - CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); + private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("(" + + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); private static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); /** * Table descriptor for namespace table * @deprecated since 3.0.0 and will be removed in 4.0.0. 
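Most of the metadata keys reshuffled above (MAX_FILESIZE, READONLY, COMPACTION_ENABLED, MEMSTORE_FLUSHSIZE, DURABILITY, REGION_REPLICATION, the normalizer targets) are normally set through TableDescriptorBuilder setters rather than via raw Bytes keys. A hedged sketch with made-up table/family names and sizes:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorAttributesSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setMaxFileSize(10L * 1024 * 1024 * 1024)   // MAX_FILESIZE: split threshold, bytes
      .setMemStoreFlushSize(256L * 1024 * 1024)   // MEMSTORE_FLUSHSIZE, bytes
      .setCompactionEnabled(true)                 // COMPACTION_ENABLED
      .setReadOnly(false)                         // READONLY
      .setDurability(Durability.SYNC_WAL)         // DURABILITY
      .setRegionReplication(2)                    // REGION_REPLICATION
      .setNormalizationEnabled(true)              // NORMALIZATION_ENABLED
      .setNormalizerTargetRegionCount(20)         // NORMALIZER_TARGET_REGION_COUNT
      .build();
  }
}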
We have folded the data in namespace - * table into meta table, so do not use it any more. + * table into meta table, so do not use it any more. * @see HBASE-21154 */ @Deprecated @@ -395,8 +378,8 @@ public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor famil return this; } - public TableDescriptorBuilder setColumnFamilies( - final Collection families) { + public TableDescriptorBuilder + setColumnFamilies(final Collection families) { families.forEach(desc::setColumnFamily); return this; } @@ -555,12 +538,11 @@ public String getValue(String key) { public TableDescriptorBuilder setReplicationScope(int scope) { Map newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); newFamilies.putAll(desc.families); - newFamilies - .forEach((cf, cfDesc) -> { - desc.removeColumnFamily(cf); - desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope) - .build()); - }); + newFamilies.forEach((cf, cfDesc) -> { + desc.removeColumnFamily(cf); + desc + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build()); + }); return this; } @@ -579,21 +561,19 @@ private static final class ModifyableTableDescriptor private final TableName name; /** - * A map which holds the metadata information of the table. This metadata - * includes values like IS_META, SPLIT_POLICY, MAX_FILE_SIZE, - * READONLY, MEMSTORE_FLUSHSIZE etc... + * A map which holds the metadata information of the table. This metadata includes values like + * IS_META, SPLIT_POLICY, MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc... */ private final Map values = new HashMap<>(); /** * Maps column family name to the respective FamilyDescriptors */ - private final Map families - = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); + private final Map families = + new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); /** * Construct a table descriptor specifying a TableName object - * * @param name Table name. */ private ModifyableTableDescriptor(final TableName name) { @@ -605,8 +585,7 @@ private ModifyableTableDescriptor(final TableDescriptor desc) { } /** - * Construct a table descriptor by cloning the descriptor passed as a - * parameter. + * Construct a table descriptor by cloning the descriptor passed as a parameter. *

    * Makes a deep copy of the supplied descriptor. * @param name The new name @@ -616,8 +595,8 @@ private ModifyableTableDescriptor(final TableName name, final TableDescriptor de this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues()); } - private ModifyableTableDescriptor(final TableName name, final Collection families, - Map values) { + private ModifyableTableDescriptor(final TableName name, + final Collection families, Map values) { this.name = name; families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); this.values.putAll(values); @@ -627,7 +606,6 @@ private ModifyableTableDescriptor(final TableName name, final Collection hbase:meta region. - * * @return true if this table is hbase:meta region */ @Override @@ -637,7 +615,6 @@ public boolean isMetaRegion() { /** * Checks if the table is a hbase:meta table - * * @return true if table is hbase:meta region. */ @Override @@ -674,7 +651,6 @@ private T getOrDefault(Bytes key, Function function, T defaultVal /** * Getter for fetching an unmodifiable {@link #values} map. - * * @return unmodifiable map {@link #values}. * @see #values */ @@ -686,35 +662,30 @@ public Map getValues() { /** * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. + * @param key The key. * @param value The value. If null, removes the setting. * @return the modifyable TD * @see #values */ public ModifyableTableDescriptor setValue(byte[] key, byte[] value) { - return setValue(toBytesOrNull(key, v -> v), - toBytesOrNull(value, v -> v)); + return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v)); } public ModifyableTableDescriptor setValue(String key, String value) { - return setValue(toBytesOrNull(key, Bytes::toBytes), - toBytesOrNull(value, Bytes::toBytes)); + return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes)); } /** - * @param key The key. + * @param key The key. * @param value The value. If null, removes the setting. */ - private ModifyableTableDescriptor setValue(final Bytes key, - final String value) { + private ModifyableTableDescriptor setValue(final Bytes key, final String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } /** * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. + * @param key The key. * @param value The value. If null, removes the setting. */ public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { @@ -736,9 +707,7 @@ private static Bytes toBytesOrNull(T t, Function f) { /** * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from TableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from TableDescriptor parameters. * @return the modifyable TD */ public ModifyableTableDescriptor removeValue(final String key) { @@ -747,9 +716,7 @@ public ModifyableTableDescriptor removeValue(final String key) { /** * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from TableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from TableDescriptor parameters. 
* @return the modifyable TD */ public ModifyableTableDescriptor removeValue(Bytes key) { @@ -758,9 +725,7 @@ public ModifyableTableDescriptor removeValue(Bytes key) { /** * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from TableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from TableDescriptor parameters. * @return the modifyable TD */ public ModifyableTableDescriptor removeValue(final byte[] key) { @@ -768,10 +733,8 @@ public ModifyableTableDescriptor removeValue(final byte[] key) { } /** - * Check if the readOnly flag of the table is set. If the readOnly flag is - * set then the contents of the table can only be read from but not - * modified. - * + * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents + * of the table can only be read from but not modified. * @return true if all columns in the table should be read only */ @Override @@ -780,13 +743,10 @@ public boolean isReadOnly() { } /** - * Setting the table as read only sets all the columns in the table as read - * only. By default all tables are modifiable, but if the readOnly flag is - * set to true then the contents of the table can only be read but not - * modified. - * - * @param readOnly True if all of the columns in the table should be read - * only. + * Setting the table as read only sets all the columns in the table as read only. By default all + * tables are modifiable, but if the readOnly flag is set to true then the contents of the table + * can only be read but not modified. + * @param readOnly True if all of the columns in the table should be read only. * @return the modifyable TD */ public ModifyableTableDescriptor setReadOnly(final boolean readOnly) { @@ -794,9 +754,8 @@ public ModifyableTableDescriptor setReadOnly(final boolean readOnly) { } /** - * Check if the compaction enable flag of the table is true. If flag is - * false then no minor/major compactions will be done in real. - * + * Check if the compaction enable flag of the table is true. If flag is false then no + * minor/major compactions will be done in real. * @return true if table compaction enabled */ @Override @@ -806,7 +765,6 @@ public boolean isCompactionEnabled() { /** * Setting the table compaction enable flag. - * * @param isEnable True if enable compaction. * @return the modifyable TD */ @@ -817,7 +775,6 @@ public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) { /** * Check if the split enable flag of the table is true. If flag is false then no split will be * done. - * * @return true if table region split enabled */ @Override @@ -828,7 +785,6 @@ public boolean isSplitEnabled() { /** * Setting the table region split enable flag. * @param isEnable True if enable region split. - * * @return the modifyable TD */ public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) { @@ -838,7 +794,6 @@ public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) { /** * Check if the region merge enable flag of the table is true. If flag is false then no merge * will be done. - * * @return true if table region merge enabled */ @Override @@ -849,7 +804,6 @@ public boolean isMergeEnabled() { /** * Setting the table region merge enable flag. * @param isEnable True if enable region merge. 
- * * @return the modifyable TD */ public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) { @@ -857,8 +811,8 @@ public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) { } /** - * Check if normalization enable flag of the table is true. If flag is false - * then no region normalizer won't attempt to normalize this table. + * Check if normalization enable flag of the table is true. If flag is false then no region + * normalizer won't attempt to normalize this table. * @return true if region normalization is enabled for this table **/ @Override @@ -886,13 +840,13 @@ public int getNormalizerTargetRegionCount() { public long getNormalizerTargetRegionSize() { long target_region_size = getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1)); - return target_region_size == Long.valueOf(-1) ? getOrDefault( - NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1)) : target_region_size; + return target_region_size == Long.valueOf(-1) + ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1)) + : target_region_size; } /** * Setting the table normalization enable flag. - * * @param isEnable True if enable normalization. * @return the modifyable TD */ @@ -919,9 +873,7 @@ public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long region } /** - * Sets the {@link Durability} setting for the table. This defaults to - * Durability.USE_DEFAULT. - * + * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT. * @param durability enum value * @return the modifyable TD */ @@ -931,7 +883,6 @@ public ModifyableTableDescriptor setDurability(Durability durability) { /** * Returns the durability setting for the table. - * * @return durability setting for the table. */ @Override @@ -940,9 +891,7 @@ public Durability getDurability() { } /** - * Get the name of the table - * - * @return TableName + * Get the name of the table n */ @Override public TableName getTableName() { @@ -950,10 +899,9 @@ public TableName getTableName() { } /** - * This sets the class associated with the region split policy which - * determines when a region split should occur. The class used by default is - * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * + * This sets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy * @param clazz the class name * @return the modifyable TD */ @@ -962,12 +910,11 @@ public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) { } /** - * This gets the class associated with the region split policy which - * determines when a region split should occur. The class used by default is - * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * - * @return the class name of the region split policy for this table. If this - * returns null, the default split policy is used. + * This gets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy + * @return the class name of the region split policy for this table. If this returns null, the + * default split policy is used. 
*/ @Override public String getRegionSplitPolicyClassName() { @@ -975,12 +922,10 @@ public String getRegionSplitPolicyClassName() { } /** - * Returns the maximum size upto which a region can grow to after which a - * region split is triggered. The region size is represented by the size of - * the biggest store file in that region. - * + * Returns the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that + * region. * @return max hregion size for table, -1 if not set. - * * @see #setMaxFileSize(long) */ @Override @@ -989,19 +934,17 @@ public long getMaxFileSize() { } /** - * Sets the maximum size upto which a region can grow to after which a - * region split is triggered. The region size is represented by the size of - * the biggest store file in that region, i.e. If the biggest store file - * grows beyond the maxFileSize, then the region split is triggered. This - * defaults to a value of 256 MB. + * Sets the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that + * region, i.e. If the biggest store file grows beyond the maxFileSize, then the region split is + * triggered. This defaults to a value of 256 MB. *

    - * This is not an absolute value and might vary. Assume that a single row - * exceeds the maxFileSize then the storeFileSize will be greater than - * maxFileSize since a single row cannot be split across multiple regions + * This is not an absolute value and might vary. Assume that a single row exceeds the + * maxFileSize then the storeFileSize will be greater than maxFileSize since a single row cannot + * be split across multiple regions *

    - * - * @param maxFileSize The maximum file size that a store file can grow to - * before a split is triggered. + * @param maxFileSize The maximum file size that a store file can grow to before a split is + * triggered. * @return the modifyable TD */ public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { @@ -1009,16 +952,13 @@ public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { } public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException { - return setMaxFileSize(Long.parseLong(PrettyPrinter. - valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); + return setMaxFileSize( + Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); } /** - * Returns the size of the memstore after which a flush to filesystem is - * triggered. - * + * Returns the size of the memstore after which a flush to filesystem is triggered. * @return memory cache flush size for each hregion, -1 if not set. - * * @see #setMemStoreFlushSize(long) */ @Override @@ -1027,10 +967,8 @@ public long getMemStoreFlushSize() { } /** - * Represents the maximum size of the memstore after which the contents of - * the memstore are flushed to the filesystem. This defaults to a size of 64 - * MB. - * + * Represents the maximum size of the memstore after which the contents of the memstore are + * flushed to the filesystem. This defaults to a size of 64 MB. * @param memstoreFlushSize memory cache flush size for each hregion * @return the modifyable TD */ @@ -1040,16 +978,14 @@ public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize) throws HBaseException { - return setMemStoreFlushSize(Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, - PrettyPrinter.Unit.BYTE))); + return setMemStoreFlushSize( + Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, PrettyPrinter.Unit.BYTE))); } /** - * This sets the class associated with the flush policy which determines - * determines the stores need to be flushed when flushing a region. The - * class used by default is defined in + * This sets the class associated with the flush policy which determines determines the stores + * need to be flushed when flushing a region. The class used by default is defined in * org.apache.hadoop.hbase.regionserver.FlushPolicy. - * * @param clazz the class name * @return the modifyable TD */ @@ -1058,12 +994,11 @@ public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) { } /** - * This gets the class associated with the flush policy which determines the - * stores need to be flushed when flushing a region. The class used by - * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy. - * - * @return the class name of the flush policy for this table. If this - * returns null, the default flush policy is used. + * This gets the class associated with the flush policy which determines the stores need to be + * flushed when flushing a region. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.FlushPolicy. + * @return the class name of the flush policy for this table. If this returns null, the default + * flush policy is used. */ @Override public String getFlushPolicyClassName() { @@ -1073,7 +1008,6 @@ public String getFlushPolicyClassName() { /** * Adds a column family. For the updating purpose please use * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. - * * @param family to add. 
* @return the modifyable TD */ @@ -1083,18 +1017,18 @@ public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor fa } int flength = family.getName() == null ? 0 : family.getName().length; if (flength > Byte.MAX_VALUE) { - throw new IllegalArgumentException("The length of family name is bigger than " + Byte.MAX_VALUE); + throw new IllegalArgumentException( + "The length of family name is bigger than " + Byte.MAX_VALUE); } if (hasColumnFamily(family.getName())) { - throw new IllegalArgumentException("Family '" - + family.getNameAsString() + "' already exists so cannot be added"); + throw new IllegalArgumentException( + "Family '" + family.getNameAsString() + "' already exists so cannot be added"); } return putColumnFamily(family); } /** * Modifies the existing column family. - * * @param family to update * @return this (for chained invocation) */ @@ -1103,8 +1037,8 @@ public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor throw new IllegalArgumentException("Family name cannot be null or empty"); } if (!hasColumnFamily(family.getName())) { - throw new IllegalArgumentException("Column family '" + family.getNameAsString() - + "' does not exist"); + throw new IllegalArgumentException( + "Column family '" + family.getNameAsString() + "' does not exist"); } return putColumnFamily(family); } @@ -1116,7 +1050,6 @@ private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) /** * Checks to see if this table contains the given column family - * * @param familyName Family name or column name. * @return true if the table contains the specified family name */ @@ -1138,8 +1071,8 @@ public String toString() { } /** - * @return Name of this table and then a map of all of the column family - * descriptors (with only the non-default column family attributes) + * @return Name of this table and then a map of all of the column family descriptors (with only + * the non-default column family attributes) */ @Override public String toStringCustomizedValues() { @@ -1181,9 +1114,10 @@ private StringBuilder getValues(boolean printDefaults) { } } // see if a reserved key is a default value. may not want to print it out - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + if ( + printDefaults || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value) + ) { reservedKeys.add(entry.getKey()); } } @@ -1243,13 +1177,11 @@ private StringBuilder getValues(boolean printDefaults) { } /** - * Compare the contents of the descriptor with another one passed as a - * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor, - * if yes then the contents of the descriptors are compared. - * + * Compare the contents of the descriptor with another one passed as a parameter. Checks if the + * obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the + * descriptors are compared. * @param obj The object to compare * @return true if the contents of the the two descriptors exactly match - * * @see java.lang.Object#equals(java.lang.Object) */ @Override @@ -1280,13 +1212,11 @@ public int hashCode() { // Comparable /** - * Compares the descriptor with another descriptor which is passed as a - * parameter. This compares the content of the two descriptors and not the - * reference. - * + * Compares the descriptor with another descriptor which is passed as a parameter. 
This compares + * the content of the two descriptors and not the reference. * @param other The MTD to compare - * @return 0 if the contents of the descriptors are exactly matching, 1 if - * there is a mismatch in the contents + * @return 0 if the contents of the descriptors are exactly matching, 1 if there is a mismatch + * in the contents */ @Override public int compareTo(final ModifyableTableDescriptor other) { @@ -1308,7 +1238,6 @@ public int getRegionReplication() { /** * Sets the number of replicas per region. - * * @param regionReplication the replication factor per region * @return the modifyable TD */ @@ -1321,17 +1250,16 @@ public ModifyableTableDescriptor setRegionReplication(int regionReplication) { */ @Override public boolean hasRegionMemStoreReplication() { - return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION); + return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, + DEFAULT_REGION_MEMSTORE_REPLICATION); } /** - * Enable or Disable the memstore replication from the primary region to the - * replicas. The replication will be used only for meta operations (e.g. - * flush, compaction, ...) - * - * @param memstoreReplication true if the new data written to the primary - * region should be replicated. false if the secondaries can tollerate to - * have new data only when the primary flushes the memstore. + * Enable or Disable the memstore replication from the primary region to the replicas. The + * replication will be used only for meta operations (e.g. flush, compaction, ...) + * @param memstoreReplication true if the new data written to the primary region should be + * replicated. false if the secondaries can tollerate to have new + * data only when the primary flushes the memstore. * @return the modifyable TD */ public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { @@ -1348,11 +1276,9 @@ public int getPriority() { } /** - * Returns all the column family names of the current table. The map of - * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor. - * This returns all the keys of the family map which represents the column - * family names of the table. - * + * Returns all the column family names of the current table. The map of TableDescriptor contains + * mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map + * which represents the column family names of the table. * @return Immutable sorted set of the keys of the families. */ @Override @@ -1361,12 +1287,10 @@ public Set getColumnFamilyNames() { } /** - * Returns the ColumnFamilyDescriptor for a specific column family with name as - * specified by the parameter column. - * + * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the + * parameter column. * @param column Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. 
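The ModifyableTableDescriptor family-map methods reformatted above (setColumnFamily, modifyColumnFamily, removeColumnFamily, getColumnFamily) are reached through TableDescriptorBuilder.newBuilder(existing). A hedged sketch, assuming placeholder family names:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnFamilyEditSketch {
  static TableDescriptor edit(TableDescriptor existing) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(existing);
    byte[] cf = Bytes.toBytes("cf");
    if (existing.hasColumnFamily(cf)) {
      // modifyColumnFamily replaces the stored descriptor for an existing family.
      ColumnFamilyDescriptor updated = ColumnFamilyDescriptorBuilder
        .newBuilder(existing.getColumnFamily(cf))
        .setMaxVersions(3)
        .build();
      builder.modifyColumnFamily(updated);
    } else {
      // setColumnFamily rejects duplicates, so only add when the family is absent.
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
    }
    // removeColumnFamily drops a family from the descriptor entirely.
    builder.removeColumnFamily(Bytes.toBytes("obsolete_cf"));
    return builder.build();
  }
}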
*/ @Override public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { @@ -1374,12 +1298,10 @@ public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { } /** - * Removes the ColumnFamilyDescriptor with name specified by the parameter column - * from the table descriptor - * + * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the table + * descriptor * @param column Name of the column family to be removed. - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. */ public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { return this.families.remove(column); @@ -1387,35 +1309,29 @@ public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { /** * Add a table coprocessor to this table. The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't - * check if the class can be loaded or not. Whether a coprocessor is - * loadable or not will be determined when a region is opened. - * - * @param className Full class name. - * @throws IOException - * @return the modifyable TD + * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class + * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a + * region is opened. + * @param className Full class name. n * @return the modifyable TD */ public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { - return setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER) - .build()); + return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) + .setPriority(Coprocessor.PRIORITY_USER).build()); } /** * Add a table coprocessor to this table. The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't - * check if the class can be loaded or not. Whether a coprocessor is - * loadable or not will be determined when a region is opened. - * + * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class + * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a + * region is opened. * @throws IOException any illegal parameter key/value * @return the modifyable TD */ - public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) - throws IOException { + public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException { checkHasCoprocessor(cp.getClassName()); if (cp.getPriority() < 0) { - throw new IOException("Priority must be bigger than or equal with zero, current:" - + cp.getPriority()); + throw new IOException( + "Priority must be bigger than or equal with zero, current:" + cp.getPriority()); } // Validate parameter kvs and then add key/values to kvString. 
StringBuilder kvString = new StringBuilder(); @@ -1424,8 +1340,7 @@ public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throw new IOException("Illegal parameter key = " + e.getKey()); } if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { - throw new IOException("Illegal parameter (" + e.getKey() - + ") value = " + e.getValue()); + throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue()); } if (kvString.length() != 0) { kvString.append(','); @@ -1435,29 +1350,26 @@ public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) kvString.append(e.getValue()); } - String value = cp.getJarPath().orElse("") - + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|" - + kvString.toString(); + String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|" + + Integer.toString(cp.getPriority()) + "|" + kvString.toString(); return setCoprocessorToMap(value); } /** * Add a table coprocessor to this table. The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't - * check if the class can be loaded or not. Whether a coprocessor is - * loadable or not will be determined when a region is opened. - * - * @param specStr The Coprocessor specification all in in one String - * @throws IOException - * @return the modifyable TD - * @deprecated used by HTableDescriptor and admin.rb. - * As of release 2.0.0, this will be removed in HBase 3.0.0. + * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class + * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a + * region is opened. + * @param specStr The Coprocessor specification all in in one String n * @return the modifyable + * TD + * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed + * in HBase 3.0.0. 
*/ @Deprecated public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr) throws IOException { - CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow( - () -> new IllegalArgumentException( + CoprocessorDescriptor cpDesc = + toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException( "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr)); checkHasCoprocessor(cpDesc.getClassName()); return setCoprocessorToMap(specStr); @@ -1486,7 +1398,8 @@ private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) { if (!keyMatcher.matches()) { continue; } - maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber); + maxCoprocessorNumber = + Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber); } maxCoprocessorNumber++; String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber); @@ -1494,32 +1407,27 @@ private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) { } /** - * Check if the table has an attached co-processor represented by the name - * className - * + * Check if the table has an attached co-processor represented by the name className * @param classNameToMatch - Class name of the co-processor * @return true of the table has a co-processor className */ @Override public boolean hasCoprocessor(String classNameToMatch) { - return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName() - .equals(classNameToMatch)); + return getCoprocessorDescriptors().stream() + .anyMatch(cp -> cp.getClassName().equals(classNameToMatch)); } /** - * Return the list of attached co-processor represented by their name - * className - * + * Return the list of attached co-processor represented by their name className * @return The list of co-processors classNames */ @Override public List getCoprocessorDescriptors() { List result = new ArrayList<>(); - for (Map.Entry e: getValues().entrySet()) { + for (Map.Entry e : getValues().entrySet()) { String key = Bytes.toString(e.getKey().get()).trim(); if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) { - toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()) - .ifPresent(result::add); + toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()).ifPresent(result::add); } } return result; @@ -1527,22 +1435,18 @@ public List getCoprocessorDescriptors() { /** * Remove a coprocessor from those set on the table - * * @param className Class name of the co-processor */ public void removeCoprocessor(String className) { Bytes match = null; Matcher keyMatcher; Matcher valueMatcher; - for (Map.Entry e : this.values - .entrySet()) { - keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e - .getKey().get())); + for (Map.Entry e : this.values.entrySet()) { + keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get())); if (!keyMatcher.matches()) { continue; } - valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes - .toString(e.getValue().get())); + valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get())); if (!valueMatcher.matches()) { continue; } @@ -1558,9 +1462,8 @@ public void removeCoprocessor(String className) { if (match != null) { ModifyableTableDescriptor.this.removeValue(match); } else { - throw new IllegalArgumentException(String - .format("coprocessor with class name %s was not found in the table attribute", - className)); + throw new IllegalArgumentException(String.format( + "coprocessor with class name %s was not found 
in the table attribute", className)); } } @@ -1572,15 +1475,11 @@ private byte[] toByteArray() { } /** - * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance - * with pb magic prefix - * @return An instance of {@link ModifyableTableDescriptor} made from - * bytes - * @throws DeserializationException - * @see #toByteArray() + * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix + * @return An instance of {@link ModifyableTableDescriptor} made from bytes n + * * @see #toByteArray() */ - private static TableDescriptor parseFrom(final byte[] bytes) - throws DeserializationException { + private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor"); } @@ -1619,15 +1518,14 @@ private static Optional toCoprocessorDescriptor(String sp if (matcher.matches()) { // jar file path can be empty if the cp class can be loaded // from class loader. - String path = matcher.group(1).trim().isEmpty() ? - null : matcher.group(1).trim(); + String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim(); String className = matcher.group(2).trim(); if (className.isEmpty()) { return Optional.empty(); } String priorityStr = matcher.group(3).trim(); - int priority = priorityStr.isEmpty() ? - Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); + int priority = + priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); String cfgSpec = null; try { cfgSpec = matcher.group(4); @@ -1642,11 +1540,8 @@ private static Optional toCoprocessorDescriptor(String sp ourConf.put(m.group(1), m.group(2)); } } - return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className) - .setJarPath(path) - .setPriority(priority) - .setProperties(ourConf) - .build()); + return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperties(ourConf).build()); } return Optional.empty(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java index 90e05a9a13c1..0b08dd8e7d6d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -86,11 +84,12 @@ public Set getColumnsModified() { } } - private TableDescriptorUtils() { } + private TableDescriptorUtils() { + } /** - * Compares two {@link TableDescriptor} and indicate which columns were added, deleted, - * or modified from oldTD to newTD + * Compares two {@link TableDescriptor} and indicate which columns were added, deleted, or + * modified from oldTD to newTD * @return a TableDescriptorDelta that contains the added/deleted/modified column names */ public static TableDescriptorDelta computeDelta(TableDescriptor oldTD, TableDescriptor newTD) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java index 0c818f8becba..d4db9eb49e67 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; @@ -62,6 +63,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.primitives.Booleans; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @@ -69,6 +71,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -86,7 +89,7 @@ class TableOverAsyncTable implements Table { private final IOExceptionSupplier poolSupplier; TableOverAsyncTable(AsyncConnectionImpl conn, AsyncTable table, - IOExceptionSupplier poolSupplier) { + IOExceptionSupplier poolSupplier) { this.conn = conn; this.table = table; this.poolSupplier = poolSupplier; @@ -141,7 +144,7 @@ public void batch(List actions, Object[] results) throws IOExcept @Override public void batchCallback(List actions, Object[] results, Callback callback) - throws IOException, InterruptedException { + throws IOException, InterruptedException { ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); CountDownLatch latch = new CountDownLatch(actions.size()); AsyncTableRegionLocator locator = conn.getRegionLocator(getName()); @@ -164,8 +167,8 @@ public void batchCallback(List actions, Object[] results, Cal (l, le) -> { if (le != null) { errors.add(new ThrowableWithExtraContext(le, EnvironmentEdgeManager.currentTime(), - "Error when finding the region for row " + - Bytes.toStringBinary(actions.get(index).getRow()))); + "Error when finding the region for row " + + Bytes.toStringBinary(actions.get(index).getRow()))); } else { 
callback.update(l.getRegion().getRegionName(), actions.get(index).getRow(), r); } @@ -330,13 +333,13 @@ public Result increment(Increment increment) throws IOException { @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) - throws IOException { + throws IOException { return FutureUtils.get(table.incrementColumnValue(row, family, qualifier, amount)); } @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, - Durability durability) throws IOException { + Durability durability) throws IOException { return FutureUtils.get(table.incrementColumnValue(row, family, qualifier, amount, durability)); } @@ -346,16 +349,16 @@ public void close() { @SuppressWarnings("deprecation") private static final class RegionCoprocessorRpcChannel extends RegionCoprocessorRpcChannelImpl - implements CoprocessorRpcChannel { + implements CoprocessorRpcChannel { RegionCoprocessorRpcChannel(AsyncConnectionImpl conn, TableName tableName, RegionInfo region, - byte[] row, long rpcTimeoutNs, long operationTimeoutNs) { + byte[] row, long rpcTimeoutNs, long operationTimeoutNs) { super(conn, tableName, region, row, rpcTimeoutNs, operationTimeoutNs); } @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { ClientCoprocessorRpcController c = new ClientCoprocessorRpcController(); CoprocessorBlockingRpcCallback callback = new CoprocessorBlockingRpcCallback<>(); super.callMethod(method, c, request, responsePrototype, callback); @@ -374,7 +377,7 @@ public void callMethod(MethodDescriptor method, RpcController controller, Messag @Override public Message callBlockingMethod(MethodDescriptor method, RpcController controller, - Message request, Message responsePrototype) throws ServiceException { + Message request, Message responsePrototype) throws ServiceException { ClientCoprocessorRpcController c = new ClientCoprocessorRpcController(); CoprocessorBlockingRpcCallback done = new CoprocessorBlockingRpcCallback<>(); callMethod(method, c, request, responsePrototype, done); @@ -401,31 +404,31 @@ public RegionCoprocessorRpcChannel coprocessorService(byte[] row) { /** * Get the corresponding start keys and regions for an arbitrary range of keys. *

    - * @param startKey Starting row in range, inclusive - * @param endKey Ending row in range + * @param startKey Starting row in range, inclusive + * @param endKey Ending row in range * @param includeEndKey true if endRow is inclusive, false if exclusive * @return A pair of list of start keys and list of HRegionLocations that contain the specified * range * @throws IOException if a remote or network exception occurs */ private Pair, List> getKeysAndRegionsInRange(final byte[] startKey, - final byte[] endKey, final boolean includeEndKey) throws IOException { + final byte[] endKey, final boolean includeEndKey) throws IOException { return getKeysAndRegionsInRange(startKey, endKey, includeEndKey, false); } /** * Get the corresponding start keys and regions for an arbitrary range of keys. *

    - * @param startKey Starting row in range, inclusive - * @param endKey Ending row in range + * @param startKey Starting row in range, inclusive + * @param endKey Ending row in range * @param includeEndKey true if endRow is inclusive, false if exclusive - * @param reload true to reload information or false to use cached information + * @param reload true to reload information or false to use cached information * @return A pair of list of start keys and list of HRegionLocations that contain the specified * range * @throws IOException if a remote or network exception occurs */ private Pair, List> getKeysAndRegionsInRange(final byte[] startKey, - final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException { + final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException { final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) { throw new IllegalArgumentException( @@ -440,9 +443,11 @@ private Pair, List> getKeysAndRegionsInRange(final keysInRange.add(currentKey); regionsInRange.add(regionLocation); currentKey = regionLocation.getRegion().getEndKey(); - } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) && - (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0 || - (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0))); + } while ( + !Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) + && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0 + || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0)) + ); return new Pair<>(keysInRange, regionsInRange); } @@ -462,7 +467,7 @@ private interface StubCall { } private void coprocessorService(String serviceName, byte[] startKey, byte[] endKey, - Callback callback, StubCall call) throws Throwable { + Callback callback, StubCall call) throws Throwable { // get regions covered by the row range ExecutorService pool = Context.current().wrap(this.poolSupplier.get()); List keys = getStartKeysInRange(startKey, endKey); @@ -496,18 +501,17 @@ private void coprocessorService(String serviceName, byte[] startKey, byte[] Bytes.toStringBinary(e.getKey()), ee); throw ee.getCause(); } catch (InterruptedException ie) { - throw new InterruptedIOException("Interrupted calling coprocessor service " + serviceName + - " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie); + throw new InterruptedIOException("Interrupted calling coprocessor service " + serviceName + + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie); } } } @Override public void coprocessorService(Class service, byte[] startKey, - byte[] endKey, Call callable, Callback callback) throws ServiceException, Throwable { + byte[] endKey, Call callable, Callback callback) throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(conn) - .setTableName(table.getName()) - .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); + .setTableName(table.getName()).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { final Context context = Context.current(); coprocessorService(service.getName(), startKey, endKey, callback, channel -> { @@ -522,17 +526,15 @@ public void coprocessorService(Class service, byte[] s @SuppressWarnings("unchecked") @Override public void batchCoprocessorService(MethodDescriptor methodDescriptor, - Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) - throws 
ServiceException, Throwable { + Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) + throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(conn) - .setTableName(table.getName()) - .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); + .setTableName(table.getName()).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { final Context context = Context.current(); coprocessorService(methodDescriptor.getFullName(), startKey, endKey, callback, channel -> { try (Scope ignored = context.makeCurrent()) { - return (R) channel.callBlockingMethod( - methodDescriptor, null, request, responsePrototype); + return (R) channel.callBlockingMethod(methodDescriptor, null, request, responsePrototype); } }); }, supplier); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java index 40612e9b2025..ffd8cf8409d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.exceptions.DeserializationException; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** @@ -40,53 +42,49 @@ public static enum State { /** * Covert from PB version of State - * - * @param state convert from - * @return POJO + * @param state convert from n */ public static State convert(HBaseProtos.TableState.State state) { State ret; switch (state) { - case ENABLED: - ret = State.ENABLED; - break; - case DISABLED: - ret = State.DISABLED; - break; - case DISABLING: - ret = State.DISABLING; - break; - case ENABLING: - ret = State.ENABLING; - break; - default: - throw new IllegalStateException(state.toString()); + case ENABLED: + ret = State.ENABLED; + break; + case DISABLED: + ret = State.DISABLED; + break; + case DISABLING: + ret = State.DISABLING; + break; + case ENABLING: + ret = State.ENABLING; + break; + default: + throw new IllegalStateException(state.toString()); } return ret; } /** - * Covert to PB version of State - * - * @return PB + * Covert to PB version of State n */ public HBaseProtos.TableState.State convert() { HBaseProtos.TableState.State state; switch (this) { - case ENABLED: - state = HBaseProtos.TableState.State.ENABLED; - break; - case DISABLED: - state = HBaseProtos.TableState.State.DISABLED; - break; - case DISABLING: - state = HBaseProtos.TableState.State.DISABLING; - break; - case ENABLING: - state = HBaseProtos.TableState.State.ENABLING; - break; - default: - throw new IllegalStateException(this.toString()); + case ENABLED: + state = HBaseProtos.TableState.State.ENABLED; + break; + case DISABLED: + state = HBaseProtos.TableState.State.DISABLED; + break; + case DISABLING: + state = 
HBaseProtos.TableState.State.DISABLING; + break; + case ENABLING: + state = HBaseProtos.TableState.State.ENABLING; + break; + default: + throw new IllegalStateException(this.toString()); } return state; } @@ -141,7 +139,7 @@ public boolean isDisabledOrDisabling() { /** * Create instance of TableState. * @param tableName name of the table - * @param state table state + * @param state table state */ public TableState(TableName tableName, State state) { this.tableName = tableName; @@ -156,9 +154,7 @@ public State getState() { } /** - * Table name for state - * - * @return milliseconds + * Table name for state n */ public TableName getTableName() { return tableName; @@ -180,28 +176,22 @@ public boolean inStates(State state) { */ public boolean inStates(State... states) { for (State s : states) { - if (s.equals(this.state)) - return true; + if (s.equals(this.state)) return true; } return false; } - /** - * Covert to PB version of TableState - * @return PB + * Covert to PB version of TableState n */ public HBaseProtos.TableState convert() { - return HBaseProtos.TableState.newBuilder() - .setState(this.state.convert()).build(); + return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build(); } /** * Covert from PB version of TableState - * - * @param tableName table this state of - * @param tableState convert from - * @return POJO + * @param tableName table this state of + * @param tableState convert from n */ public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) { TableState.State state = State.convert(tableState.getState()); @@ -209,7 +199,7 @@ public static TableState convert(TableName tableName, HBaseProtos.TableState tab } public static TableState parseFrom(TableName tableName, byte[] bytes) - throws DeserializationException { + throws DeserializationException { try { return convert(tableName, HBaseProtos.TableState.parseFrom(bytes)); } catch (InvalidProtocolBufferException e) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index bc521d0f7b19..1634b13ec7e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -115,7 +115,7 @@ private static ZooKeeperProtos.MetaRegionServer getMetaProto(byte[] data) throws } private static void tryComplete(MutableInt remaining, HRegionLocation[] locs, - CompletableFuture future) { + CompletableFuture future) { remaining.decrement(); if (remaining.intValue() > 0) { return; @@ -123,8 +123,8 @@ private static void tryComplete(MutableInt remaining, HRegionLocation[] locs, future.complete(new RegionLocations(locs)); } - private Pair getStateAndServerName( - ZooKeeperProtos.MetaRegionServer proto) { + private Pair + getStateAndServerName(ZooKeeperProtos.MetaRegionServer proto) { RegionState.State state; if (proto.hasState()) { state = RegionState.State.convert(proto.getState()); @@ -137,7 +137,7 @@ private Pair getStateAndServerName( } private void getMetaRegionLocation(CompletableFuture future, - List metaReplicaZNodes) { + List metaReplicaZNodes) { if (metaReplicaZNodes.isEmpty()) { future.completeExceptionally(new IOException("No meta znode available")); } @@ -178,8 +178,8 @@ private void getMetaRegionLocation(CompletableFuture future, } else { Pair stateAndServerName = getStateAndServerName(proto); if (stateAndServerName.getFirst() != 
RegionState.State.OPEN) { - LOG.warn("Meta region for replica " + replicaId + " is in state " + - stateAndServerName.getFirst()); + LOG.warn("Meta region for replica " + replicaId + " is in state " + + stateAndServerName.getFirst()); locs[replicaId] = null; } else { locs[replicaId] = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java index 3957d1e1d0e7..ee66bcada1ba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Configurable policy for the amount of time a client should wait for a new request to the - * server when given the server load statistics. + * Configurable policy for the amount of time a client should wait for a new request to the server + * when given the server load statistics. *

    * Must have a single-argument constructor that takes a {@link org.apache.hadoop.conf.Configuration} *

    @@ -30,8 +30,7 @@ @InterfaceAudience.Public public interface ClientBackoffPolicy { - public static final String BACKOFF_POLICY_CLASS = - "hbase.client.statistics.backoff-policy"; + public static final String BACKOFF_POLICY_CLASS = "hbase.client.statistics.backoff-policy"; /** * @return the number of ms to wait on the client based on the diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java index 035894723749..a786702b1693 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.ReflectionUtils; @InterfaceAudience.Private @InterfaceStability.Evolving @@ -37,17 +37,16 @@ private ClientBackoffPolicyFactory() { public static ClientBackoffPolicy create(Configuration conf) { // create the backoff policy String className = - conf.get(ClientBackoffPolicy.BACKOFF_POLICY_CLASS, NoBackoffPolicy.class - .getName()); - return ReflectionUtils.instantiateWithCustomCtor(className, - new Class[] { Configuration.class }, new Object[] { conf }); + conf.get(ClientBackoffPolicy.BACKOFF_POLICY_CLASS, NoBackoffPolicy.class.getName()); + return ReflectionUtils.instantiateWithCustomCtor(className, + new Class[] { Configuration.class }, new Object[] { conf }); } /** * Default backoff policy that doesn't create any backoff for the client, regardless of load */ public static class NoBackoffPolicy implements ClientBackoffPolicy { - public NoBackoffPolicy(Configuration conf){ + public NoBackoffPolicy(Configuration conf) { // necessary to meet contract } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java index f769e2ed611d..aa84207e1ed1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,11 +23,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Simple exponential backoff policy on for the client that uses a percent^4 times the - * max backoff to generate the backoff time. + * Simple exponential backoff policy on for the client that uses a percent^4 times the max backoff + * to generate the backoff time. 
*/ @InterfaceAudience.Public public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy { @@ -77,8 +78,7 @@ public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistic heapOccupancy = heapOccupancyHighWatermark; } percent = Math.max(percent, - scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, - 0.1, 1.0)); + scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, 0.1, 1.0)); } percent = Math.max(percent, compactionPressure); // square the percent as a value less than 1. Closer we move to 100 percent, @@ -92,13 +92,13 @@ public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistic /** Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax] */ private static double scale(double valueIn, double baseMin, double baseMax, double limitMin, - double limitMax) { - Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", - baseMin, baseMax); - Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", - limitMin, limitMax); + double limitMax) { + Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", baseMin, + baseMax); + Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", limitMin, + limitMax); Preconditions.checkArgument(valueIn >= baseMin && valueIn <= baseMax, - "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax); + "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax); return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java index e18309d9bbe9..ab5915ec9750 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.client.backoff; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.client.RegionLoadStats; -import org.apache.hadoop.hbase.util.Bytes; - import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.hbase.client.RegionLoadStats; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Track the statistics for a single region @@ -33,14 +32,12 @@ public class ServerStatistics { private Map stats = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** - * Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, - * as something gets set - * @param region - * @param currentStats + * Good enough attempt. Last writer wins. 
It doesn't really matter which one gets to update, as + * something gets set nn */ public void update(byte[] region, RegionLoadStats currentStats) { RegionStatistics regionStat = this.stats.get(region); - if(regionStat == null){ + if (regionStat == null) { regionStat = new RegionStatistics(); this.stats.put(region, regionStat); } @@ -49,7 +46,7 @@ public void update(byte[] region, RegionLoadStats currentStats) { } @InterfaceAudience.Private - public RegionStatistics getStatsForRegion(byte[] regionName){ + public RegionStatistics getStatsForRegion(byte[] regionName) { return stats.get(regionName); } @@ -64,11 +61,11 @@ public void update(RegionLoadStats currentStats) { this.compactionPressure = currentStats.getCompactionPressure(); } - public int getMemStoreLoadPercent(){ + public int getMemStoreLoadPercent() { return this.memstoreLoad; } - public int getHeapOccupancyPercent(){ + public int getHeapOccupancyPercent() { return this.heapOccupancy; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java index 2c1647f85421..805e27962cfd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.coprocessor; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * A collection of interfaces and utilities used for interacting with custom RPC - * interfaces exposed by Coprocessors. + * A collection of interfaces and utilities used for interacting with custom RPC interfaces exposed + * by Coprocessors. */ @InterfaceAudience.Public public abstract class Batch { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java index ab838cca4d33..900f96440dca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,18 +35,16 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; /** - * ColumnInterpreter for doing Aggregation's with BigDecimal columns. This class - * is required at the RegionServer also. - * + * ColumnInterpreter for doing Aggregation's with BigDecimal columns. This class is required at the + * RegionServer also. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public class BigDecimalColumnInterpreter extends ColumnInterpreter { +public class BigDecimalColumnInterpreter + extends ColumnInterpreter { @Override - public BigDecimal getValue(byte[] colFamily, byte[] colQualifier, Cell kv) - throws IOException { + public BigDecimal getValue(byte[] colFamily, byte[] colQualifier, Cell kv) throws IOException { if (kv == null || CellUtil.cloneValue(kv) == null) { return null; } @@ -88,8 +85,9 @@ public BigDecimal increment(BigDecimal bd) { @Override public BigDecimal multiply(BigDecimal bd1, BigDecimal bd2) { - return (bd1 == null || bd2 == null) ? null : bd1.multiply(bd2) - .setScale(2,RoundingMode.HALF_EVEN); + return (bd1 == null || bd2 == null) + ? null + : bd1.multiply(bd2).setScale(2, RoundingMode.HALF_EVEN); } @Override @@ -99,8 +97,7 @@ public BigDecimal getMinValue() { @Override public double divideForAvg(BigDecimal bd1, Long l2) { - return (l2 == null || bd1 == null) ? Double.NaN : (bd1.doubleValue() / l2 - .doubleValue()); + return (l2 == null || bd1 == null) ? Double.NaN : (bd1.doubleValue() / l2.doubleValue()); } @Override @@ -120,7 +117,7 @@ public EmptyMsg getRequestData() { @Override public void initialize(EmptyMsg msg) { - //nothing + // nothing } private BigDecimalMsg getProtoForType(BigDecimal t) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java index 19261e196c60..767a8d384a11 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,30 +30,27 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; /** - * a concrete column interpreter implementation. The cell value is a Double value - * and its promoted data type is also a Double value. For computing aggregation - * function, this class is used to find the datatype of the cell value. Client - * is supposed to instantiate it and passed along as a parameter. See - * TestDoubleColumnInterpreter methods for its sample usage. - * Its methods handle null arguments gracefully. + * a concrete column interpreter implementation. The cell value is a Double value and its promoted + * data type is also a Double value. For computing aggregation function, this class is used to find + * the datatype of the cell value. Client is supposed to instantiate it and passed along as a + * parameter. See TestDoubleColumnInterpreter methods for its sample usage. Its methods handle null + * arguments gracefully. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public class DoubleColumnInterpreter extends ColumnInterpreter{ - - @Override - public Double getValue(byte[] colFamily, byte[] colQualifier, Cell c) - throws IOException { - if (c == null || c.getValueLength() != Bytes.SIZEOF_DOUBLE) - return null; +public class DoubleColumnInterpreter + extends ColumnInterpreter { + + @Override + public Double getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException { + if (c == null || c.getValueLength() != Bytes.SIZEOF_DOUBLE) return null; return PrivateCellUtil.getValueAsDouble(c); } @Override public Double add(Double d1, Double d2) { if (d1 == null || d2 == null) { - return (d1 == null) ? d2 : d1; + return (d1 == null) ? d2 : d1; } return d1 + d2; } @@ -63,8 +59,7 @@ public Double add(Double d1, Double d2) { public int compare(final Double d1, final Double d2) { if (d1 == null ^ d2 == null) { return d1 == null ? -1 : 1; // either of one is null. - } else if (d1 == null) - return 0; // both are null + } else if (d1 == null) return 0; // both are null return d1.compareTo(d2); // natural ordering. } @@ -90,8 +85,7 @@ public Double getMinValue() { @Override public double divideForAvg(Double d1, Long l2) { - return (l2 == null || d1 == null) ? Double.NaN : (d1.doubleValue() / l2 - .doubleValue()); + return (l2 == null || d1 == null) ? Double.NaN : (d1.doubleValue() / l2.doubleValue()); } @Override @@ -111,7 +105,7 @@ public EmptyMsg getRequestData() { @Override public void initialize(EmptyMsg msg) { - //nothing + // nothing } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java index b2df9ea1999d..2a05fbfd0ddb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,23 +30,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LongMsg; /** - * a concrete column interpreter implementation. The cell value is a Long value - * and its promoted data type is also a Long value. For computing aggregation - * function, this class is used to find the datatype of the cell value. Client - * is supposed to instantiate it and passed along as a parameter. See - * TestAggregateProtocol methods for its sample usage. - * Its methods handle null arguments gracefully. + * a concrete column interpreter implementation. The cell value is a Long value and its promoted + * data type is also a Long value. For computing aggregation function, this class is used to find + * the datatype of the cell value. Client is supposed to instantiate it and passed along as a + * parameter. See TestAggregateProtocol methods for its sample usage. Its methods handle null + * arguments gracefully. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public class LongColumnInterpreter extends ColumnInterpreter { +public class LongColumnInterpreter + extends ColumnInterpreter { @Override - public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv) - throws IOException { - if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG) - return null; + public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv) throws IOException { + if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG) return null; return PrivateCellUtil.getValueAsLong(kv); } @@ -64,8 +60,7 @@ public Long add(Long l1, Long l2) { public int compare(final Long l1, final Long l2) { if (l1 == null ^ l2 == null) { return l1 == null ? -1 : 1; // either of one is null. - } else if (l1 == null) - return 0; // both are null + } else if (l1 == null) return 0; // both are null return l1.compareTo(l2); // natural ordering. } @@ -91,8 +86,7 @@ public Long getMinValue() { @Override public double divideForAvg(Long l1, Long l2) { - return (l2 == null || l1 == null) ? Double.NaN : (l1.doubleValue() / l2 - .doubleValue()); + return (l2 == null || l1 == null) ? Double.NaN : (l1.doubleValue() / l2.doubleValue()); } @Override @@ -112,7 +106,7 @@ public EmptyMsg getRequestData() { @Override public void initialize(EmptyMsg msg) { - //nothing + // nothing } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java index 0af56d90cbb7..e2038a17b376 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,25 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.metrics; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; - /** * Provides metrics related to scan operations (both server side and client side metrics). *

    - * The data can be passed to mapreduce framework or other systems. - * We use atomic longs so that one thread can increment, - * while another atomically resets to zero after the values are reported - * to hadoop's counters. + * The data can be passed to mapreduce framework or other systems. We use atomic longs so that one + * thread can increment, while another atomically resets to zero after the values are reported to + * hadoop's counters. *

    - * Some of these metrics are general for any client operation such as put - * However, there is no need for this. So they are defined under scan operation - * for now. + * Some of these metrics are general for any client operation such as put However, there is no need + * for this. So they are defined under scan operation for now. */ @InterfaceAudience.Public public class ScanMetrics extends ServerSideScanMetrics { @@ -45,13 +40,14 @@ public class ScanMetrics extends ServerSideScanMetrics { public static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS"; public static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS"; public static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS"; - public static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION"; + public static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = + "NOT_SERVING_REGION_EXCEPTION"; public static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS"; public static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS"; public static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED"; public static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES"; public static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES"; - + /** * number of RPC calls */ @@ -65,7 +61,8 @@ public class ScanMetrics extends ServerSideScanMetrics { /** * sum of milliseconds between sequential next calls */ - public final AtomicLong sumOfMillisSecBetweenNexts = createCounter(MILLIS_BETWEEN_NEXTS_METRIC_NAME); + public final AtomicLong sumOfMillisSecBetweenNexts = + createCounter(MILLIS_BETWEEN_NEXTS_METRIC_NAME); /** * number of NotServingRegionException caught @@ -80,7 +77,8 @@ public class ScanMetrics extends ServerSideScanMetrics { /** * number of bytes in Result objects from remote region servers */ - public final AtomicLong countOfBytesInRemoteResults = createCounter(BYTES_IN_REMOTE_RESULTS_METRIC_NAME); + public final AtomicLong countOfBytesInRemoteResults = + createCounter(BYTES_IN_REMOTE_RESULTS_METRIC_NAME); /** * number of regions diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java index 6e72cedc9350..7a266de33453 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; @@ -36,9 +35,8 @@ public class ServerSideScanMetrics { private final Map counters = new HashMap<>(); /** - * Create a new counter with the specified name - * @param counterName - * @return {@link AtomicLong} instance for the counter with counterName + * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the + * counter with counterName */ protected AtomicLong createCounter(String counterName) { AtomicLong c = new AtomicLong(0); @@ -52,7 +50,8 @@ protected AtomicLong createCounter(String counterName) { /** * number of rows filtered during scan RPC */ - public final AtomicLong countOfRowsFiltered = createCounter(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME); + public final AtomicLong countOfRowsFiltered = + createCounter(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME); /** * number of rows scanned during scan RPC. Not every row scanned will be returned to the client @@ -61,8 +60,7 @@ protected AtomicLong createCounter(String counterName) { public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME); /** - * @param counterName - * @param value + * nn */ public void setCounter(String counterName, long value) { AtomicLong c = this.counters.get(counterName); @@ -72,24 +70,21 @@ public void setCounter(String counterName, long value) { } /** - * @param counterName - * @return true if a counter exists with the counterName + * n * @return true if a counter exists with the counterName */ public boolean hasCounter(String counterName) { return this.counters.containsKey(counterName); } /** - * @param counterName - * @return {@link AtomicLong} instance for this counter name, null if counter does not exist. + * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist. */ public AtomicLong getCounter(String counterName) { return this.counters.get(counterName); } /** - * @param counterName - * @param delta + * nn */ public void addToCounter(String counterName, long delta) { AtomicLong c = this.counters.get(counterName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index 05343eae4ccd..4145a348b08c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,11 +42,13 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; @@ -63,7 +64,8 @@ public final class ReplicationPeerConfigUtil { public static final String HBASE_REPLICATION_PEER_BASE_CONFIG = "hbase.replication.peer.base.config"; - private ReplicationPeerConfigUtil() {} + private ReplicationPeerConfigUtil() { + } public static String convertToString(Set namespaces) { if (namespaces == null) { @@ -73,13 +75,13 @@ public static String convertToString(Set namespaces) { } /** convert map to TableCFs Object */ - public static ReplicationProtos.TableCF[] convert( - Map> tableCfs) { + public static ReplicationProtos.TableCF[] + convert(Map> tableCfs) { if (tableCfs == null) { return null; } List tableCFList = new ArrayList<>(tableCfs.entrySet().size()); - ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder(); + ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder(); for (Map.Entry> entry : tableCfs.entrySet()) { tableCFBuilder.clear(); tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(entry.getKey())); @@ -102,10 +104,9 @@ public static String convertToString(Map } /** - * Convert string to TableCFs Object. - * This is only for read TableCFs information from TableCF node. - * Input String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3. - * */ + * Convert string to TableCFs Object. This is only for read TableCFs information from TableCF + * node. Input String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3. + */ public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) { return null; @@ -122,7 +123,7 @@ public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { continue; } // 2 split to "table" and "cf1,cf2" - // for each table: "table#cf1,cf2" or "table" + // for each table: "table#cf1,cf2" or "table" String[] pair = tab.split(":"); String tabName = pair[0].trim(); if (pair.length > 2 || tabName.length() == 0) { @@ -139,8 +140,7 @@ public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { ns = dbs[0]; tName = dbs[1]; } - tableCFBuilder.setTableName( - ProtobufUtil.toProtoTableName(TableName.valueOf(ns, tName))); + tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf(ns, tName))); // 3 parse "cf1,cf2" part to List if (pair.length == 2) { @@ -158,18 +158,17 @@ public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { } /** - * Convert TableCFs Object to String. - * Output String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3 - * */ + * Convert TableCFs Object to String. 
Output String Format: + * ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3 + */ public static String convert(ReplicationProtos.TableCF[] tableCFs) { StringBuilder sb = new StringBuilder(); for (int i = 0, n = tableCFs.length; i < n; i++) { ReplicationProtos.TableCF tableCF = tableCFs[i]; String namespace = tableCF.getTableName().getNamespace().toStringUtf8(); if (StringUtils.isNotEmpty(namespace)) { - sb.append(namespace).append("."). - append(tableCF.getTableName().getQualifier().toStringUtf8()) - .append(":"); + sb.append(namespace).append(".") + .append(tableCF.getTableName().getQualifier().toStringUtf8()).append(":"); } else { sb.append(tableCF.getTableName().toString()).append(":"); } @@ -185,10 +184,10 @@ public static String convert(ReplicationProtos.TableCF[] tableCFs) { } /** - * Get TableCF in TableCFs, if not exist, return null. - * */ + * Get TableCF in TableCFs, if not exist, return null. + */ public static ReplicationProtos.TableCF getTableCF(ReplicationProtos.TableCF[] tableCFs, - String table) { + String table) { for (int i = 0, n = tableCFs.length; i < n; i++) { ReplicationProtos.TableCF tableCF = tableCFs[i]; if (tableCF.getTableName().getQualifier().toStringUtf8().equals(table)) { @@ -199,10 +198,9 @@ public static ReplicationProtos.TableCF getTableCF(ReplicationProtos.TableCF[] t } /** - * Parse bytes into TableCFs. - * It is used for backward compatibility. - * Old format bytes have no PB_MAGIC Header - * */ + * Parse bytes into TableCFs. It is used for backward compatibility. Old format bytes have no + * PB_MAGIC Header + */ public static ReplicationProtos.TableCF[] parseTableCFs(byte[] bytes) throws IOException { if (bytes == null) { return null; @@ -211,16 +209,16 @@ public static ReplicationProtos.TableCF[] parseTableCFs(byte[] bytes) throws IOE } /** - * Convert tableCFs string into Map. - * */ + * Convert tableCFs string into Map. + */ public static Map> parseTableCFsFromConfig(String tableCFsConfig) { ReplicationProtos.TableCF[] tableCFs = convert(tableCFsConfig); return convert2Map(tableCFs); } /** - * Convert tableCFs Object to Map. - * */ + * Convert tableCFs Object to Map. 
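As an illustrative sketch (not part of this change) of the table-CFs string format parsed above; the namespace, table and family names are invented:

  import java.util.List;
  import java.util.Map;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;

  public class TableCfsFormatSketch {
    public static void main(String[] args) {
      // Semicolon-separated tables, each with an optional ":cf1,cf2" suffix; a table given
      // without a suffix stands for all of its column families (its map value is null).
      Map<TableName, List<String>> tableCfs = ReplicationPeerConfigUtil
          .parseTableCFsFromConfig("ns1.table1:cf1,cf2;ns2.table2;table3:cfA");
      tableCfs.forEach((table, cfs) -> System.out.println(table + " -> " + cfs));
    }
  }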
+ */ public static Map> convert2Map(ReplicationProtos.TableCF[] tableCFs) { if (tableCFs == null || tableCFs.length == 0) { return null; @@ -248,11 +246,11 @@ public static Map> convert2Map(ReplicationProtos.TableCF * @throws DeserializationException deserialization exception */ public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) - throws DeserializationException { + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pbLen = ProtobufUtil.lengthOfPBMagic(); ReplicationProtos.ReplicationPeer.Builder builder = - ReplicationProtos.ReplicationPeer.newBuilder(); + ReplicationProtos.ReplicationPeer.newBuilder(); ReplicationProtos.ReplicationPeer peer; try { ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen); @@ -311,7 +309,7 @@ public static ReplicationPeerConfig convert(ReplicationProtos.ReplicationPeer pe } Map> excludeTableCFsMap = convert2Map(peer.getExcludeTableCfsList() - .toArray(new ReplicationProtos.TableCF[peer.getExcludeTableCfsCount()])); + .toArray(new ReplicationProtos.TableCF[peer.getExcludeTableCfsCount()])); if (excludeTableCFsMap != null) { builder.setExcludeTableCFsMap(excludeTableCFsMap); } @@ -330,7 +328,7 @@ public static ReplicationPeerConfig convert(ReplicationProtos.ReplicationPeer pe public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig peerConfig) { ReplicationProtos.ReplicationPeer.Builder builder = - ReplicationProtos.ReplicationPeer.newBuilder(); + ReplicationProtos.ReplicationPeer.newBuilder(); // we used to set cluster key as required so here we must always set it, until we can make sure // that no one uses the old proto file. builder.setClusterkey(peerConfig.getClusterKey() != null ? peerConfig.getClusterKey() : ""); @@ -340,16 +338,13 @@ public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig pe for (Map.Entry entry : peerConfig.getPeerData().entrySet()) { builder.addData(HBaseProtos.BytesBytesPair.newBuilder() - .setFirst(UnsafeByteOperations.unsafeWrap(entry.getKey())) - .setSecond(UnsafeByteOperations.unsafeWrap(entry.getValue())) - .build()); + .setFirst(UnsafeByteOperations.unsafeWrap(entry.getKey())) + .setSecond(UnsafeByteOperations.unsafeWrap(entry.getValue())).build()); } for (Map.Entry entry : peerConfig.getConfiguration().entrySet()) { - builder.addConfiguration(HBaseProtos.NameStringPair.newBuilder() - .setName(entry.getKey()) - .setValue(entry.getValue()) - .build()); + builder.addConfiguration(HBaseProtos.NameStringPair.newBuilder().setName(entry.getKey()) + .setValue(entry.getValue()).build()); } ReplicationProtos.TableCF[] tableCFs = convert(peerConfig.getTableCFsMap()); @@ -400,24 +395,25 @@ public static byte[] toByteArray(final ReplicationPeerConfig peerConfig) { } public static ReplicationPeerDescription - toReplicationPeerDescription(ReplicationProtos.ReplicationPeerDescription desc) { + toReplicationPeerDescription(ReplicationProtos.ReplicationPeerDescription desc) { boolean enabled = - ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState(); + ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState(); ReplicationPeerConfig config = convert(desc.getConfig()); return new ReplicationPeerDescription(desc.getId(), enabled, config, toSyncReplicationState(desc.getSyncReplicationState())); } public static ReplicationProtos.ReplicationPeerDescription - toProtoReplicationPeerDescription(ReplicationPeerDescription desc) { + toProtoReplicationPeerDescription(ReplicationPeerDescription desc) { 
ReplicationProtos.ReplicationPeerDescription.Builder builder = - ReplicationProtos.ReplicationPeerDescription.newBuilder(); + ReplicationProtos.ReplicationPeerDescription.newBuilder(); builder.setId(desc.getPeerId()); ReplicationProtos.ReplicationState.Builder stateBuilder = - ReplicationProtos.ReplicationState.newBuilder(); - stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED : - ReplicationProtos.ReplicationState.State.DISABLED); + ReplicationProtos.ReplicationState.newBuilder(); + stateBuilder.setState(desc.isEnabled() + ? ReplicationProtos.ReplicationState.State.ENABLED + : ReplicationProtos.ReplicationState.State.DISABLED); builder.setState(stateBuilder.build()); builder.setConfig(convert(desc.getPeerConfig())); @@ -427,21 +423,21 @@ public static byte[] toByteArray(final ReplicationPeerConfig peerConfig) { } public static ReplicationProtos.SyncReplicationState - toSyncReplicationState(SyncReplicationState state) { + toSyncReplicationState(SyncReplicationState state) { ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder = - ReplicationProtos.SyncReplicationState.newBuilder(); + ReplicationProtos.SyncReplicationState.newBuilder(); syncReplicationStateBuilder - .setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal())); + .setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal())); return syncReplicationStateBuilder.build(); } public static SyncReplicationState - toSyncReplicationState(ReplicationProtos.SyncReplicationState state) { + toSyncReplicationState(ReplicationProtos.SyncReplicationState state) { return SyncReplicationState.valueOf(state.getState().getNumber()); } public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig( - Map> tableCfs, ReplicationPeerConfig peerConfig) { + Map> tableCfs, ReplicationPeerConfig peerConfig) { ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig); Map> preTableCfs = peerConfig.getTableCFsMap(); if (preTableCfs == null) { @@ -453,21 +449,19 @@ public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig( } /** - * Helper method to add/removev base peer configs from Configuration to ReplicationPeerConfig - * - * This merges the user supplied peer configuration - * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs - * provided as property hbase.replication.peer.base.configs in hbase configuration. - * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". - * If value is empty, it will remove the existing key-value from peer config. - * + * Helper method to add/removev base peer configs from Configuration to ReplicationPeerConfig This + * merges the user supplied peer configuration + * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs provided as + * property hbase.replication.peer.base.configs in hbase configuration. Expected format for this + * hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". If value is empty, it will remove the existing + * key-value from peer config. * @param conf Configuration * @return ReplicationPeerConfig containing updated configs. */ public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf, ReplicationPeerConfig receivedPeerConfig) { - ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig. 
- newBuilder(receivedPeerConfig); + ReplicationPeerConfigBuilder copiedPeerConfigBuilder = + ReplicationPeerConfig.newBuilder(receivedPeerConfig); Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); @@ -493,8 +487,8 @@ public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configurati } public static ReplicationPeerConfig appendExcludeTableCFsToReplicationPeerConfig( - Map> excludeTableCfs, ReplicationPeerConfig peerConfig) - throws ReplicationException { + Map> excludeTableCfs, ReplicationPeerConfig peerConfig) + throws ReplicationException { if (excludeTableCfs == null) { throw new ReplicationException("exclude tableCfs is null"); } @@ -508,8 +502,8 @@ public static ReplicationPeerConfig appendExcludeTableCFsToReplicationPeerConfig return builder.build(); } - private static Map> mergeTableCFs( - Map> preTableCfs, Map> tableCfs) { + private static Map> + mergeTableCFs(Map> preTableCfs, Map> tableCfs) { Map> newTableCfs = copyTableCFsMap(preTableCfs); for (Map.Entry> entry : tableCfs.entrySet()) { TableName table = entry.getKey(); @@ -535,7 +529,7 @@ private static Map> mergeTableCFs( } private static Map> - copyTableCFsMap(Map> preTableCfs) { + copyTableCFsMap(Map> preTableCfs) { Map> newTableCfs = new HashMap<>(); preTableCfs.forEach( (table, cfs) -> newTableCfs.put(table, cfs != null ? Lists.newArrayList(cfs) : null)); @@ -543,8 +537,8 @@ private static Map> mergeTableCFs( } public static ReplicationPeerConfig removeTableCFsFromReplicationPeerConfig( - Map> tableCfs, ReplicationPeerConfig peerConfig, - String id) throws ReplicationException { + Map> tableCfs, ReplicationPeerConfig peerConfig, String id) + throws ReplicationException { Map> preTableCfs = peerConfig.getTableCFsMap(); if (preTableCfs == null) { throw new ReplicationException("Table-Cfs for peer: " + id + " is null"); @@ -567,14 +561,14 @@ public static ReplicationPeerConfig removeTableCFsFromReplicationPeerConfig( } } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove cf of table: " + table - + " which doesn't specify cfs from table-cfs config in peer: " + id); + + " which doesn't specify cfs from table-cfs config in peer: " + id); } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove table: " + table - + " which has specified cfs from table-cfs config in peer: " + id); + + " which has specified cfs from table-cfs config in peer: " + id); } } else { throw new ReplicationException( - "No table: " + table + " in table-cfs config of peer: " + id); + "No table: " + table + " in table-cfs config of peer: " + id); } } ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig); @@ -583,8 +577,8 @@ public static ReplicationPeerConfig removeTableCFsFromReplicationPeerConfig( } public static ReplicationPeerConfig removeExcludeTableCFsFromReplicationPeerConfig( - Map> excludeTableCfs, ReplicationPeerConfig peerConfig, String id) - throws ReplicationException { + Map> excludeTableCfs, ReplicationPeerConfig peerConfig, String id) + throws ReplicationException { if (excludeTableCfs == null) { throw new ReplicationException("exclude tableCfs is null"); } @@ -610,14 +604,14 @@ public static ReplicationPeerConfig removeExcludeTableCFsFromReplicationPeerConf } } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove cf of 
table: " + table - + " which doesn't specify cfs from exclude-table-cfs config in peer: " + id); + + " which doesn't specify cfs from exclude-table-cfs config in peer: " + id); } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) { throw new ReplicationException("Cannot remove table: " + table - + " which has specified cfs from exclude-table-cfs config in peer: " + id); + + " which has specified cfs from exclude-table-cfs config in peer: " + id); } } else { throw new ReplicationException( - "No table: " + table + " in exclude-table-cfs config of peer: " + id); + "No table: " + table + " in exclude-table-cfs config of peer: " + id); } } ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig); @@ -633,7 +627,7 @@ public static ReplicationPeerConfig removeExcludeTableCFsFromReplicationPeerConf * @throws IOException when create peer cluster configuration failed */ public static Configuration getPeerClusterConfiguration(Configuration conf, - ReplicationPeerDescription peer) throws IOException { + ReplicationPeerDescription peer) throws IOException { ReplicationPeerConfig peerConfig = peer.getPeerConfig(); Configuration otherConf; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java index aea354beac01..b4b703991e11 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFs.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +18,13 @@ package org.apache.hadoop.hbase.client.replication; import java.util.Map; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** - * Used by {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()}. - * The cfs is a map of <ColumnFamily, ReplicationScope>. + * Used by {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()}. The cfs is a map + * of <ColumnFamily, ReplicationScope>. */ @InterfaceAudience.Public public class TableCFs { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java index c410e2d6054a..7244124fc00d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,11 +47,16 @@ private SecurityCapability(int value) { public static SecurityCapability valueOf(int value) { switch (value) { - case 0: return SIMPLE_AUTHENTICATION; - case 1: return SECURE_AUTHENTICATION; - case 2: return AUTHORIZATION; - case 3: return CELL_AUTHORIZATION; - case 4: return CELL_VISIBILITY; + case 0: + return SIMPLE_AUTHENTICATION; + case 1: + return SECURE_AUTHENTICATION; + case 2: + return AUTHORIZATION; + case 3: + return CELL_AUTHORIZATION; + case 4: + return CELL_VISIBILITY; default: throw new IllegalArgumentException("Unknown SecurityCapability value " + value); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java index fa402adf2750..c0be7d9ec694 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/ConnectionSpanBuilder.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_CONNECTION_STRING; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_SYSTEM; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_SYSTEM_VALUE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_USER; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -65,8 +65,7 @@ public ConnectionSpanBuilder addAttribute(final AttributeKey key, T value @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); @@ -77,33 +76,27 @@ public Span build() { * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. * @param attributes the attributes map to be populated. - * @param conn the source of connection attribute values. + * @param conn the source of connection attribute values. */ - static void populateConnectionAttributes( - final Map, Object> attributes, - final AsyncConnectionImpl conn - ) { - final Supplier connStringSupplier = () -> conn.getConnectionRegistry() - .getConnectionString(); + static void populateConnectionAttributes(final Map, Object> attributes, + final AsyncConnectionImpl conn) { + final Supplier connStringSupplier = + () -> conn.getConnectionRegistry().getConnectionString(); populateConnectionAttributes(attributes, connStringSupplier, conn::getUser); } /** * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. - * @param attributes the attributes map to be populated. + * @param attributes the attributes map to be populated. * @param connectionStringSupplier the source of the {@code db.connection_string} attribute value. 
- * @param userSupplier the source of the {@code db.user} attribute value. + * @param userSupplier the source of the {@code db.user} attribute value. */ - static void populateConnectionAttributes( - final Map, Object> attributes, - final Supplier connectionStringSupplier, - final Supplier userSupplier - ) { + static void populateConnectionAttributes(final Map, Object> attributes, + final Supplier connectionStringSupplier, final Supplier userSupplier) { attributes.put(DB_SYSTEM, DB_SYSTEM_VALUE); attributes.put(DB_CONNECTION_STRING, connectionStringSupplier.get()); - attributes.put(DB_USER, Optional.ofNullable(userSupplier.get()) - .map(Object::toString) - .orElse(null)); + attributes.put(DB_USER, + Optional.ofNullable(userSupplier.get()).map(Object::toString).orElse(null)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java index 07edbcb2807d..546ce428fc4c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/IpcClientSpanBuilder.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.NET_PEER_NAME; @@ -23,6 +22,7 @@ import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_METHOD; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SERVICE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SYSTEM; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -34,12 +34,14 @@ import org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RpcSystem; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; /** * Construct {@link Span} instances originating from the client side of an IPC. - * - * @see Semantic conventions for RPC spans + * @see Semantic + * conventions for RPC spans */ @InterfaceAudience.Private public class IpcClientSpanBuilder implements Supplier { @@ -68,8 +70,7 @@ public IpcClientSpanBuilder setRemoteAddress(final Address remoteAddress) { @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); @@ -80,12 +81,10 @@ public Span build() { * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. * @param attributes the attributes map to be populated. - * @param md the source of the RPC attribute values. + * @param md the source of the RPC attribute values. 
*/ - static void populateMethodDescriptorAttributes( - final Map, Object> attributes, - final Descriptors.MethodDescriptor md - ) { + static void populateMethodDescriptorAttributes(final Map, Object> attributes, + final Descriptors.MethodDescriptor md) { final String packageAndService = getRpcPackageAndService(md.getService()); final String method = getRpcName(md); attributes.put(RPC_SYSTEM, RpcSystem.HBASE_RPC.name()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java index e436b7536d3d..4da5b513c543 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableOperationSpanBuilder.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.CONTAINER_DB_OPERATIONS_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_OPERATION; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -96,8 +96,7 @@ public TableOperationSpanBuilder setOperation(final Operation operation) { // contained within the provided "batch" object. public TableOperationSpanBuilder setContainerOperations(final RowMutations mutations) { - final Operation[] ops = mutations.getMutations() - .stream() + final Operation[] ops = mutations.getMutations().stream() .flatMap(row -> Stream.concat(Stream.of(valueFrom(row)), unpackRowOperations(row).stream())) .toArray(Operation[]::new); return setContainerOperations(ops); @@ -106,13 +105,12 @@ public TableOperationSpanBuilder setContainerOperations(final RowMutations mutat public TableOperationSpanBuilder setContainerOperations(final Row row) { final Operation[] ops = Stream.concat(Stream.of(valueFrom(row)), unpackRowOperations(row).stream()) - .toArray(Operation[]::new); + .toArray(Operation[]::new); return setContainerOperations(ops); } - public TableOperationSpanBuilder setContainerOperations( - final Collection operations - ) { + public TableOperationSpanBuilder + setContainerOperations(final Collection operations) { final Operation[] ops = operations.stream() .flatMap(row -> Stream.concat(Stream.of(valueFrom(row)), unpackRowOperations(row).stream())) .toArray(Operation[]::new); @@ -127,10 +125,8 @@ private static Set unpackRowOperations(final Row row) { } if (row instanceof RowMutations) { final RowMutations mutations = (RowMutations) row; - final List operations = mutations.getMutations() - .stream() - .map(TableOperationSpanBuilder::valueFrom) - .collect(Collectors.toList()); + final List operations = mutations.getMutations().stream() + .map(TableOperationSpanBuilder::valueFrom).collect(Collectors.toList()); ops.addAll(operations); } return ops; @@ -150,14 +146,9 @@ private static Set unpackRowOperations(final CheckAndMutate cam) { return ops; } - public TableOperationSpanBuilder setContainerOperations( - final Operation... operations - ) { - final List ops = Arrays.stream(operations) - .map(op -> op == null ? unknown : op.name()) - .sorted() - .distinct() - .collect(Collectors.toList()); + public TableOperationSpanBuilder setContainerOperations(final Operation... 
operations) { + final List ops = Arrays.stream(operations).map(op -> op == null ? unknown : op.name()) + .sorted().distinct().collect(Collectors.toList()); attributes.put(CONTAINER_DB_OPERATIONS_KEY, ops); return this; } @@ -170,11 +161,9 @@ public TableOperationSpanBuilder setTableName(final TableName tableName) { @SuppressWarnings("unchecked") public Span build() { - final String name = attributes.getOrDefault(DB_OPERATION, unknown) - + " " - + (tableName != null ? tableName.getNameWithNamespaceInclAsString() : unknown); - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final String name = attributes.getOrDefault(DB_OPERATION, unknown) + " " + + (tableName != null ? tableName.getNameWithNamespaceInclAsString() : unknown); + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java index 0d89c582bbb2..a62b24b94462 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/trace/TableSpanBuilder.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.DB_NAME; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.TABLE_KEY; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -62,8 +62,7 @@ public TableSpanBuilder setTableName(final TableName tableName) { @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) + final SpanBuilder builder = TraceUtil.getGlobalTracer().spanBuilder(name) // TODO: what about clients embedded in Master/RegionServer/Gateways/&c? .setSpanKind(SpanKind.CLIENT); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); @@ -74,12 +73,10 @@ public Span build() { * Static utility method that performs the primary logic of this builder. It is visible to other * classes in this package so that other builders can use this functionality as a mix-in. * @param attributes the attributes map to be populated. - * @param tableName the source of attribute values. + * @param tableName the source of attribute values. 
*/ - static void populateTableNameAttributes( - final Map, Object> attributes, - final TableName tableName - ) { + static void populateTableNameAttributes(final Map, Object> attributes, + final TableName tableName) { attributes.put(DB_NAME, tableName.getNamespaceAsString()); attributes.put(TABLE_KEY, tableName.getNameAsString()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java index 89c8d989fbf0..0e24e0c98143 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -28,26 +26,20 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** - * Defines how value for specific column is interpreted and provides utility - * methods like compare, add, multiply etc for them. Takes column family, column - * qualifier and return the cell value. Its concrete implementation should - * handle null case gracefully. - * Refer to {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} - * for an example. + * Defines how value for specific column is interpreted and provides utility methods like compare, + * add, multiply etc for them. Takes column family, column qualifier and return the cell value. Its + * concrete implementation should handle null case gracefully. Refer to + * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} for an example. *
    - * Takes two generic parameters and three Message parameters. - * The cell value type of the interpreter is <T>. - * During some computations like sum, average, the return type can be different - * than the cell value data type, for eg, sum of int cell values might overflow - * in case of a int result, we should use Long for its result. Therefore, this - * class mandates to use a different (promoted) data type for result of these - * computations <S>. All computations are performed on the promoted data type - * <S>. There is a conversion method - * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and - * returns a <S> type. - * The AggregateIm>lementation uses PB messages to initialize the - * user's ColumnInterpreter implementation, and for sending the responses - * back to AggregationClient. + * Takes two generic parameters and three Message parameters. The cell value type of the interpreter + * is <T>. During some computations like sum, average, the return type can be different than + * the cell value data type, for eg, sum of int cell values might overflow in case of a int result, + * we should use Long for its result. Therefore, this class mandates to use a different (promoted) + * data type for result of these computations <S>. All computations are performed on the + * promoted data type <S>. There is a conversion method + * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and returns a + * <S> type. The AggregateIm>lementation uses PB messages to initialize the user's + * ColumnInterpreter implementation, and for sending the responses back to AggregationClient. * @param T Cell value data type * @param S Promoted data type * @param P PB message that is used to transport initializer specific bytes @@ -56,31 +48,21 @@ */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public abstract class ColumnInterpreter { +public abstract class ColumnInterpreter { /** - * - * @param colFamily - * @param colQualifier - * @param c - * @return value of type T - * @throws IOException + * nnn * @return value of type T n */ - public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) - throws IOException; + public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException; /** - * @param l1 - * @param l2 - * @return sum or non null value among (if either of them is null); otherwise - * returns a null. + * nn * @return sum or non null value among (if either of them is null); otherwise returns a null. */ public abstract S add(S l1, S l2); /** - * returns the maximum value for this type T - * @return max + * returns the maximum value for this type T n */ public abstract T getMaxValue(); @@ -88,28 +70,22 @@ public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) public abstract T getMinValue(); /** - * @param o1 - * @param o2 - * @return multiplication + * nnn */ public abstract S multiply(S o1, S o2); /** - * @param o - * @return increment + * nn */ public abstract S increment(S o); /** - * provides casting opportunity between the data types. - * @param o - * @return cast + * provides casting opportunity between the data types. nn */ public abstract S castToReturnType(T o); /** - * This takes care if either of arguments are null. returns 0 if they are - * equal or both are null; + * This takes care if either of arguments are null. returns 0 if they are equal or both are null; *
   * > 0 if l1 > l2 or l1 is not null and l2 is null.
   * < 0 if l1 < l2 or l1 is null and l2 is not null.
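A minimal sketch of the null-handling contract in the list above, using Long as the cell value type; the helper is illustrative rather than part of the class (LongColumnInterpreter behaves this way):

  public class CompareContractSketch {
    static int compare(Long l1, Long l2) {
      if (l1 == null && l2 == null) {
        return 0;
      }
      if (l1 == null) {
        return -1; // l1 is null and l2 is not null => < 0
      }
      if (l2 == null) {
        return 1; // l1 is not null and l2 is null => > 0
      }
      return l1.compareTo(l2);
    }

    public static void main(String[] args) {
      System.out.println(compare(5L, null));   // positive
      System.out.println(compare(null, 5L));   // negative
      System.out.println(compare(null, null)); // 0
    }
  }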
    • @@ -118,65 +94,54 @@ public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) public abstract int compare(final T l1, final T l2); /** - * used for computing average of <S> data values. Not providing the divide - * method that takes two <S> values as it is not needed as of now. - * @param o - * @param l - * @return Average + * used for computing average of <S> data values. Not providing the divide method that takes + * two <S> values as it is not needed as of now. nnn */ public abstract double divideForAvg(S o, Long l); /** - * This method should return any additional data that is needed on the - * server side to construct the ColumnInterpreter. The server - * will pass this to the {@link #initialize} - * method. If there is no ColumnInterpreter specific data (for e.g., - * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}) - * then null should be returned. + * This method should return any additional data that is needed on the server side to construct + * the ColumnInterpreter. The server will pass this to the {@link #initialize} method. If there is + * no ColumnInterpreter specific data (for e.g., + * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}) then null should be + * returned. * @return the PB message */ public abstract P getRequestData(); /** - * This method should initialize any field(s) of the ColumnInterpreter with - * a parsing of the passed message bytes (used on the server side). - * @param msg + * This method should initialize any field(s) of the ColumnInterpreter with a parsing of the + * passed message bytes (used on the server side). n */ public abstract void initialize(P msg); - + /** - * This method gets the PB message corresponding to the cell type - * @param t - * @return the PB message for the cell-type instance + * This method gets the PB message corresponding to the cell type n * @return the PB message for + * the cell-type instance */ public abstract Q getProtoForCellType(T t); /** - * This method gets the PB message corresponding to the cell type - * @param q - * @return the cell-type instance from the PB message + * This method gets the PB message corresponding to the cell type n * @return the cell-type + * instance from the PB message */ public abstract T getCellValueFromProto(Q q); /** - * This method gets the PB message corresponding to the promoted type - * @param s - * @return the PB message for the promoted-type instance + * This method gets the PB message corresponding to the promoted type n * @return the PB message + * for the promoted-type instance */ public abstract R getProtoForPromotedType(S s); /** - * This method gets the promoted type from the proto message - * @param r - * @return the promoted-type instance from the PB message + * This method gets the promoted type from the proto message n * @return the promoted-type + * instance from the PB message */ public abstract S getPromotedValueFromProto(R r); /** - * The response message comes as type S. This will convert/cast it to T. - * In some sense, performs the opposite of {@link #castToReturnType(Object)} - * @param response - * @return cast + * The response message comes as type S. This will convert/cast it to T. 
In some sense, performs + * the opposite of {@link #castToReturnType(Object)} nn */ public abstract T castToCellType(S response); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java index 7b1ac43c4bfb..ff9ed066fd42 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +33,10 @@ public CoprocessorException() { } /** - * Constructor with a Class object and exception message. - * @param clazz - * @param s + * Constructor with a Class object and exception message. nn */ public CoprocessorException(Class clazz, String s) { - super( "Coprocessor [" + clazz.getName() + "]: " + s); + super("Coprocessor [" + clazz.getName() + "]: " + s); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java index 0e40e97eee17..71999ad269fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.exceptions; import java.io.EOFException; @@ -48,7 +46,8 @@ @InterfaceStability.Evolving public final class ClientExceptionsUtil { - private ClientExceptionsUtil() {} + private ClientExceptionsUtil() { + } public static boolean isMetaClearingException(Throwable cur) { cur = findException(cur); @@ -57,25 +56,21 @@ public static boolean isMetaClearingException(Throwable cur) { return true; } return !isSpecialException(cur) || (cur instanceof RegionMovedException) - || cur instanceof NotServingRegionException; + || cur instanceof NotServingRegionException; } public static boolean isSpecialException(Throwable cur) { return (cur instanceof RegionMovedException || cur instanceof RegionOpeningException - || cur instanceof RegionTooBusyException || cur instanceof RpcThrottlingException - || cur instanceof MultiActionResultTooLarge || cur instanceof RetryImmediatelyException - || cur instanceof CallQueueTooBigException || cur instanceof CallDroppedException - || cur instanceof NotServingRegionException || cur instanceof RequestTooBigException); + || cur instanceof RegionTooBusyException || cur instanceof RpcThrottlingException + || cur instanceof MultiActionResultTooLarge || cur instanceof RetryImmediatelyException + || cur instanceof CallQueueTooBigException || cur instanceof CallDroppedException + || cur instanceof NotServingRegionException || cur instanceof RequestTooBigException); } - /** - * Look for an exception we know in the remote exception: - * - hadoop.ipc wrapped exceptions - * - nested exceptions - * - * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException / - * RpcThrottlingException + * Look for an exception we know in the remote exception: - hadoop.ipc wrapped exceptions - nested + * exceptions Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException / + * RpcThrottlingException * @return null if we didn't find the exception, the exception otherwise. */ public static Throwable findException(Object exception) { @@ -92,7 +87,7 @@ public static Throwable findException(Object exception) { cur = re.unwrapRemoteException(); // unwrapRemoteException can return the exception given as a parameter when it cannot - // unwrap it. In this case, there is no need to look further + // unwrap it. In this case, there is no need to look further // noinspection ObjectEquality if (cur == re) { return cur; @@ -150,8 +145,7 @@ public static boolean isConnectionException(Throwable e) { /** * Translates exception for preemptive fast fail checks. * @param t exception to check - * @return translated exception - * @throws IOException + * @return translated exception n */ public static Throwable translatePFFE(Throwable t) throws IOException { if (t instanceof NoSuchMethodError) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java index b479f145a676..68d263f78a21 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
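For illustration, a sketch of how ClientExceptionsUtil.findException unwraps a hadoop.ipc RemoteException; the wrapped exception type and message are example values:

  import org.apache.hadoop.hbase.NotServingRegionException;
  import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
  import org.apache.hadoop.ipc.RemoteException;

  public class FindExceptionSketch {
    public static void main(String[] args) {
      // A NotServingRegionException wrapped the way hadoop.ipc wraps remote exceptions.
      RemoteException wrapped =
          new RemoteException(NotServingRegionException.class.getName(), "example message");
      Throwable found = ClientExceptionsUtil.findException(wrapped);
      System.out.println(found); // expected to be the unwrapped NotServingRegionException
      System.out.println(ClientExceptionsUtil.isMetaClearingException(wrapped));
    }
  }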
See the NOTICE file * distributed with this work for additional information @@ -35,9 +35,8 @@ public ConnectionClosedException(String string) { /** * ConnectionClosedException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ public ConnectionClosedException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java index ab20cbc9abff..fa3c4a9aeb1d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,16 +37,13 @@ */ import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** -* Thrown when the client believes that we are trying to communicate to has -* been repeatedly unresponsive for a while. -* -* On receiving such an exception. The ConnectionManager will skip all -* retries and fast fail the operation. -*/ + * Thrown when the client believes that we are trying to communicate to has been repeatedly + * unresponsive for a while. On receiving such an exception. The ConnectionManager will skip all + * retries and fast fail the operation. + */ @InterfaceAudience.Public public class ConnectionClosingException extends IOException { public ConnectionClosingException(String string) { @@ -55,9 +52,8 @@ public ConnectionClosingException(String string) { /** * ConnectionClosingException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ public ConnectionClosingException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java index cbd931ecb1e7..ae15777a7f09 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,14 @@ public FailedSanityCheckException() { } /** - * @param message + * n */ public FailedSanityCheckException(String message) { super(message); } /** - * @param message - * @param cause + * nn */ public FailedSanityCheckException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java index ca80ed565a2a..2b55190b636c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java @@ -35,6 +35,6 @@ public class MasterRegistryFetchException extends HBaseIOException { public MasterRegistryFetchException(Set masters, Throwable failure) { super(String.format("Exception making rpc to masters %s", PrettyPrinter.toString(masters)), - failure); + failure); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java index 1ed5b55410ff..e53b1a7fc2a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java index 5399f07cb566..3d2d0db083ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,8 @@ */ package org.apache.hadoop.hbase.exceptions; -import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hadoop.hbase.client.DoNotRetryRegionException; - +import org.apache.yetus.audience.InterfaceAudience; /** * Thrown when something is wrong in trying to merge two regions. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java index 545a7f107021..58f80898981a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OutOfOrderScannerNextException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java index 0b4db8df66b1..6d28f3288fdc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java @@ -1,34 +1,29 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase.exceptions; import java.net.ConnectException; - import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the client believes that we are trying to communicate to has - * been repeatedly unresponsive for a while. - * - * On receiving such an exception. The ConnectionManager will skip all + * Thrown when the client believes that we are trying to communicate to has been repeatedly + * unresponsive for a while. On receiving such an exception. The ConnectionManager will skip all * retries and fast fail the operation. * @deprecated since 2.3.0, and will be removed in 4.0.0. 
*/ @@ -42,13 +37,13 @@ public class PreemptiveFastFailException extends ConnectException { private boolean guaranteedClientSideOnly; /** - * @param count num of consecutive failures - * @param timeOfFirstFailureMilliSec when first failure happened + * @param count num of consecutive failures + * @param timeOfFirstFailureMilliSec when first failure happened * @param timeOfLatestAttemptMilliSec when last attempt happened - * @param serverName server we failed to connect to + * @param serverName server we failed to connect to */ public PreemptiveFastFailException(long count, long timeOfFirstFailureMilliSec, - long timeOfLatestAttemptMilliSec, ServerName serverName) { + long timeOfLatestAttemptMilliSec, ServerName serverName) { super("Exception happened " + count + " times. to" + serverName); this.failureCount = count; this.timeOfFirstFailureMilliSec = timeOfFirstFailureMilliSec; @@ -56,16 +51,15 @@ public PreemptiveFastFailException(long count, long timeOfFirstFailureMilliSec, } /** - * @param count num of consecutive failures - * @param timeOfFirstFailureMilliSec when first failure happened + * @param count num of consecutive failures + * @param timeOfFirstFailureMilliSec when first failure happened * @param timeOfLatestAttemptMilliSec when last attempt happened - * @param serverName server we failed to connect to - * @param guaranteedClientSideOnly if true, guarantees that no mutations - * have been applied on the server + * @param serverName server we failed to connect to + * @param guaranteedClientSideOnly if true, guarantees that no mutations have been applied on + * the server */ public PreemptiveFastFailException(long count, long timeOfFirstFailureMilliSec, - long timeOfLatestAttemptMilliSec, ServerName serverName, - boolean guaranteedClientSideOnly) { + long timeOfLatestAttemptMilliSec, ServerName serverName, boolean guaranteedClientSideOnly) { super("Exception happened " + count + " times. to" + serverName); this.failureCount = count; this.timeOfFirstFailureMilliSec = timeOfFirstFailureMilliSec; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java index e79c138e703e..2dcdb13ab5fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionMovedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,8 +26,8 @@ import org.slf4j.LoggerFactory; /** - * Subclass if the server knows the region is now on another server. - * This allows the client to call the new region server without calling the master. + * Subclass if the server knows the region is now on another server. This allows the client to call + * the new region server without calling the master. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -45,7 +45,6 @@ public class RegionMovedException extends NotServingRegionException { private static final String STARTCODE_FIELD = "startCode="; private static final String LOCATIONSEQNUM_FIELD = "locationSeqNum="; - public RegionMovedException(ServerName serverName, long locationSeqNum) { this.hostname = serverName.getHostname(); this.port = serverName.getPort(); @@ -61,7 +60,7 @@ public int getPort() { return port; } - public ServerName getServerName(){ + public ServerName getServerName() { return ServerName.valueOf(hostname, port, startCode); } @@ -70,9 +69,8 @@ public long getLocationSeqNum() { } /** - * For hadoop.ipc internal call. Do NOT use. - * We have to parse the hostname to recreate the exception. - * The input is the one generated by {@link #getMessage()} + * For hadoop.ipc internal call. Do NOT use. We have to parse the hostname to recreate the + * exception. The input is the one generated by {@link #getMessage()} */ public RegionMovedException(String s) { int posHostname = s.indexOf(HOST_FIELD) + HOST_FIELD.length(); @@ -88,11 +86,11 @@ public RegionMovedException(String s) { // TODO: this whole thing is extremely brittle. tmpHostname = s.substring(posHostname, s.indexOf(' ', posHostname)); tmpPort = Integer.parseInt(s.substring(posPort, s.indexOf(' ', posPort))); - tmpStartCode = Long.parseLong(s.substring(posStartCode, s.indexOf('.', posStartCode))); + tmpStartCode = Long.parseLong(s.substring(posStartCode, s.indexOf('.', posStartCode))); tmpSeqNum = Long.parseLong(s.substring(posSeqNum, s.indexOf('.', posSeqNum))); } catch (Exception ignored) { - LOG.warn("Can't parse the hostname, port and startCode from this string: " + - s + ", continuing"); + LOG.warn( + "Can't parse the hostname, port and startCode from this string: " + s + ", continuing"); } hostname = tmpHostname; @@ -105,7 +103,7 @@ public RegionMovedException(String s) { public String getMessage() { // TODO: deserialization above depends on this. That is bad, but also means this // should be modified carefully. - return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + " " + - STARTCODE_FIELD + startCode + ". As of " + LOCATIONSEQNUM_FIELD + locationSeqNum + "."; + return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + " " + + STARTCODE_FIELD + startCode + ". As of " + LOCATIONSEQNUM_FIELD + locationSeqNum + "."; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java index a854b996ae9c..347c6b987a15 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java @@ -1,5 +1,4 @@ /* -/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,8 +24,8 @@ import org.slf4j.LoggerFactory; /** - * Subclass if the server knows the region is now on another server. - * This allows the client to call the new region server without calling the master. + * Subclass if the server knows the region is now on another server. This allows the client to call + * the new region server without calling the master. 
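A short sketch of the getMessage()/parse round trip that the RegionMovedException String constructor above exists for; host name, port, start code and sequence number are made-up values:

  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.exceptions.RegionMovedException;

  public class RegionMovedRoundTripSketch {
    public static void main(String[] args) {
      ServerName destination = ServerName.valueOf("host1.example.org", 16020, 1234567890L);
      RegionMovedException original = new RegionMovedException(destination, 42L);
      // getMessage() encodes hostname, port, startCode and locationSeqNum; the String
      // constructor parses that text back into the same fields.
      RegionMovedException reparsed = new RegionMovedException(original.getMessage());
      System.out.println(reparsed.getServerName() + " seqNum=" + reparsed.getLocationSeqNum());
    }
  }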
*/ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java index ae94823f0d47..56eec36c67e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RequestTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the size of the rpc request received by the server is too large. - * - * On receiving such an exception, the client does not retry the offending rpc. + * Thrown when the size of the rpc request received by the server is too large. On receiving such an + * exception, the client does not retry the offending rpc. * @since 1.3.0 */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java index 548772857c6a..9ee7750c5737 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ScannerResetException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java index dbcfa7efa18a..2e19d5bc0044 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.exceptions; import org.apache.yetus.audience.InterfaceAudience; @@ -35,7 +33,7 @@ public UnknownProtocolException(String mesg) { } public UnknownProtocolException(Class protocol) { - this(protocol, "Server is not handling protocol "+protocol.getName()); + this(protocol, "Server is not handling protocol " + protocol.getName()); } public UnknownProtocolException(Class protocol, String mesg) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java index a33da473af1f..bfd285975ff4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.util.Objects; - import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** * A BigDecimal comparator which numerical compares against the specified byte array */ @@ -80,7 +78,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { @Override public byte[] toByteArray() { ComparatorProtos.BigDecimalComparator.Builder builder = - ComparatorProtos.BigDecimalComparator.newBuilder(); + ComparatorProtos.BigDecimalComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); return builder.build().toByteArray(); } @@ -92,15 +90,15 @@ public byte[] toByteArray() { * @see #toByteArray */ public static BigDecimalComparator parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { ComparatorProtos.BigDecimalComparator proto; try { proto = ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new BigDecimalComparator(Bytes.toBigDecimal(proto.getComparable().getValue() - .toByteArray())); + return new BigDecimalComparator( + Bytes.toBigDecimal(proto.getComparable().getValue().toByteArray())); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java index 10be12a7896e..0c8274a86110 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under 
one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,23 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A binary comparator which lexicographically compares against the specified - * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. + * A binary comparator which lexicographically compares against the specified byte array using + * {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. * @since 2.0.0 */ @InterfaceAudience.Public @@ -47,7 +45,7 @@ public BinaryComparator(byte[] value) { } @Override - public int compareTo(byte [] value, int offset, int length) { + public int compareTo(byte[] value, int offset, int length) { return Bytes.compareTo(this.value, 0, this.value.length, value, offset, length); } @@ -60,7 +58,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { ComparatorProtos.BinaryComparator.Builder builder = ComparatorProtos.BinaryComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); @@ -69,12 +67,10 @@ public int compareTo(ByteBuffer value, int offset, int length) { /** * @param pbBytes A pb serialized {@link BinaryComparator} instance - * @return An instance of {@link BinaryComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link BinaryComparator} made from bytes n * @see + * #toByteArray */ - public static BinaryComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static BinaryComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BinaryComparator proto; try { proto = ComparatorProtos.BinaryComparator.parseFrom(pbBytes); @@ -85,9 +81,8 @@ public static BinaryComparator parseFrom(final byte [] pbBytes) } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. 
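A usage sketch for BinaryComparator, not part of this patch; it assumes the standard org.apache.hadoop.hbase.filter Scan API and an illustrative value:

// keep only cells whose value is exactly "v1"
Filter f = new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("v1")));
Scan scan = new Scan();
scan.setFilter(f);
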
*/ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java index 87d075ca45e9..2546227fce35 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -29,18 +27,16 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; /** - * A comparator which compares against a specified byte array, but only - * compares specific portion of the byte array. For the rest it is similar to - * {@link BinaryComparator}. + * A comparator which compares against a specified byte array, but only compares specific portion of + * the byte array. For the rest it is similar to {@link BinaryComparator}. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") public class BinaryComponentComparator extends ByteArrayComparable { - private int offset; //offset of component from beginning. + private int offset; // offset of component from beginning. /** * Constructor - * * @param value value of the component * @param offset offset of the component from begining */ @@ -57,20 +53,19 @@ public int compareTo(byte[] value) { @Override public int compareTo(byte[] value, int offset, int length) { return Bytes.compareTo(this.value, 0, this.value.length, value, offset + this.offset, - this.value.length); + this.value.length); } @Override public boolean equals(Object other) { - if (other == this){ + if (other == this) { return true; } - if (!(other instanceof BinaryComponentComparator)){ + if (!(other instanceof BinaryComponentComparator)) { return false; } - BinaryComponentComparator bcc = (BinaryComponentComparator)other; - return offset == bcc.offset && - (compareTo(bcc.value) == 0); + BinaryComponentComparator bcc = (BinaryComponentComparator) other; + return offset == bcc.offset && (compareTo(bcc.value) == 0); } @Override @@ -86,7 +81,7 @@ public int hashCode() { @Override public byte[] toByteArray() { ComparatorProtos.BinaryComponentComparator.Builder builder = - ComparatorProtos.BinaryComponentComparator.newBuilder(); + ComparatorProtos.BinaryComponentComparator.newBuilder(); builder.setValue(ByteString.copyFrom(this.value)); builder.setOffset(this.offset); return builder.build().toByteArray(); @@ -99,7 +94,7 @@ public byte[] toByteArray() { * @see #toByteArray */ public static BinaryComponentComparator parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { ComparatorProtos.BinaryComponentComparator proto; try { proto = ComparatorProtos.BinaryComponentComparator.parseFrom(pbBytes); @@ -111,15 +106,15 @@ public static BinaryComponentComparator parseFrom(final byte[] pbBytes) /** * @param other paramemter to compare against - * @return true if and only if the fields of the comparator that are - * serialized are equal to the corresponding fields in other. Used for testing. 
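A sketch of BinaryComponentComparator against a fixed-width row key; the offset and component width here are illustrative assumptions:

// keep rows whose 4 bytes starting at offset 4 equal the int 2024
byte[] component = Bytes.toBytes(2024);
Filter f = new RowFilter(CompareOperator.EQUAL, new BinaryComponentComparator(component, 4));
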
+ * @return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this){ + if (other == this) { return true; } - if (!(other instanceof BinaryComponentComparator)){ + if (!(other instanceof BinaryComponentComparator)) { return false; } return super.areSerializedFieldsEqual(other); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java index 0c06b4957ae4..f97fd070be6a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A comparator which compares against a specified byte array, but only compares - * up to the length of this byte array. For the rest it is similar to - * {@link BinaryComparator}. + * A comparator which compares against a specified byte array, but only compares up to the length of + * this byte array. For the rest it is similar to {@link BinaryComparator}. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") // Should this move to Comparator usage? @@ -48,9 +45,9 @@ public BinaryPrefixComparator(byte[] value) { } @Override - public int compareTo(byte [] value, int offset, int length) { + public int compareTo(byte[] value, int offset, int length) { return Bytes.compareTo(this.value, 0, this.value.length, value, offset, - this.value.length <= length ? this.value.length : length); + this.value.length <= length ? 
this.value.length : length); } @Override @@ -65,7 +62,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { ComparatorProtos.BinaryPrefixComparator.Builder builder = ComparatorProtos.BinaryPrefixComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); @@ -74,12 +71,11 @@ public int compareTo(ByteBuffer value, int offset, int length) { /** * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance - * @return An instance of {@link BinaryPrefixComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link BinaryPrefixComparator} made from bytes n * @see + * #toByteArray */ - public static BinaryPrefixComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static BinaryPrefixComparator parseFrom(final byte[] pbBytes) + throws DeserializationException { ComparatorProtos.BinaryPrefixComparator proto; try { proto = ComparatorProtos.BinaryPrefixComparator.parseFrom(pbBytes); @@ -90,9 +86,8 @@ public static BinaryPrefixComparator parseFrom(final byte [] pbBytes) } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java index bb31e9d2a909..15ca8890abac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,21 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A bit comparator which performs the specified bitwise operation on each of the bytes - * with the specified byte array. Then returns whether the result is non-zero. + * A bit comparator which performs the specified bitwise operation on each of the bytes with the + * specified byte array. Then returns whether the result is non-zero. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") // Should this move to Comparator usage? 
@@ -46,11 +44,12 @@ public enum BitwiseOp { /** xor */ XOR } + protected BitwiseOp bitOperator; /** * Constructor - * @param value value + * @param value value * @param bitOperator operator to use on the bit comparison */ public BitComparator(byte[] value, BitwiseOp bitOperator) { @@ -69,9 +68,8 @@ public BitwiseOp getOperator() { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { - ComparatorProtos.BitComparator.Builder builder = - ComparatorProtos.BitComparator.newBuilder(); + public byte[] toByteArray() { + ComparatorProtos.BitComparator.Builder builder = ComparatorProtos.BitComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); ComparatorProtos.BitComparator.BitwiseOp bitwiseOpPb = ComparatorProtos.BitComparator.BitwiseOp.valueOf(bitOperator.name()); @@ -81,12 +79,9 @@ public BitwiseOp getOperator() { /** * @param pbBytes A pb serialized {@link BitComparator} instance - * @return An instance of {@link BitComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link BitComparator} made from bytes n * @see #toByteArray */ - public static BitComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BitComparator proto; try { proto = ComparatorProtos.BitComparator.parseFrom(pbBytes); @@ -94,20 +89,19 @@ public static BitComparator parseFrom(final byte [] pbBytes) throw new DeserializationException(e); } BitwiseOp bitwiseOp = BitwiseOp.valueOf(proto.getBitwiseOp().name()); - return new BitComparator(proto.getComparable().getValue().toByteArray(),bitwiseOp); + return new BitComparator(proto.getComparable().getValue().toByteArray(), bitwiseOp); } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof BitComparator)) return false; - BitComparator comparator = (BitComparator)other; + BitComparator comparator = (BitComparator) other; return super.areSerializedFieldsEqual(other) && this.getOperator().equals(comparator.getOperator()); } @@ -118,17 +112,17 @@ public int compareTo(byte[] value, int offset, int length) { return 1; } int b = 0; - //Iterating backwards is faster because we can quit after one non-zero byte. + // Iterating backwards is faster because we can quit after one non-zero byte. for (int i = length - 1; i >= 0 && b == 0; i--) { switch (bitOperator) { case AND: - b = (this.value[i] & value[i+offset]) & 0xff; + b = (this.value[i] & value[i + offset]) & 0xff; break; case OR: - b = (this.value[i] | value[i+offset]) & 0xff; + b = (this.value[i] | value[i + offset]) & 0xff; break; case XOR: - b = (this.value[i] ^ value[i+offset]) & 0xff; + b = (this.value[i] ^ value[i + offset]) & 0xff; break; } } @@ -141,7 +135,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { return 1; } int b = 0; - //Iterating backwards is faster because we can quit after one non-zero byte. + // Iterating backwards is faster because we can quit after one non-zero byte. 
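A BitComparator sketch; the mask is illustrative, and per the compareTo logic above the cell value must have the same length as the mask:

byte[] mask = new byte[] { 0x0F };
// EQUAL together with BitComparator keeps cells whose value ANDed with the mask is non-zero
Filter f = new ValueFilter(CompareOperator.EQUAL, new BitComparator(mask, BitComparator.BitwiseOp.AND));
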
for (int i = length - 1; i >= 0 && b == 0; i--) { switch (bitOperator) { case AND: @@ -158,4 +152,3 @@ public int compareTo(ByteBuffer value, int offset, int length) { return b == 0 ? 1 : 0; } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java index 55ca2caf446c..cfaf8c279930 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * Simple filter that returns first N columns on row only. - * This filter was written to test filters in Get and as soon as it gets - * its quota of columns, {@link #filterAllRemaining()} returns true. This - * makes this filter unsuitable as a Scan filter. + * Simple filter that returns first N columns on row only. This filter was written to test filters + * in Get and as soon as it gets its quota of columns, {@link #filterAllRemaining()} returns true. + * This makes this filter unsuitable as a Scan filter. 
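A Get-side sketch of ColumnCountGetFilter; the row key and limit are illustrative:

Get get = new Get(Bytes.toBytes("row1"));
// return at most the first 10 columns of the row; after that filterAllRemaining() is true
get.setFilter(new ColumnCountGetFilter(10));
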
*/ @InterfaceAudience.Public public class ColumnCountGetFilter extends FilterBase { @@ -74,9 +71,9 @@ public void reset() { this.count = 0; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); return new ColumnCountGetFilter(limit); } @@ -85,7 +82,7 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.ColumnCountGetFilter.Builder builder = FilterProtos.ColumnCountGetFilter.newBuilder(); builder.setLimit(this.limit); @@ -97,8 +94,8 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return An instance of {@link ColumnCountGetFilter} made from bytes * @see #toByteArray */ - public static ColumnCountGetFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnCountGetFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.ColumnCountGetFilter proto; try { proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes); @@ -110,15 +107,15 @@ public static ColumnCountGetFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof ColumnCountGetFilter)) return false; - ColumnCountGetFilter other = (ColumnCountGetFilter)o; + ColumnCountGetFilter other = (ColumnCountGetFilter) o; return this.getLimit() == other.getLimit(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java index f11ddbd27922..31f607c22cc8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,24 +20,24 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. - * This filter can be used for row-based indexing, where references to other tables are stored across many columns, - * in order to efficient lookups and paginated results for end users. Only most recent versions are considered - * for pagination. + * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. This filter + * can be used for row-based indexing, where references to other tables are stored across many + * columns, in order to efficient lookups and paginated results for end users. Only most recent + * versions are considered for pagination. */ @InterfaceAudience.Public public class ColumnPaginationFilter extends FilterBase { @@ -49,16 +48,13 @@ public class ColumnPaginationFilter extends FilterBase { private int count = 0; /** - * Initializes filter with an integer offset and limit. The offset is arrived at - * scanning sequentially and skipping entries. @limit number of columns are - * then retrieved. If multiple column families are involved, the columns may be spread - * across them. - * - * @param limit Max number of columns to return. + * Initializes filter with an integer offset and limit. The offset is arrived at scanning + * sequentially and skipping entries. @limit number of columns are then retrieved. If multiple + * column families are involved, the columns may be spread across them. + * @param limit Max number of columns to return. * @param offset The integer offset where to start pagination. */ - public ColumnPaginationFilter(final int limit, final int offset) - { + public ColumnPaginationFilter(final int limit, final int offset) { Preconditions.checkArgument(limit >= 0, "limit must be positive %s", limit); Preconditions.checkArgument(offset >= 0, "offset must be positive %s", offset); this.limit = limit; @@ -66,40 +62,38 @@ public ColumnPaginationFilter(final int limit, final int offset) } /** - * Initializes filter with a string/bookmark based offset and limit. The offset is arrived - * at, by seeking to it using scanner hints. If multiple column families are involved, - * pagination starts at the first column family which contains @columnOffset. Columns are - * then retrieved sequentially upto @limit number of columns which maybe spread across - * multiple column families, depending on how the scan is setup. - * - * @param limit Max number of columns to return. + * Initializes filter with a string/bookmark based offset and limit. The offset is arrived at, by + * seeking to it using scanner hints. 
If multiple column families are involved, pagination starts + * at the first column family which contains @columnOffset. Columns are then retrieved + * sequentially upto @limit number of columns which maybe spread across multiple column families, + * depending on how the scan is setup. + * @param limit Max number of columns to return. * @param columnOffset The string/bookmark offset on where to start pagination. */ public ColumnPaginationFilter(final int limit, final byte[] columnOffset) { Preconditions.checkArgument(limit >= 0, "limit must be positive %s", limit); - Preconditions.checkArgument(columnOffset != null, - "columnOffset must be non-null %s", - columnOffset); + Preconditions.checkArgument(columnOffset != null, "columnOffset must be non-null %s", + columnOffset); this.limit = limit; this.columnOffset = columnOffset; } /** - * @return limit + * n */ public int getLimit() { return limit; } /** - * @return offset + * n */ public int getOffset() { return offset; } /** - * @return columnOffset + * n */ public byte[] getColumnOffset() { return columnOffset; @@ -112,8 +106,7 @@ public boolean filterRowKey(Cell cell) throws IOException { } @Override - public ReturnCode filterCell(final Cell c) - { + public ReturnCode filterCell(final Cell c) { if (columnOffset != null) { if (count >= limit) { return ReturnCode.NEXT_ROW; @@ -134,8 +127,7 @@ public ReturnCode filterCell(final Cell c) return ReturnCode.NEXT_ROW; } - ReturnCode code = count < offset ? ReturnCode.NEXT_COL : - ReturnCode.INCLUDE_AND_NEXT_COL; + ReturnCode code = count < offset ? ReturnCode.NEXT_COL : ReturnCode.INCLUDE_AND_NEXT_COL; count++; return code; } @@ -147,14 +139,13 @@ public Cell getNextCellHint(Cell cell) { } @Override - public void reset() - { + public void reset() { this.count = 0; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2, - "Expected 2 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2, "Expected 2 but got: %s", + filterArguments.size()); int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); int offset = ParseFilter.convertByteArrayToInt(filterArguments.get(1)); return new ColumnPaginationFilter(limit, offset); @@ -164,7 +155,7 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.ColumnPaginationFilter.Builder builder = FilterProtos.ColumnPaginationFilter.newBuilder(); builder.setLimit(this.limit); @@ -179,12 +170,11 @@ public static Filter createFilterFromArguments(ArrayList filterArgument /** * @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance - * @return An instance of {@link ColumnPaginationFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link ColumnPaginationFilter} made from bytes n * @see + * #toByteArray */ - public static ColumnPaginationFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnPaginationFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.ColumnPaginationFilter proto; try { proto = FilterProtos.ColumnPaginationFilter.parseFrom(pbBytes); @@ -192,26 +182,25 @@ public static ColumnPaginationFilter parseFrom(final byte [] pbBytes) throw new 
DeserializationException(e); } if (proto.hasColumnOffset()) { - return new ColumnPaginationFilter(proto.getLimit(), - proto.getColumnOffset().toByteArray()); + return new ColumnPaginationFilter(proto.getLimit(), proto.getColumnOffset().toByteArray()); } - return new ColumnPaginationFilter(proto.getLimit(),proto.getOffset()); + return new ColumnPaginationFilter(proto.getLimit(), proto.getOffset()); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof ColumnPaginationFilter)) return false; - ColumnPaginationFilter other = (ColumnPaginationFilter)o; + ColumnPaginationFilter other = (ColumnPaginationFilter) o; if (this.columnOffset != null) { - return this.getLimit() == other.getLimit() && - Bytes.equals(this.getColumnOffset(), other.getColumnOffset()); + return this.getLimit() == other.getLimit() + && Bytes.equals(this.getColumnOffset(), other.getColumnOffset()); } return this.getLimit() == other.getLimit() && this.getOffset() == other.getOffset(); } @@ -219,11 +208,10 @@ boolean areSerializedFieldsEqual(Filter o) { @Override public String toString() { if (this.columnOffset != null) { - return (this.getClass().getSimpleName() + "(" + this.limit + ", " + - Bytes.toStringBinary(this.columnOffset) + ")"); + return (this.getClass().getSimpleName() + "(" + this.limit + ", " + + Bytes.toStringBinary(this.columnOffset) + ")"); } - return String.format("%s (%d, %d)", this.getClass().getSimpleName(), - this.limit, this.offset); + return String.format("%s (%d, %d)", this.getClass().getSimpleName(), this.limit, this.offset); } @Override @@ -233,7 +221,8 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return columnOffset == null ? Objects.hash(this.limit, this.offset) : - Objects.hash(this.limit, Bytes.hashCode(this.columnOffset)); + return columnOffset == null + ? Objects.hash(this.limit, this.offset) + : Objects.hash(this.limit, Bytes.hashCode(this.columnOffset)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java index 3ad0f1783c56..d883c449017c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,35 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
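A paging sketch using the two ColumnPaginationFilter constructors shown above; the limit, offset and bookmark qualifier are illustrative:

Scan scan = new Scan();
// count-based paging: skip the first 40 columns, then return up to 20
scan.setFilter(new ColumnPaginationFilter(20, 40));
// or bookmark-based paging: seek to qualifier "q040" and return up to 20 columns
scan.setFilter(new ColumnPaginationFilter(20, Bytes.toBytes("q040")));
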
*/ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used for selecting only those keys with columns that matches - * a particular prefix. For example, if prefix is 'an', it will pass keys with - * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. + * This filter is used for selecting only those keys with columns that matches a particular prefix. + * For example, if prefix is 'an', it will pass keys with columns like 'and', 'anti' but not keys + * with columns like 'ball', 'act'. */ @InterfaceAudience.Public public class ColumnPrefixFilter extends FilterBase { - protected byte [] prefix = null; + protected byte[] prefix = null; - public ColumnPrefixFilter(final byte [] prefix) { + public ColumnPrefixFilter(final byte[] prefix) { this.prefix = prefix; } @@ -91,16 +89,16 @@ public ReturnCode filterColumn(Cell cell) { private static int compareQualifierPart(Cell cell, int length, byte[] prefix) { if (cell instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) cell).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) cell).getQualifierPosition(), length, prefix, 0, length); + ((ByteBufferExtendedCell) cell).getQualifierPosition(), length, prefix, 0, length); } return Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), length, prefix, 0, - length); + length); } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); + byte[] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); return new ColumnPrefixFilter(columnPrefix); } @@ -108,9 +106,8 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.ColumnPrefixFilter.Builder builder = - FilterProtos.ColumnPrefixFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.ColumnPrefixFilter.Builder builder = FilterProtos.ColumnPrefixFilter.newBuilder(); if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix)); return builder.build().toByteArray(); } @@ -121,8 +118,7 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - 
public static ColumnPrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnPrefixFilter proto; try { proto = FilterProtos.ColumnPrefixFilter.parseFrom(pbBytes); @@ -134,15 +130,15 @@ public static ColumnPrefixFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnPrefixFilter)) return false; + if (o == this) return true; + if (!(o instanceof ColumnPrefixFilter)) return false; - ColumnPrefixFilter other = (ColumnPrefixFilter)o; + ColumnPrefixFilter other = (ColumnPrefixFilter) o; return Bytes.equals(this.getPrefix(), other.getPrefix()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java index 9937a663d453..46465ac6d1f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import static org.apache.hadoop.hbase.util.Bytes.len; @@ -24,30 +22,25 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * This filter is used for selecting only those keys with columns that are - * between minColumn to maxColumn. For example, if minColumn is 'an', and - * maxColumn is 'be', it will pass keys with columns like 'ana', 'bad', but not - * keys with columns like 'bed', 'eye' - * - * If minColumn is null, there is no lower bound. If maxColumn is null, there is - * no upper bound. - * - * minColumnInclusive and maxColumnInclusive specify if the ranges are inclusive - * or not. + * This filter is used for selecting only those keys with columns that are between minColumn to + * maxColumn. 
For example, if minColumn is 'an', and maxColumn is 'be', it will pass keys with + * columns like 'ana', 'bad', but not keys with columns like 'bed', 'eye' If minColumn is null, + * there is no lower bound. If maxColumn is null, there is no upper bound. minColumnInclusive and + * maxColumnInclusive specify if the ranges are inclusive or not. */ @InterfaceAudience.Public public class ColumnRangeFilter extends FilterBase { @@ -57,17 +50,15 @@ public class ColumnRangeFilter extends FilterBase { protected boolean maxColumnInclusive = false; /** - * Create a filter to select those keys with columns that are between minColumn - * and maxColumn. - * @param minColumn minimum value for the column range. If if it's null, - * there is no lower bound. + * Create a filter to select those keys with columns that are between minColumn and maxColumn. + * @param minColumn minimum value for the column range. If if it's null, there is no + * lower bound. * @param minColumnInclusive if true, include minColumn in the range. - * @param maxColumn maximum value for the column range. If it's null, - * @param maxColumnInclusive if true, include maxColumn in the range. - * there is no upper bound. + * @param maxColumn maximum value for the column range. If it's null, + * @param maxColumnInclusive if true, include maxColumn in the range. there is no upper bound. */ public ColumnRangeFilter(final byte[] minColumn, boolean minColumnInclusive, - final byte[] maxColumn, boolean maxColumnInclusive) { + final byte[] maxColumn, boolean maxColumnInclusive) { this.minColumn = minColumn; this.minColumnInclusive = minColumnInclusive; this.maxColumn = maxColumn; @@ -151,54 +142,49 @@ public ReturnCode filterCell(final Cell c) { return ReturnCode.NEXT_ROW; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 4, - "Expected 4 but got: %s", filterArguments.size()); - byte [] minColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 4, "Expected 4 but got: %s", + filterArguments.size()); + byte[] minColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); boolean minColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(1)); - byte [] maxColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(2)); + byte[] maxColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(2)); boolean maxColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(3)); - if (minColumn.length == 0) - minColumn = null; - if (maxColumn.length == 0) - maxColumn = null; - return new ColumnRangeFilter(minColumn, minColumnInclusive, - maxColumn, maxColumnInclusive); + if (minColumn.length == 0) minColumn = null; + if (maxColumn.length == 0) maxColumn = null; + return new ColumnRangeFilter(minColumn, minColumnInclusive, maxColumn, maxColumnInclusive); } /** * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.ColumnRangeFilter.Builder builder = - FilterProtos.ColumnRangeFilter.newBuilder(); - if (this.minColumn != null) builder.setMinColumn( - UnsafeByteOperations.unsafeWrap(this.minColumn)); + public byte[] toByteArray() { + FilterProtos.ColumnRangeFilter.Builder builder = FilterProtos.ColumnRangeFilter.newBuilder(); + if (this.minColumn != null) + 
builder.setMinColumn(UnsafeByteOperations.unsafeWrap(this.minColumn)); builder.setMinColumnInclusive(this.minColumnInclusive); - if (this.maxColumn != null) builder.setMaxColumn( - UnsafeByteOperations.unsafeWrap(this.maxColumn)); + if (this.maxColumn != null) + builder.setMaxColumn(UnsafeByteOperations.unsafeWrap(this.maxColumn)); builder.setMaxColumnInclusive(this.maxColumnInclusive); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link ColumnRangeFilter} instance - * @return An instance of {@link ColumnRangeFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link ColumnRangeFilter} made from bytes n * @see + * #toByteArray */ - public static ColumnRangeFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnRangeFilter proto; try { proto = FilterProtos.ColumnRangeFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new ColumnRangeFilter(proto.hasMinColumn()?proto.getMinColumn().toByteArray():null, - proto.getMinColumnInclusive(),proto.hasMaxColumn()?proto.getMaxColumn().toByteArray():null, + return new ColumnRangeFilter(proto.hasMinColumn() ? proto.getMinColumn().toByteArray() : null, + proto.getMinColumnInclusive(), + proto.hasMaxColumn() ? proto.getMaxColumn().toByteArray() : null, proto.getMaxColumnInclusive()); } @@ -217,9 +203,9 @@ boolean areSerializedFieldsEqual(Filter o) { } ColumnRangeFilter other = (ColumnRangeFilter) o; return Bytes.equals(this.getMinColumn(), other.getMinColumn()) - && this.getMinColumnInclusive() == other.getMinColumnInclusive() - && Bytes.equals(this.getMaxColumn(), other.getMaxColumn()) - && this.getMaxColumnInclusive() == other.getMaxColumnInclusive(); + && this.getMinColumnInclusive() == other.getMinColumnInclusive() + && Bytes.equals(this.getMaxColumn(), other.getMaxColumn()) + && this.getMaxColumnInclusive() == other.getMaxColumnInclusive(); } @Override @@ -229,10 +215,9 @@ public Cell getNextCellHint(Cell cell) { @Override public String toString() { - return this.getClass().getSimpleName() + " " - + (this.minColumnInclusive ? "[" : "(") + Bytes.toStringBinary(this.minColumn) - + ", " + Bytes.toStringBinary(this.maxColumn) - + (this.maxColumnInclusive ? "]" : ")"); + return this.getClass().getSimpleName() + " " + (this.minColumnInclusive ? "[" : "(") + + Bytes.toStringBinary(this.minColumn) + ", " + Bytes.toStringBinary(this.maxColumn) + + (this.maxColumnInclusive ? "]" : ")"); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java index 6516b9692e14..0074fe40a3a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
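A sketch of the two column-selection filters above, mirroring the examples given in their class javadoc:

// keep columns that start with "an" (e.g. "and", "anti")
Filter prefix = new ColumnPrefixFilter(Bytes.toBytes("an"));
// keep columns in the inclusive range ["an", "be"]: "ana" and "bad" pass, "bed" and "eye" do not
Filter range = new ColumnRangeFilter(Bytes.toBytes("an"), true, Bytes.toBytes("be"), true);
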
*/ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; @@ -40,12 +37,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Different from {@link SingleColumnValueFilter} which returns an entire row - * when specified condition is matched, {@link ColumnValueFilter} return the matched cell only. + * Different from {@link SingleColumnValueFilter} which returns an entire row when specified + * condition is matched, {@link ColumnValueFilter} return the matched cell only. *

      - * This filter is used to filter cells based on column and value. - * It takes a {@link org.apache.hadoop.hbase.CompareOperator} operator (<, <=, =, !=, >, >=), and - * and a {@link ByteArrayComparable} comparator. + * This filter is used to filter cells based on column and value. It takes a + * {@link org.apache.hadoop.hbase.CompareOperator} operator (<, <=, =, !=, >, >=), and and a + * {@link ByteArrayComparable} comparator. */ @InterfaceAudience.Public public class ColumnValueFilter extends FilterBase { @@ -58,14 +55,13 @@ public class ColumnValueFilter extends FilterBase { // columns in the same row can be skipped faster by NEXT_ROW instead of NEXT_COL. private boolean columnFound = false; - public ColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value) { + public ColumnValueFilter(final byte[] family, final byte[] qualifier, final CompareOperator op, + final byte[] value) { this(family, qualifier, op, new BinaryComparator(value)); } - public ColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator) { + public ColumnValueFilter(final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator) { this.family = Preconditions.checkNotNull(family, "family should not be null."); this.qualifier = qualifier == null ? new byte[0] : qualifier; this.op = Preconditions.checkNotNull(op, "CompareOperator should not be null"); @@ -73,7 +69,7 @@ public ColumnValueFilter(final byte[] family, final byte[] qualifier, } /** - * @return operator + * n */ public CompareOperator getCompareOperator() { return op; @@ -120,15 +116,16 @@ public ReturnCode filterCell(Cell c) throws IOException { columnFound = true; // 2. Check value match: // True means filter out, just skip this cell, else include it. - return compareValue(getCompareOperator(), getComparator(), c) ? - ReturnCode.SKIP : ReturnCode.INCLUDE; + return compareValue(getCompareOperator(), getComparator(), c) + ? ReturnCode.SKIP + : ReturnCode.INCLUDE; } /** * This method is used to determine a cell should be included or filtered out. - * @param op one of operators {@link CompareOperator} + * @param op one of operators {@link CompareOperator} * @param comparator comparator used to compare cells. - * @param cell cell to be compared. + * @param cell cell to be compared. * @return true means cell should be filtered out, included otherwise. 
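A ColumnValueFilter sketch; the family, qualifier and value are illustrative. Unlike SingleColumnValueFilter it returns only the matching cell rather than the whole row:

Filter f = new ColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"),
    CompareOperator.EQUAL, Bytes.toBytes("v1"));
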
*/ private boolean compareValue(final CompareOperator op, final ByteArrayComparable comparator, @@ -146,20 +143,18 @@ private boolean compareValue(final CompareOperator op, final ByteArrayComparable * @return a ColumnValueFilter */ public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 4, - "Expect 4 arguments: %s", filterArguments.size()); + Preconditions.checkArgument(filterArguments.size() == 4, "Expect 4 arguments: %s", + filterArguments.size()); byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); CompareOperator operator = ParseFilter.createCompareOperator(filterArguments.get(2)); ByteArrayComparable comparator = ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (operator != CompareOperator.EQUAL && - operator != CompareOperator.NOT_EQUAL) { - throw new IllegalArgumentException("A regexstring comparator and substring comparator " + - "can only be used with EQUAL and NOT_EQUAL"); + if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) { + if (operator != CompareOperator.EQUAL && operator != CompareOperator.NOT_EQUAL) { + throw new IllegalArgumentException("A regexstring comparator and substring comparator " + + "can only be used with EQUAL and NOT_EQUAL"); } } @@ -170,8 +165,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments * @return A pb instance to represent this instance. */ FilterProtos.ColumnValueFilter convert() { - FilterProtos.ColumnValueFilter.Builder builder = - FilterProtos.ColumnValueFilter.newBuilder(); + FilterProtos.ColumnValueFilter.Builder builder = FilterProtos.ColumnValueFilter.newBuilder(); builder.setFamily(UnsafeByteOperations.unsafeWrap(this.family)); builder.setQualifier(UnsafeByteOperations.unsafeWrap(this.qualifier)); @@ -221,10 +215,10 @@ boolean areSerializedFieldsEqual(Filter o) { } ColumnValueFilter other = (ColumnValueFilter) o; - return Bytes.equals(this.getFamily(), other.getFamily()) && - Bytes.equals(this.getQualifier(), other.getQualifier()) && - this.getCompareOperator().equals(other.getCompareOperator()) && - this.getComparator().areSerializedFieldsEqual(other.getComparator()); + return Bytes.equals(this.getFamily(), other.getFamily()) + && Bytes.equals(this.getQualifier(), other.getQualifier()) + && this.getCompareOperator().equals(other.getCompareOperator()) + && this.getComparator().areSerializedFieldsEqual(other.getComparator()); } @Override @@ -234,9 +228,8 @@ public boolean isFamilyEssential(byte[] name) throws IOException { @Override public String toString() { - return String.format("%s (%s, %s, %s, %s)", - getClass().getSimpleName(), Bytes.toStringBinary(this.family), - Bytes.toStringBinary(this.qualifier), this.op.name(), + return String.format("%s (%s, %s, %s, %s)", getClass().getSimpleName(), + Bytes.toStringBinary(this.family), Bytes.toStringBinary(this.qualifier), this.op.name(), Bytes.toStringBinary(this.comparator.getValue())); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java index f6b63ec59e8c..1f55f8480459 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -36,8 +34,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType; /** - * This is a generic filter to be used to filter by comparison. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator. + * This is a generic filter to be used to filter by comparison. It takes an operator (equal, + * greater, not equal, etc) and a byte [] comparator. *

      * To filter by row key, use {@link RowFilter}. *

      @@ -47,8 +45,8 @@ *

      * To filter by value, use {@link ValueFilter}. *

      - * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter} - * to add more control. + * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter} to add more + * control. *

      * Multiple filters can be combined using {@link FilterList}. */ @@ -59,11 +57,10 @@ public abstract class CompareFilter extends FilterBase { /** * Constructor. - * @param op the compare op for row matching + * @param op the compare op for row matching * @param comparator the comparator for row matching */ - public CompareFilter(final CompareOperator op, - final ByteArrayComparable comparator) { + public CompareFilter(final CompareOperator op, final ByteArrayComparable comparator) { this.op = op; this.comparator = comparator; } @@ -86,7 +83,7 @@ public boolean filterRowKey(Cell cell) throws IOException { } protected boolean compareRow(final CompareOperator op, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (op == CompareOperator.NO_OP) { return true; } @@ -95,7 +92,7 @@ protected boolean compareRow(final CompareOperator op, final ByteArrayComparable } protected boolean compareFamily(final CompareOperator op, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (op == CompareOperator.NO_OP) { return true; } @@ -103,8 +100,8 @@ protected boolean compareFamily(final CompareOperator op, final ByteArrayCompara return compare(op, compareResult); } - protected boolean compareQualifier(final CompareOperator op, - final ByteArrayComparable comparator, final Cell cell) { + protected boolean compareQualifier(final CompareOperator op, final ByteArrayComparable comparator, + final Cell cell) { // We do not call through to the non-deprecated method for perf reasons. if (op == CompareOperator.NO_OP) { return true; @@ -114,7 +111,7 @@ protected boolean compareQualifier(final CompareOperator op, } protected boolean compareValue(final CompareOperator op, final ByteArrayComparable comparator, - final Cell cell) { + final Cell cell) { if (op == CompareOperator.NO_OP) { return true; } @@ -142,19 +139,17 @@ static boolean compare(final CompareOperator op, int compareResult) { } // returns an array of heterogeneous objects - public static ArrayList extractArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2, - "Expected 2 but got: %s", filterArguments.size()); + public static ArrayList extractArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2, "Expected 2 but got: %s", + filterArguments.size()); CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(0)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(1))); - - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (op != CompareOperator.EQUAL && - op != CompareOperator.NOT_EQUAL) { - throw new IllegalArgumentException ("A regexstring comparator and substring comparator" + - " can only be used with EQUAL and NOT_EQUAL"); + ByteArrayComparable comparator = + ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(1))); + + if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) { + if (op != CompareOperator.EQUAL && op != CompareOperator.NOT_EQUAL) { + throw new IllegalArgumentException("A regexstring comparator and substring comparator" + + " can only be used with EQUAL and NOT_EQUAL"); } } ArrayList arguments = new ArrayList<>(2); @@ -167,8 +162,7 @@ public static ArrayList extractArguments(ArrayList filterArgume * @return A pb instance to represent this instance. 
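Since the Javadoc above mentions wrapping with SkipFilter and WhileMatchFilter, a brief hedged illustration (the comparator and value are arbitrary): wrapping a ValueFilter in WhileMatchFilter ends the whole scan the first time the wrapped check fails, instead of merely skipping the non-matching cells:

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.SubstringComparator;
    import org.apache.hadoop.hbase.filter.ValueFilter;
    import org.apache.hadoop.hbase.filter.WhileMatchFilter;

    // Scan rows while cell values keep containing "active"; stop at the first miss.
    Scan scan = new Scan();
    scan.setFilter(new WhileMatchFilter(
        new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("active"))));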
*/ FilterProtos.CompareFilter convert() { - FilterProtos.CompareFilter.Builder builder = - FilterProtos.CompareFilter.newBuilder(); + FilterProtos.CompareFilter.Builder builder = FilterProtos.CompareFilter.newBuilder(); HBaseProtos.CompareType compareOp = CompareType.valueOf(this.op.name()); builder.setCompareOp(compareOp); if (this.comparator != null) builder.setComparator(ProtobufUtil.toComparator(this.comparator)); @@ -176,27 +170,23 @@ FilterProtos.CompareFilter convert() { } /** - * - * @param o - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof CompareFilter)) return false; - CompareFilter other = (CompareFilter)o; - return this.getCompareOperator().equals(other.getCompareOperator()) && - (this.getComparator() == other.getComparator() + CompareFilter other = (CompareFilter) o; + return this.getCompareOperator().equals(other.getCompareOperator()) + && (this.getComparator() == other.getComparator() || this.getComparator().areSerializedFieldsEqual(other.getComparator())); } @Override public String toString() { - return String.format("%s (%s, %s)", - this.getClass().getSimpleName(), - this.op.name(), - Bytes.toStringBinary(this.comparator.getValue())); + return String.format("%s (%s, %s)", this.getClass().getSimpleName(), this.op.name(), + Bytes.toStringBinary(this.comparator.getValue())); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java index ee79ac391ff1..1f453c6f678d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,27 +23,24 @@ import java.util.List; import java.util.Objects; import java.util.Set; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * A filter for adding inter-column timestamp matching - * Only cells with a correspondingly timestamped entry in - * the target column will be retained - * Not compatible with Scan.setBatch as operations need - * full rows for correct filtering + * A filter for adding inter-column timestamp matching Only cells with a correspondingly timestamped + * entry in the target column will be retained Not compatible with Scan.setBatch as operations need + * full rows for correct filtering */ @InterfaceAudience.Public public class DependentColumnFilter extends CompareFilter { @@ -56,50 +52,43 @@ public class DependentColumnFilter extends CompareFilter { protected Set stampSet = new HashSet<>(); /** - * Build a dependent column filter with value checking - * dependent column varies will be compared using the supplied - * compareOp and comparator, for usage of which - * refer to {@link CompareFilter} - * - * @param family dependent column family - * @param qualifier dependent column qualifier + * Build a dependent column filter with value checking dependent column varies will be compared + * using the supplied compareOp and comparator, for usage of which refer to {@link CompareFilter} + * @param family dependent column family + * @param qualifier dependent column qualifier * @param dropDependentColumn whether the column should be discarded after - * @param op Value comparison op - * @param valueComparator comparator + * @param op Value comparison op + * @param valueComparator comparator */ - public DependentColumnFilter(final byte [] family, final byte[] qualifier, - final boolean dropDependentColumn, final CompareOperator op, - final ByteArrayComparable valueComparator) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier, + final boolean dropDependentColumn, final CompareOperator op, + final ByteArrayComparable valueComparator) { // set up the comparator super(op, valueComparator); this.columnFamily = family; this.columnQualifier = qualifier; this.dropDependentColumn = dropDependentColumn; } - + /** - * Constructor for DependentColumn filter. - * Cells where a Cell from target column - * with the same timestamp do not exist will be dropped. - * - * @param family name of target column family + * Constructor for DependentColumn filter. Cells where a Cell from target column with the same + * timestamp do not exist will be dropped. 
+ * @param family name of target column family * @param qualifier name of column qualifier */ - public DependentColumnFilter(final byte [] family, final byte [] qualifier) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier) { this(family, qualifier, false); } - + /** - * Constructor for DependentColumn filter. - * Cells where a Cell from target column - * with the same timestamp do not exist will be dropped. - * - * @param family name of dependent column family - * @param qualifier name of dependent qualifier + * Constructor for DependentColumn filter. Cells where a Cell from target column with the same + * timestamp do not exist will be dropped. + * @param family name of dependent column family + * @param qualifier name of dependent qualifier * @param dropDependentColumn whether the dependent columns Cells should be discarded */ - public DependentColumnFilter(final byte [] family, final byte [] qualifier, - final boolean dropDependentColumn) { + public DependentColumnFilter(final byte[] family, final byte[] qualifier, + final boolean dropDependentColumn) { this(family, qualifier, dropDependentColumn, CompareOperator.NO_OP, null); } @@ -137,16 +126,15 @@ public boolean filterAllRemaining() { public ReturnCode filterCell(final Cell c) { // Check if the column and qualifier match if (!CellUtil.matchingColumn(c, this.columnFamily, this.columnQualifier)) { - // include non-matches for the time being, they'll be discarded afterwards - return ReturnCode.INCLUDE; + // include non-matches for the time being, they'll be discarded afterwards + return ReturnCode.INCLUDE; } // If it doesn't pass the op, skip it - if (comparator != null - && compareValue(getCompareOperator(), comparator, c)) + if (comparator != null && compareValue(getCompareOperator(), comparator, c)) return ReturnCode.SKIP; - + stampSet.add(c.getTimestamp()); - if(dropDependentColumn) { + if (dropDependentColumn) { return ReturnCode.SKIP; } return ReturnCode.INCLUDE; @@ -161,7 +149,7 @@ public void filterRowCells(List kvs) { public boolean hasFilterRow() { return true; } - + @Override public boolean filterRow() { return false; @@ -169,34 +157,32 @@ public boolean filterRow() { @Override public void reset() { - stampSet.clear(); + stampSet.clear(); } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2 || - filterArguments.size() == 3 || - filterArguments.size() == 5, - "Expected 2, 3 or 5 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument( + filterArguments.size() == 2 || filterArguments.size() == 3 || filterArguments.size() == 5, + "Expected 2, 3 or 5 but got: %s", filterArguments.size()); if (filterArguments.size() == 2) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); return new DependentColumnFilter(family, qualifier); } else if (filterArguments.size() == 3) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = 
ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); return new DependentColumnFilter(family, qualifier, dropDependentColumn); } else if (filterArguments.size() == 5) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(3)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(4))); - return new DependentColumnFilter(family, qualifier, dropDependentColumn, - op, comparator); + ByteArrayComparable comparator = + ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(4))); + return new DependentColumnFilter(family, qualifier, dropDependentColumn, op, comparator); } else { throw new IllegalArgumentException("Expected 2, 3 or 5 but got: " + filterArguments.size()); } @@ -206,7 +192,7 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.DependentColumnFilter.Builder builder = FilterProtos.DependentColumnFilter.newBuilder(); builder.setCompareFilter(super.convert()); @@ -222,12 +208,11 @@ public static Filter createFilterFromArguments(ArrayList filterArgument /** * @param pbBytes A pb serialized {@link DependentColumnFilter} instance - * @return An instance of {@link DependentColumnFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link DependentColumnFilter} made from bytes n * @see + * #toByteArray */ - public static DependentColumnFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static DependentColumnFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.DependentColumnFilter proto; try { proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes); @@ -235,7 +220,7 @@ public static DependentColumnFilter parseFrom(final byte [] pbBytes) throw new DeserializationException(e); } final CompareOperator valueCompareOp = - CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name()); + CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name()); ByteArrayComparable valueComparator = null; try { if (proto.getCompareFilter().hasComparator()) { @@ -245,24 +230,23 @@ public static DependentColumnFilter parseFrom(final byte [] pbBytes) throw new DeserializationException(ioe); } return new DependentColumnFilter( - proto.hasColumnFamily()?proto.getColumnFamily().toByteArray():null, - proto.hasColumnQualifier()?proto.getColumnQualifier().toByteArray():null, + proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, + proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null, proto.getDropDependentColumn(), valueCompareOp, valueComparator); } /** - * @param o - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
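As a rough usage sketch of the dependent-column matching described above (the "cf"/"ref" names are purely illustrative): retain only cells whose timestamp also appears on cf:ref in the same row, and drop the reference column itself from the result:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.DependentColumnFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    // true = dropDependentColumn: the cf:ref cells themselves are not returned.
    Scan scan = new Scan();
    scan.setFilter(new DependentColumnFilter(Bytes.toBytes("cf"), Bytes.toBytes("ref"), true));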
+ * n * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof DependentColumnFilter)) return false; - DependentColumnFilter other = (DependentColumnFilter)o; + DependentColumnFilter other = (DependentColumnFilter) o; return other != null && super.areSerializedFieldsEqual(other) && Bytes.equals(this.getFamily(), other.getFamily()) && Bytes.equals(this.getQualifier(), other.getQualifier()) @@ -271,13 +255,10 @@ boolean areSerializedFieldsEqual(Filter o) { @Override public String toString() { - return String.format("%s (%s, %s, %s, %s, %s)", - this.getClass().getSimpleName(), - Bytes.toStringBinary(this.columnFamily), - Bytes.toStringBinary(this.columnQualifier), - this.dropDependentColumn, - this.op.name(), - this.comparator != null ? Bytes.toStringBinary(this.comparator.getValue()) : "null"); + return String.format("%s (%s, %s, %s, %s, %s)", this.getClass().getSimpleName(), + Bytes.toStringBinary(this.columnFamily), Bytes.toStringBinary(this.columnQualifier), + this.dropDependentColumn, this.op.name(), + this.comparator != null ? Bytes.toStringBinary(this.comparator.getValue()) : "null"); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java index f1406cd4c945..4e682eb1d37b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,39 +19,39 @@ import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** *

      - * This filter is used to filter based on the column family. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * column family portion of a key. - *

      - * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and {@link org.apache.hadoop.hbase.filter.SkipFilter} - * to add more control. - *

      + * This filter is used to filter based on the column family. It takes an operator (equal, greater, + * not equal, etc) and a byte [] comparator for the column family portion of a key. + *

      + *

      + * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and + * {@link org.apache.hadoop.hbase.filter.SkipFilter} to add more control. + *

      + *

      * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}. *

      - * If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} - * directly rather than a filter. + * If an already known column family is looked for, use + * {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} directly rather than a filter. */ @InterfaceAudience.Public public class FamilyFilter extends CompareFilter { /** * Constructor. - * - * @param op the compare op for column family matching + * @param op the compare op for column family matching * @param familyComparator the comparator for column family matching */ - public FamilyFilter(final CompareOperator op, - final ByteArrayComparable familyComparator) { + public FamilyFilter(final CompareOperator op, final ByteArrayComparable familyComparator) { super(op, familyComparator); } @@ -67,10 +66,10 @@ public ReturnCode filterCell(final Cell c) { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new FamilyFilter(compareOp, comparator); } @@ -78,21 +77,17 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.FamilyFilter.Builder builder = - FilterProtos.FamilyFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.FamilyFilter.Builder builder = FilterProtos.FamilyFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link FamilyFilter} instance - * @return An instance of {@link FamilyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FamilyFilter} made from bytes n * @see #toByteArray */ - public static FamilyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FamilyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FamilyFilter proto; try { proto = FilterProtos.FamilyFilter.parseFrom(pbBytes); @@ -109,21 +104,21 @@ public static FamilyFilter parseFrom(final byte [] pbBytes) } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new FamilyFilter(valueCompareOp,valueComparator); + return new FamilyFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
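A short sketch of the distinction the Javadoc draws above, with made-up names: use Get#addFamily when the family is known exactly, and FamilyFilter only when the family has to be matched by comparison:

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.FamilyFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    // Family known up front: restrict the Get directly, no filter involved.
    Get get = new Get(Bytes.toBytes("row1"));
    get.addFamily(Bytes.toBytes("cf"));

    // Family only known by comparison: keep families that sort >= "c".
    Scan scan = new Scan();
    scan.setFilter(new FamilyFilter(CompareOperator.GREATER_OR_EQUAL,
        new BinaryComparator(Bytes.toBytes("c"))));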
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof FamilyFilter)) return false; - FamilyFilter other = (FamilyFilter)o; + FamilyFilter other = (FamilyFilter) o; return super.areSerializedFieldsEqual(other); - } + } @Override public boolean equals(Object obj) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index 8fba32c34294..2c623306ba0b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,63 +15,51 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; /** - * Interface for row and column filters directly applied within the regionserver. - * - * A filter can expect the following call sequence: + * Interface for row and column filters directly applied within the regionserver. A filter can + * expect the following call sequence: *
- *
- * • {@link #reset()} : reset the filter state before filtering a new row.
- * • {@link #filterAllRemaining()}: true means row scan is over; false means keep going.
- * • {@link #filterRowKey(Cell)}: true means drop this row; false means include.
- * • {@link #filterCell(Cell)}: decides whether to include or exclude this Cell.
- *   See {@link ReturnCode}.
- * • {@link #transformCell(Cell)}: if the Cell is included, let the filter transform the
- *   Cell.
- * • {@link #filterRowCells(List)}: allows direct modification of the final list to be submitted
- * • {@link #filterRow()}: last chance to drop entire row based on the sequence of
- *   filter calls. Eg: filter a row if it doesn't contain a specified column.
+ * • {@link #reset()} : reset the filter state before filtering a new row.
+ * • {@link #filterAllRemaining()}: true means row scan is over; false means keep going.
+ * • {@link #filterRowKey(Cell)}: true means drop this row; false means include.
+ * • {@link #filterCell(Cell)}: decides whether to include or exclude this Cell. See
+ *   {@link ReturnCode}.
+ * • {@link #transformCell(Cell)}: if the Cell is included, let the filter transform the Cell.
+ * • {@link #filterRowCells(List)}: allows direct modification of the final list to be submitted
+ * • {@link #filterRow()}: last chance to drop entire row based on the sequence of filter calls.
+ *   Eg: filter a row if it doesn't contain a specified column.
      - * - * Filter instances are created one per region/scan. This abstract class replaces - * the old RowFilterInterface. - * - * When implementing your own filters, consider inheriting {@link FilterBase} to help - * you reduce boilerplate. - * + * Filter instances are created one per region/scan. This abstract class replaces the old + * RowFilterInterface. When implementing your own filters, consider inheriting {@link FilterBase} to + * help you reduce boilerplate. * @see FilterBase */ @InterfaceAudience.Public public abstract class Filter { protected transient boolean reversed; + /** - * Reset the state of the filter between rows. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * Reset the state of the filter between rows. Concrete implementers can signal a failure + * condition in their code by throwing an {@link IOException}. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public void reset() throws IOException; /** * Filters a row based on the row key. If this returns true, the entire row will be excluded. If - * false, each KeyValue in the row will be passed to {@link #filterCell(Cell)} below. - * If {@link #filterAllRemaining()} returns true, then {@link #filterRowKey(Cell)} should - * also return true. - * - * Concrete implementers can signal a failure condition in their code by throwing an + * false, each KeyValue in the row will be passed to {@link #filterCell(Cell)} below. If + * {@link #filterAllRemaining()} returns true, then {@link #filterRowKey(Cell)} should also return + * true. Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @param firstRowCell The first cell coming in the new row * @return true, remove entire row, false, include the row (maybe). * @throws IOException in case an I/O or an filter specific failure needs to be signaled. @@ -80,11 +67,8 @@ public abstract class Filter { abstract public boolean filterRowKey(Cell firstRowCell) throws IOException; /** - * If this returns true, the scan will terminate. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * If this returns true, the scan will terminate. Concrete implementers can signal a failure + * condition in their code by throwing an {@link IOException}. * @return true to end scan, false to continue. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ @@ -93,19 +77,12 @@ public abstract class Filter { /** * A way to filter based on the column family, column qualifier and/or the column value. Return * code is described below. This allows filters to filter only certain number of columns, then - * terminate without matching ever column. - * - * If filterRowKey returns true, filterCell needs to be consistent with it. - * - * filterCell can assume that filterRowKey has already been called for the row. - * - * If your filter returns ReturnCode.NEXT_ROW, it should return + * terminate without matching ever column. If filterRowKey returns true, filterCell needs to be + * consistent with it. filterCell can assume that filterRowKey has already been called for the + * row. If your filter returns ReturnCode.NEXT_ROW, it should return * ReturnCode.NEXT_ROW until {@link #reset()} is called just in case the caller calls - * for the next row. 
- * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * for the next row. Concrete implementers can signal a failure condition in their code by + * throwing an {@link IOException}. * @param c the Cell in question * @return code as described below * @throws IOException in case an I/O or an filter specific failure needs to be signaled. @@ -116,18 +93,13 @@ public ReturnCode filterCell(final Cell c) throws IOException { } /** - * Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new - * Cell object must be returned. - * - * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() - * The transformed KeyValue is what is eventually returned to the client. Most filters will - * return the passed KeyValue unchanged. + * Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new Cell + * object must be returned. + * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() The transformed KeyValue is what is + * eventually returned to the client. Most filters will return the passed KeyValue unchanged. * @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transformCell(Cell) for an example of a - * transformation. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * transformation. Concrete implementers can signal a failure condition in their code by + * throwing an {@link IOException}. * @param v the KeyValue in question * @return the changed KeyValue * @throws IOException in case an I/O or an filter specific failure needs to be signaled. @@ -172,14 +144,12 @@ public enum ReturnCode { * Include KeyValue and done with row, seek to next. See NEXT_ROW. */ INCLUDE_AND_SEEK_NEXT_ROW, -} + } /** * Chance to alter the list of Cells to be submitted. Modifications to the list will carry on - * * Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @param kvs the list of Cells to be filtered * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ @@ -188,19 +158,15 @@ public enum ReturnCode { /** * Primarily used to check for conflicts with scans(such as scans that do not read a full row at a * time). - * * @return True if this filter actively uses filterRowCells(List) or filterRow(). */ abstract public boolean hasFilterRow(); /** - * Last chance to veto row based on previous {@link #filterCell(Cell)} calls. The filter - * needs to retain state then return a particular value for this call if they wish to exclude a - * row if a certain column is missing (for example). - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * Last chance to veto row based on previous {@link #filterCell(Cell)} calls. The filter needs to + * retain state then return a particular value for this call if they wish to exclude a row if a + * certain column is missing (for example). Concrete implementers can signal a failure condition + * in their code by throwing an {@link IOException}. * @return true to exclude row, false to include row. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ @@ -209,11 +175,8 @@ public enum ReturnCode { /** * If the filter returns the match code SEEK_NEXT_USING_HINT, then it should also tell which is * the next key it must seek to. 
After receiving the match code SEEK_NEXT_USING_HINT, the - * QueryMatcher would call this function to find out which key it must next seek to. - * - * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * + * QueryMatcher would call this function to find out which key it must next seek to. Concrete + * implementers can signal a failure condition in their code by throwing an {@link IOException}. * @return KeyValue which must be next seeked. return null if the filter is not sure which key to * seek to next. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. @@ -224,48 +187,35 @@ public enum ReturnCode { * Check that given column family is essential for filter to check row. Most filters always return * true here. But some could have more sophisticated logic which could significantly reduce * scanning process by not even touching columns until we are 100% sure that it's data is needed - * in result. - * - * Concrete implementers can signal a failure condition in their code by throwing an + * in result. Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public boolean isFamilyEssential(byte[] name) throws IOException; /** - * TODO: JAVADOC - * - * Concrete implementers can signal a failure condition in their code by throwing an + * TODO: JAVADOC Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @return The filter serialized using pb * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public byte[] toByteArray() throws IOException; /** - * * Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. - * * @param pbBytes A pb serialized {@link Filter} instance - * @return An instance of {@link Filter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link Filter} made from bytes n * @see #toByteArray */ - public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException { + public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException { throw new DeserializationException( "parseFrom called on base Filter, but should be called on derived type"); } /** * Concrete implementers can signal a failure condition in their code by throwing an - * {@link IOException}. - * - * @param other - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * {@link IOException}. n * @return true if and only if the fields of the filter that are + * serialized are equal to the corresponding fields in other. Used for testing. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. 
*/ abstract boolean areSerializedFieldsEqual(Filter other); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java index 9fb796af45b7..ff637c7f0527 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,32 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.yetus.audience.InterfaceAudience; /** - * Abstract base class to help you implement new Filters. Common "ignore" or NOOP type - * methods can go here, helping to reduce boiler plate in an ever-expanding filter - * library. - * - * If you could instantiate FilterBase, it would end up being a "null" filter - - * that is one that never filters anything. + * Abstract base class to help you implement new Filters. Common "ignore" or NOOP type methods can + * go here, helping to reduce boiler plate in an ever-expanding filter library. If you could + * instantiate FilterBase, it would end up being a "null" filter - that is one that never filters + * anything. */ @InterfaceAudience.Private // TODO add filter limited private level public abstract class FilterBase extends Filter { /** - * Filters that are purely stateless and do nothing in their reset() methods can inherit - * this null/empty implementation. - * - * {@inheritDoc} + * Filters that are purely stateless and do nothing in their reset() methods can inherit this + * null/empty implementation. {@inheritDoc} */ @Override public void reset() throws IOException { @@ -53,10 +46,8 @@ public boolean filterRowKey(Cell cell) throws IOException { } /** - * Filters that never filter all remaining can inherit this implementation that - * never stops the filter early. - * - * {@inheritDoc} + * Filters that never filter all remaining can inherit this implementation that never stops the + * filter early. {@inheritDoc} */ @Override public boolean filterAllRemaining() throws IOException { @@ -64,9 +55,7 @@ public boolean filterAllRemaining() throws IOException { } /** - * By default no transformation takes place - * - * {@inheritDoc} + * By default no transformation takes place {@inheritDoc} */ @Override public Cell transformCell(Cell v) throws IOException { @@ -74,20 +63,16 @@ public Cell transformCell(Cell v) throws IOException { } /** - * Filters that never filter by modifying the returned List of Cells can - * inherit this implementation that does nothing. - * - * {@inheritDoc} + * Filters that never filter by modifying the returned List of Cells can inherit this + * implementation that does nothing. {@inheritDoc} */ @Override public void filterRowCells(List ignored) throws IOException { } /** - * Fitlers that never filter by modifying the returned List of Cells can - * inherit this implementation that does nothing. - * - * {@inheritDoc} + * Fitlers that never filter by modifying the returned List of Cells can inherit this + * implementation that does nothing. 
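To make the call sequence documented for Filter concrete, and to show the boilerplate FilterBase absorbs, here is a toy stateful filter (not part of this patch; class and family names are invented): it includes every cell but vetoes whole rows that never contained the "req" family, using reset(), filterCell() and filterRow() in the documented order:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.filter.FilterBase;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequireFamilyFilter extends FilterBase {
      private boolean seen = false;

      @Override
      public void reset() {          // called before each new row
        seen = false;
      }

      @Override
      public ReturnCode filterCell(Cell c) {
        if (CellUtil.matchingFamily(c, Bytes.toBytes("req"))) {
          seen = true;
        }
        return ReturnCode.INCLUDE;   // include everything for now
      }

      @Override
      public boolean hasFilterRow() {
        return true;                 // ask the scanner for the filterRow() callback
      }

      @Override
      public boolean filterRow() {
        return !seen;                // last chance: drop the row if "req" never showed up
      }
    }

All other Filter methods fall back to FilterBase's no-op defaults, which is the point the Javadoc above is making.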
{@inheritDoc} */ @Override public boolean hasFilterRow() { @@ -96,9 +81,7 @@ public boolean hasFilterRow() { /** * Filters that never filter by rows based on previously gathered state from - * {@link #filterCell(Cell)} can inherit this implementation that - * never filters a row. - * + * {@link #filterCell(Cell)} can inherit this implementation that never filters a row. * {@inheritDoc} */ @Override @@ -107,10 +90,8 @@ public boolean filterRow() throws IOException { } /** - * Filters that are not sure which key must be next seeked to, can inherit - * this implementation that, by default, returns a null Cell. - * - * {@inheritDoc} + * Filters that are not sure which key must be next seeked to, can inherit this implementation + * that, by default, returns a null Cell. {@inheritDoc} */ @Override public Cell getNextCellHint(Cell currentCell) throws IOException { @@ -118,10 +99,8 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { } /** - * By default, we require all scan's column families to be present. Our - * subclasses may be more precise. - * - * {@inheritDoc} + * By default, we require all scan's column families to be present. Our subclasses may be more + * precise. {@inheritDoc} */ @Override public boolean isFamilyEssential(byte[] name) throws IOException { @@ -134,7 +113,7 @@ public boolean isFamilyEssential(byte[] name) throws IOException { * @param filterArguments the filter's arguments * @return constructed filter object */ - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { throw new IllegalArgumentException("This method has not been implemented"); } @@ -155,11 +134,9 @@ public byte[] toByteArray() throws IOException { } /** - * Default implementation so that writers of custom filters aren't forced to implement. - * - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * Default implementation so that writers of custom filters aren't forced to implement. n + * * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index 32fa799e8749..be3035858f13 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,12 +23,12 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; @@ -61,7 +60,7 @@ public enum Operator { /** * Constructor that takes a set of {@link Filter}s and an operator. * @param operator Operator to process filter set with. - * @param filters Set of row filters. + * @param filters Set of row filters. */ public FilterList(final Operator operator, final List filters) { if (operator == Operator.MUST_PASS_ALL) { @@ -85,8 +84,7 @@ public FilterList(final List filters) { /** * Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL - * is assumed. - * @param filters + * is assumed. n */ public FilterList(final Filter... filters) { this(Operator.MUST_PASS_ALL, Arrays.asList(filters)); @@ -103,23 +101,21 @@ public FilterList(final Operator operator) { /** * Constructor that takes a var arg number of {@link Filter}s and an operator. * @param operator Operator to process filter set with. - * @param filters Filters to use + * @param filters Filters to use */ public FilterList(final Operator operator, final Filter... filters) { this(operator, Arrays.asList(filters)); } /** - * Get the operator. - * @return operator + * Get the operator. n */ public Operator getOperator() { return operator; } /** - * Get the filters. - * @return filters + * Get the filters. n */ public List getFilters() { return filterListBase.getFilters(); @@ -201,9 +197,7 @@ public byte[] toByteArray() throws IOException { /** * @param pbBytes A pb serialized {@link FilterList} instance - * @return An instance of {@link FilterList} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FilterList} made from bytes n * @see #toByteArray */ public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FilterList proto; @@ -226,9 +220,8 @@ public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationE } /** - * @param other - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
*/ @Override boolean areSerializedFieldsEqual(Filter other) { @@ -237,7 +230,7 @@ boolean areSerializedFieldsEqual(Filter other) { FilterList o = (FilterList) other; return this.getOperator().equals(o.getOperator()) - && ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters())); + && ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters())); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java index a9defbf0240f..4a15af277266 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.yetus.audience.InterfaceAudience; @@ -95,8 +92,7 @@ protected int compareCell(Cell a, Cell b) { * the current child, we should set the traverse result (transformed cell) of previous node(s) as * the initial value. (HBASE-18879). * @param c The cell in question. - * @return the transformed cell. - * @throws IOException + * @return the transformed cell. n */ @Override public Cell transformCell(Cell c) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java index 04aad2ce5195..760b79d497d3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; -import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; +import org.apache.hadoop.hbase.Cell; +import org.apache.yetus.audience.InterfaceAudience; /** * FilterListWithAND represents an ordered list of filters which will be evaluated with an AND @@ -72,7 +69,8 @@ protected String formatLogFilters(List logFilters) { * The jump step will be: * *
      -   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW < SEEK_NEXT_USING_HINT
      +   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW
      +   *     < SEEK_NEXT_USING_HINT
          * 
      * * Here, we have the following map to describe The Maximal Step Rule. if current return code (for @@ -91,7 +89,7 @@ protected String formatLogFilters(List logFilters) { * SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT * * - * @param rc Return code which is calculated by previous sub-filter(s) in filter list. + * @param rc Return code which is calculated by previous sub-filter(s) in filter list. * @param localRC Return code of the current sub-filter in filter list. * @return Return code which is merged by the return code of previous sub-filter(s) and the return * code of current sub-filter. @@ -120,8 +118,10 @@ private ReturnCode mergeReturnCode(ReturnCode rc, ReturnCode localRC) { } break; case INCLUDE_AND_SEEK_NEXT_ROW: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; } if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { @@ -140,8 +140,10 @@ private ReturnCode mergeReturnCode(ReturnCode rc, ReturnCode localRC) { } break; case NEXT_COL: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.SKIP, - ReturnCode.NEXT_COL)) { + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.SKIP, + ReturnCode.NEXT_COL) + ) { return ReturnCode.NEXT_COL; } if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, ReturnCode.NEXT_ROW)) { @@ -152,7 +154,7 @@ private ReturnCode mergeReturnCode(ReturnCode rc, ReturnCode localRC) { return ReturnCode.NEXT_ROW; } throw new IllegalStateException( - "Received code is not valid. rc: " + rc + ", localRC: " + localRC); + "Received code is not valid. rc: " + rc + ", localRC: " + localRC); } private boolean isIncludeRelatedReturnCode(ReturnCode rc) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java index 725260ef7e51..dd50a1bbb8d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.yetus.audience.InterfaceAudience; /** * FilterListWithOR represents an ordered list of filters which will be evaluated with an OR @@ -82,39 +79,40 @@ protected String formatLogFilters(List logFilters) { * next family for RegionScanner, INCLUDE_AND_NEXT_ROW is the same. 
so we should pass current cell * to the filter, if row mismatch or row match but column family mismatch. (HBASE-18368) * @see org.apache.hadoop.hbase.filter.Filter.ReturnCode - * @param subFilter which sub-filter to calculate the return code by using previous cell and - * previous return code. - * @param prevCell the previous cell passed to given sub-filter. + * @param subFilter which sub-filter to calculate the return code by using previous cell and + * previous return code. + * @param prevCell the previous cell passed to given sub-filter. * @param currentCell the current cell which will pass to given sub-filter. - * @param prevCode the previous return code for given sub-filter. + * @param prevCode the previous return code for given sub-filter. * @return return code calculated by using previous cell and previous return code. null means can * not decide which return code should return, so we will pass the currentCell to * subFilter for getting currentCell's return code, and it won't impact the sub-filter's * internal states. */ private ReturnCode calculateReturnCodeByPrevCellAndRC(Filter subFilter, Cell currentCell, - Cell prevCell, ReturnCode prevCode) throws IOException { + Cell prevCell, ReturnCode prevCode) throws IOException { if (prevCell == null || prevCode == null) { return null; } switch (prevCode) { - case INCLUDE: - case SKIP: + case INCLUDE: + case SKIP: return null; - case SEEK_NEXT_USING_HINT: + case SEEK_NEXT_USING_HINT: Cell nextHintCell = subFilter.getNextCellHint(prevCell); return nextHintCell != null && compareCell(currentCell, nextHintCell) < 0 - ? ReturnCode.SEEK_NEXT_USING_HINT : null; - case NEXT_COL: - case INCLUDE_AND_NEXT_COL: + ? ReturnCode.SEEK_NEXT_USING_HINT + : null; + case NEXT_COL: + case INCLUDE_AND_NEXT_COL: // Once row changed, reset() will clear prevCells, so we need not to compare their rows // because rows are the same here. return CellUtil.matchingColumn(prevCell, currentCell) ? ReturnCode.NEXT_COL : null; - case NEXT_ROW: - case INCLUDE_AND_SEEK_NEXT_ROW: + case NEXT_ROW: + case INCLUDE_AND_SEEK_NEXT_ROW: // As described above, rows are definitely the same, so we only compare the family. return CellUtil.matchingFamily(prevCell, currentCell) ? ReturnCode.NEXT_ROW : null; - default: + default: throw new IllegalStateException("Received code is not valid."); } } @@ -129,7 +127,8 @@ private ReturnCode calculateReturnCodeByPrevCellAndRC(Filter subFilter, Cell cur * The jump step will be: * *
      -   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW < SEEK_NEXT_USING_HINT
      +   * INCLUDE < SKIP < INCLUDE_AND_NEXT_COL < NEXT_COL < INCLUDE_AND_SEEK_NEXT_ROW < NEXT_ROW
      +   *     < SEEK_NEXT_USING_HINT
          * 
      * * Here, we have the following map to describe The Minimal Step Rule. if current return code (for @@ -148,7 +147,7 @@ private ReturnCode calculateReturnCodeByPrevCellAndRC(Filter subFilter, Cell cur * SEEK_NEXT_USING_HINT INCLUDE INCLUDE INCLUDE SKIP SKIP SKIP SEEK_NEXT_USING_HINT * * - * @param rc Return code which is calculated by previous sub-filter(s) in filter list. + * @param rc Return code which is calculated by previous sub-filter(s) in filter list. * @param localRC Return code of the current sub-filter in filter list. * @return Return code which is merged by the return code of previous sub-filter(s) and the return * code of current sub-filter. @@ -156,90 +155,101 @@ private ReturnCode calculateReturnCodeByPrevCellAndRC(Filter subFilter, Cell cur private ReturnCode mergeReturnCode(ReturnCode rc, ReturnCode localRC) { if (rc == null) return localRC; switch (localRC) { - case INCLUDE: - return ReturnCode.INCLUDE; - case INCLUDE_AND_NEXT_COL: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, - ReturnCode.SEEK_NEXT_USING_HINT)) { + case INCLUDE: return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, - ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - break; - case INCLUDE_AND_SEEK_NEXT_ROW: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, - ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.NEXT_COL)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, ReturnCode.NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; - } - break; - case SKIP: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW, - ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SKIP; - } - break; - case NEXT_COL: - if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { - return ReturnCode.NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SKIP; - } - break; - case NEXT_ROW: - if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL)) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { - return ReturnCode.SKIP; - } - if (isInReturnCodes(rc, ReturnCode.NEXT_COL)) { - return ReturnCode.NEXT_COL; - } - if (isInReturnCodes(rc, ReturnCode.NEXT_ROW)) { - return ReturnCode.NEXT_ROW; - } - break; - case SEEK_NEXT_USING_HINT: - if (isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { - return ReturnCode.INCLUDE; - } - if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { - return ReturnCode.SKIP; - } - if (isInReturnCodes(rc, ReturnCode.SEEK_NEXT_USING_HINT)) { - return 
ReturnCode.SEEK_NEXT_USING_HINT; - } - break; + case INCLUDE_AND_NEXT_COL: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT) + ) { + return ReturnCode.INCLUDE; + } + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, + ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW) + ) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + break; + case INCLUDE_AND_SEEK_NEXT_ROW: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT) + ) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.NEXT_COL)) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, ReturnCode.NEXT_ROW)) { + return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; + } + break; + case SKIP: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { + return ReturnCode.INCLUDE; + } + if ( + isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW, + ReturnCode.SEEK_NEXT_USING_HINT) + ) { + return ReturnCode.SKIP; + } + break; + case NEXT_COL: + if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { + return ReturnCode.NEXT_COL; + } + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { + return ReturnCode.SKIP; + } + break; + case NEXT_ROW: + if (isInReturnCodes(rc, ReturnCode.INCLUDE)) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_NEXT_COL)) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { + return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; + } + if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.SEEK_NEXT_USING_HINT)) { + return ReturnCode.SKIP; + } + if (isInReturnCodes(rc, ReturnCode.NEXT_COL)) { + return ReturnCode.NEXT_COL; + } + if (isInReturnCodes(rc, ReturnCode.NEXT_ROW)) { + return ReturnCode.NEXT_ROW; + } + break; + case SEEK_NEXT_USING_HINT: + if ( + isInReturnCodes(rc, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { + return ReturnCode.INCLUDE; + } + if (isInReturnCodes(rc, ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) { + return ReturnCode.SKIP; + } + if (isInReturnCodes(rc, ReturnCode.SEEK_NEXT_USING_HINT)) { + return ReturnCode.SEEK_NEXT_USING_HINT; + } + break; } throw new IllegalStateException( - "Received code is not valid. rc: " + rc + ", localRC: " + localRC); + "Received code is not valid. rc: " + rc + ", localRC: " + localRC); } private void updatePrevFilterRCList(int index, ReturnCode currentRC) { @@ -287,8 +297,10 @@ public ReturnCode filterCell(Cell c) throws IOException { rc = mergeReturnCode(rc, localRC); // For INCLUDE* case, we need to update the transformed cell. 
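As a minimal client-side sketch of the MUST_PASS_ONE semantics that mergeReturnCode() serves above: each cell is evaluated by every sub-filter and the per-filter ReturnCodes are merged so the scan takes the smallest step that is still safe for all sub-filters. The row prefix and qualifier below are illustrative only, not taken from this patch.

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.*;
    import org.apache.hadoop.hbase.util.Bytes;

    // OR list: a cell passes if either sub-filter accepts it.
    FilterList orList = new FilterList(FilterList.Operator.MUST_PASS_ONE,
        new PrefixFilter(Bytes.toBytes("row-a")),
        new QualifierFilter(CompareOperator.EQUAL,
            new BinaryComparator(Bytes.toBytes("q1"))));
    Scan scan = new Scan().setFilter(orList);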
- if (isInReturnCodes(localRC, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, - ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) { + if ( + isInReturnCodes(localRC, ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL, + ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) + ) { subFiltersIncludedCell.set(i, true); } } @@ -380,7 +392,6 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { return minKeyHint; } - @Override public boolean equals(Object obj) { if (obj == null || (!(obj instanceof FilterListWithOR))) { @@ -390,9 +401,8 @@ public boolean equals(Object obj) { return true; } FilterListWithOR f = (FilterListWithOR) obj; - return this.filters.equals(f.getFilters()) && - this.prevFilterRCList.equals(f.prevFilterRCList) && - this.prevCellList.equals(f.prevCellList); + return this.filters.equals(f.getFilters()) && this.prevFilterRCList.equals(f.prevFilterRCList) + && this.prevCellList.equals(f.prevCellList); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java index dc4207d821cf..83f8409facc8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +20,15 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** * A filter that will only return the first KV from each row. *

      @@ -55,14 +54,14 @@ public boolean filterRowKey(Cell cell) throws IOException { @Override public ReturnCode filterCell(final Cell c) { - if(foundKV) return ReturnCode.NEXT_ROW; + if (foundKV) return ReturnCode.NEXT_ROW; foundKV = true; return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.isEmpty(), - "Expected 0 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.isEmpty(), "Expected 0 but got: %s", + filterArguments.size()); return new FirstKeyOnlyFilter(); } @@ -74,7 +73,6 @@ protected boolean hasFoundKV() { } /** - * * @param value update {@link #foundKV} flag with value. */ protected void setFoundKV(boolean value) { @@ -85,9 +83,8 @@ protected void setFoundKV(boolean value) { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.FirstKeyOnlyFilter.Builder builder = - FilterProtos.FirstKeyOnlyFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.FirstKeyOnlyFilter.Builder builder = FilterProtos.FirstKeyOnlyFilter.newBuilder(); return builder.build().toByteArray(); } @@ -97,9 +94,8 @@ protected void setFoundKV(boolean value) { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static FirstKeyOnlyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - // There is nothing to deserialize. Why do this at all? + public static FirstKeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { + // There is nothing to deserialize. Why do this at all? try { FilterProtos.FirstKeyOnlyFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { @@ -111,8 +107,8 @@ public static FirstKeyOnlyFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java index 91d02952fe27..cec4a2f06ff2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; - +import org.apache.yetus.audience.InterfaceAudience; /** - * This filter was deprecated in 2.0.0 and should be removed in 3.0.0. We keep the code here - * to prevent the proto serialization exceptions puzzle those users who use older version clients - * to communicate with newer version servers. - * + * This filter was deprecated in 2.0.0 and should be removed in 3.0.0. 
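A small scan sketch for the FirstKeyOnlyFilter hunks above: one cell per row is enough to count rows cheaply. The Connection instance and the table name "t1" are assumptions for illustration, not part of this patch.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

    Scan scan = new Scan().setFilter(new FirstKeyOnlyFilter());
    try (Table table = connection.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(scan)) {
      long rows = 0;
      for (Result r : scanner) {
        rows++;   // each Result carries only the row's first cell
      }
    }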
We keep the code here to + * prevent the proto serialization exceptions puzzle those users who use older version clients to + * communicate with newer version servers. * @deprecated Deprecated in 2.0.0 and will be removed in 3.0.0. * @see HBASE-13347 */ @@ -36,12 +33,11 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { /** * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance - * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from + * bytes n * @see #toByteArray */ - public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { throw new DeserializationException( "Stop using FirstKeyValueMatchingQualifiersFilter, which has been permanently removed"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index cf2c95f9e633..a41763fcea30 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -79,8 +79,8 @@ public FuzzyRowFilter(List> fuzzyKeysData) { for (Pair aFuzzyKeysData : fuzzyKeysData) { if (aFuzzyKeysData.getFirst().length != aFuzzyKeysData.getSecond().length) { - Pair readable = - new Pair<>(Bytes.toStringBinary(aFuzzyKeysData.getFirst()), Bytes.toStringBinary(aFuzzyKeysData.getSecond())); + Pair readable = new Pair<>(Bytes.toStringBinary(aFuzzyKeysData.getFirst()), + Bytes.toStringBinary(aFuzzyKeysData.getSecond())); throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable); } @@ -99,7 +99,6 @@ public FuzzyRowFilter(List> fuzzyKeysData) { this.tracker = new RowTracker(); } - private void preprocessSearchKey(Pair p) { if (!UNSAFE_UNALIGNED) { // do nothing @@ -117,9 +116,7 @@ private void preprocessSearchKey(Pair p) { /** * We need to preprocess mask array, as since we treat 2's as unfixed positions and -1 (0xff) as - * fixed positions - * @param mask - * @return mask array + * fixed positions n * @return mask array */ private byte[] preprocessMask(byte[] mask) { if (!UNSAFE_UNALIGNED) { @@ -157,9 +154,8 @@ public ReturnCode filterCell(final Cell c) { for (int j = 0; j < fuzzyData.getSecond().length; j++) { fuzzyData.getSecond()[j] >>= 2; } - SatisfiesCode satisfiesCode = - satisfies(isReversed(), c.getRowArray(), c.getRowOffset(), c.getRowLength(), - fuzzyData.getFirst(), fuzzyData.getSecond()); + SatisfiesCode satisfiesCode = satisfies(isReversed(), c.getRowArray(), c.getRowOffset(), + c.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); if (satisfiesCode == SatisfiesCode.YES) { lastFoundIndex = index; return ReturnCode.INCLUDE; @@ -197,14 +193,15 @@ private class RowTracker { RowTracker() { nextRows = new PriorityQueue<>(fuzzyKeysData.size(), - new Comparator>>() { - @Override - public int compare(Pair> o1, - Pair> o2) { - return isReversed()? 
Bytes.compareTo(o2.getFirst(), o1.getFirst()): - Bytes.compareTo(o1.getFirst(), o2.getFirst()); - } - }); + new Comparator>>() { + @Override + public int compare(Pair> o1, + Pair> o2) { + return isReversed() + ? Bytes.compareTo(o2.getFirst(), o1.getFirst()) + : Bytes.compareTo(o1.getFirst(), o2.getFirst()); + } + }); } byte[] nextRow() { @@ -233,14 +230,15 @@ boolean updateTracker(Cell currentCell) { } boolean lessThan(Cell currentCell, byte[] nextRowKey) { - int compareResult = CellComparator.getInstance().compareRows(currentCell, nextRowKey, 0, nextRowKey.length); + int compareResult = + CellComparator.getInstance().compareRows(currentCell, nextRowKey, 0, nextRowKey.length); return (!isReversed() && compareResult < 0) || (isReversed() && compareResult > 0); } void updateWith(Cell currentCell, Pair fuzzyData) { byte[] nextRowKeyCandidate = - getNextForFuzzyRule(isReversed(), currentCell.getRowArray(), currentCell.getRowOffset(), - currentCell.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); + getNextForFuzzyRule(isReversed(), currentCell.getRowArray(), currentCell.getRowOffset(), + currentCell.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); if (nextRowKeyCandidate != null) { nextRows.add(new Pair<>(nextRowKeyCandidate, fuzzyData)); } @@ -270,9 +268,8 @@ public byte[] toByteArray() { /** * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance - * @return An instance of {@link FuzzyRowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link FuzzyRowFilter} made from bytes n * @see + * #toByteArray */ public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FuzzyRowFilter proto; @@ -321,12 +318,12 @@ static SatisfiesCode satisfies(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKey } static SatisfiesCode satisfies(boolean reverse, byte[] row, byte[] fuzzyKeyBytes, - byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyMeta) { return satisfies(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); } static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { if (!UNSAFE_UNALIGNED) { return satisfiesNoUnsafe(reverse, row, offset, length, fuzzyKeyBytes, fuzzyKeyMeta); @@ -390,7 +387,7 @@ static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int leng } static SatisfiesCode satisfiesNoUnsafe(boolean reverse, byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { if (row == null) { // do nothing, let scan to proceed return SatisfiesCode.YES; @@ -440,7 +437,7 @@ static byte[] getNextForFuzzyRule(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzy } static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, byte[] fuzzyKeyBytes, - byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyMeta) { return getNextForFuzzyRule(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); } @@ -526,7 +523,7 @@ public static Order orderFor(boolean reverse) { * otherwise */ static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { // To find out the next "smallest" byte array that satisfies fuzzy rule and "greater" than // the given one we do the following: // 1. 
setting values on all "fixed" positions to the values from fuzzyKeyBytes @@ -536,7 +533,7 @@ static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int l // It is easier to perform this by using fuzzyKeyBytes copy and setting "non-fixed" position // values than otherwise. byte[] result = - Arrays.copyOf(fuzzyKeyBytes, length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length); + Arrays.copyOf(fuzzyKeyBytes, length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length); if (reverse && length > fuzzyKeyBytes.length) { // we need trailing 0xff's instead of trailing 0x00's for (int i = fuzzyKeyBytes.length; i < result.length; i++) { @@ -586,29 +583,23 @@ static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int l } } - return reverse? result: trimTrailingZeroes(result, fuzzyKeyMeta, toInc); + return reverse ? result : trimTrailingZeroes(result, fuzzyKeyMeta, toInc); } /** - * For forward scanner, next cell hint should not contain any trailing zeroes - * unless they are part of fuzzyKeyMeta - * hint = '\x01\x01\x01\x00\x00' - * will skip valid row '\x01\x01\x01' - * - * @param result - * @param fuzzyKeyMeta - * @param toInc - position of incremented byte + * For forward scanner, next cell hint should not contain any trailing zeroes unless they are part + * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param + * toInc - position of incremented byte * @return trimmed version of result */ - + private static byte[] trimTrailingZeroes(byte[] result, byte[] fuzzyKeyMeta, int toInc) { - int off = fuzzyKeyMeta.length >= result.length? result.length -1: - fuzzyKeyMeta.length -1; - for( ; off >= 0; off--){ - if(fuzzyKeyMeta[off] != 0) break; + int off = fuzzyKeyMeta.length >= result.length ? result.length - 1 : fuzzyKeyMeta.length - 1; + for (; off >= 0; off--) { + if (fuzzyKeyMeta[off] != 0) break; } - if (off < toInc) off = toInc; - byte[] retValue = new byte[off+1]; + if (off < toInc) off = toInc; + byte[] retValue = new byte[off + 1]; System.arraycopy(result, 0, retValue, 0, retValue.length); return retValue; } @@ -627,8 +618,10 @@ boolean areSerializedFieldsEqual(Filter o) { for (int i = 0; i < fuzzyKeysData.size(); ++i) { Pair thisData = this.fuzzyKeysData.get(i); Pair otherData = other.fuzzyKeysData.get(i); - if (!(Bytes.equals(thisData.getFirst(), otherData.getFirst()) && Bytes.equals( - thisData.getSecond(), otherData.getSecond()))) { + if ( + !(Bytes.equals(thisData.getFirst(), otherData.getFirst()) + && Bytes.equals(thisData.getSecond(), otherData.getSecond())) + ) { return false; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java index 7c0966856d83..7f42fb633c33 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,34 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. 
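For the FuzzyRowFilter changes above, a sketch of the public mask contract (0 marks a fixed byte, 1 marks a non-fixed byte; the 2/-1 values mentioned in the code are the preprocessed internal form). The 8-byte row-key layout below is a made-up example.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    // Row keys are userId(4 bytes) + actionId(4 bytes); match any user with actionId "0004".
    List<Pair<byte[], byte[]>> fuzzyKeys = new ArrayList<>();
    fuzzyKeys.add(new Pair<>(
        Bytes.toBytesBinary("\\x00\\x00\\x00\\x000004"),   // key bytes; non-fixed positions are ignored
        new byte[] { 1, 1, 1, 1, 0, 0, 0, 0 }));           // 1 = non-fixed, 0 = fixed
    Scan scan = new Scan().setFilter(new FuzzyRowFilter(fuzzyKeys));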
*/ - package org.apache.hadoop.hbase.filter; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * A Filter that stops after the given row. There is no "RowStopFilter" because - * the Scan spec allows you to specify a stop row. - * - * Use this filter to include the stop row, eg: [A,Z]. + * A Filter that stops after the given row. There is no "RowStopFilter" because the Scan spec allows + * you to specify a stop row. Use this filter to include the stop row, eg: [A,Z]. */ @InterfaceAudience.Public public class InclusiveStopFilter extends FilterBase { - private byte [] stopRowKey; + private byte[] stopRowKey; private boolean done = false; - public InclusiveStopFilter(final byte [] stopRowKey) { + public InclusiveStopFilter(final byte[] stopRowKey) { this.stopRowKey = stopRowKey; } @@ -61,7 +57,8 @@ public ReturnCode filterCell(final Cell c) { public boolean filterRowKey(Cell firstRowCell) { // if stopRowKey is <= buffer, then true, filter row. if (filterAllRemaining()) return true; - int cmp = CellComparator.getInstance().compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length); + int cmp = + CellComparator.getInstance().compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length); done = reversed ? 
cmp < 0 : cmp > 0; return done; } @@ -71,10 +68,10 @@ public boolean filterAllRemaining() { return done; } - public static Filter createFilterFromArguments (ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] stopRowKey = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); + byte[] stopRowKey = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); return new InclusiveStopFilter(stopRowKey); } @@ -82,42 +79,42 @@ public static Filter createFilterFromArguments (ArrayList filterArgumen * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.InclusiveStopFilter.Builder builder = FilterProtos.InclusiveStopFilter.newBuilder(); - if (this.stopRowKey != null) builder.setStopRowKey( - UnsafeByteOperations.unsafeWrap(this.stopRowKey)); + if (this.stopRowKey != null) + builder.setStopRowKey(UnsafeByteOperations.unsafeWrap(this.stopRowKey)); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link InclusiveStopFilter} instance - * @return An instance of {@link InclusiveStopFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link InclusiveStopFilter} made from bytes n * @see + * #toByteArray */ - public static InclusiveStopFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static InclusiveStopFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.InclusiveStopFilter proto; try { proto = FilterProtos.InclusiveStopFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new InclusiveStopFilter(proto.hasStopRowKey()?proto.getStopRowKey().toByteArray():null); + return new InclusiveStopFilter( + proto.hasStopRowKey() ? proto.getStopRowKey().toByteArray() : null); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof InclusiveStopFilter)) return false; - InclusiveStopFilter other = (InclusiveStopFilter)o; + InclusiveStopFilter other = (InclusiveStopFilter) o; return Bytes.equals(this.getStopRowKey(), other.getStopRowKey()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java index 4826e05e2a37..c91543d29af1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
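A usage sketch for the InclusiveStopFilter hunks above: a plain Scan stop row is exclusive, so this filter is the usual way to include the upper bound. The row keys are illustrative.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    // Returns rows from "row-a" up to and including "row-z".
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-a"))
        .setFilter(new InclusiveStopFilter(Bytes.toBytes("row-z")));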
See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,7 @@ public class IncompatibleFilterException extends RuntimeException { private static final long serialVersionUID = 3236763276623198231L; -/** constructor */ + /** constructor */ public IncompatibleFilterException() { super(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java index 0ccdd902e365..9e48cbba1fe8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,6 @@ public class InvalidRowFilterException extends RuntimeException { private static final long serialVersionUID = 2667894046345657865L; - /** constructor */ public InvalidRowFilterException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java index 26e53cff39d1..6aa410730d8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.Iterator; import java.util.Objects; import java.util.Optional; - import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -39,21 +37,27 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * A filter that will only return the key component of each KV (the value will - * be rewritten as empty). + * A filter that will only return the key component of each KV (the value will be rewritten as + * empty). *

      - * This filter can be used to grab all of the keys without having to also grab - * the values. + * This filter can be used to grab all of the keys without having to also grab the values. */ @InterfaceAudience.Public public class KeyOnlyFilter extends FilterBase { boolean lenAsVal; - public KeyOnlyFilter() { this(false); } - public KeyOnlyFilter(boolean lenAsVal) { this.lenAsVal = lenAsVal; } + + public KeyOnlyFilter() { + this(false); + } + + public KeyOnlyFilter(boolean lenAsVal) { + this.lenAsVal = lenAsVal; + } @Override public boolean filterRowKey(Cell cell) throws IOException { @@ -79,9 +83,9 @@ public ReturnCode filterCell(final Cell ignored) throws IOException { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { Preconditions.checkArgument((filterArguments.isEmpty() || filterArguments.size() == 1), - "Expected: 0 or 1 but got: %s", filterArguments.size()); + "Expected: 0 or 1 but got: %s", filterArguments.size()); KeyOnlyFilter filter = new KeyOnlyFilter(); if (filterArguments.size() == 1) { filter.lenAsVal = ParseFilter.convertByteArrayToBoolean(filterArguments.get(0)); @@ -93,21 +97,17 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.KeyOnlyFilter.Builder builder = - FilterProtos.KeyOnlyFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.KeyOnlyFilter.Builder builder = FilterProtos.KeyOnlyFilter.newBuilder(); builder.setLenAsVal(this.lenAsVal); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link KeyOnlyFilter} instance - * @return An instance of {@link KeyOnlyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link KeyOnlyFilter} made from bytes n * @see #toByteArray */ - public static KeyOnlyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.KeyOnlyFilter proto; try { proto = FilterProtos.KeyOnlyFilter.parseFrom(pbBytes); @@ -119,15 +119,15 @@ public static KeyOnlyFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
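For the KeyOnlyFilter hunks above, a sketch of the two constructors; with lenAsVal set, each value is replaced by the length of the original value rather than by an empty array.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

    // Keys only: values are rewritten as empty byte arrays.
    Scan keysOnly = new Scan().setFilter(new KeyOnlyFilter());
    // Keys only, but each value becomes the 4-byte length of the original value.
    Scan keysWithLen = new Scan().setFilter(new KeyOnlyFilter(true));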
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof KeyOnlyFilter)) return false; - KeyOnlyFilter other = (KeyOnlyFilter)o; + KeyOnlyFilter other = (KeyOnlyFilter) o; return this.lenAsVal == other.lenAsVal; } @@ -212,7 +212,6 @@ public Type getType() { return cell.getType(); } - @Override public long getSequenceId() { return 0; @@ -268,8 +267,8 @@ public long heapSize() { } static class KeyOnlyByteBufferExtendedCell extends ByteBufferExtendedCell { - public static final int FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE - + Bytes.SIZEOF_BOOLEAN; + public static final int FIXED_OVERHEAD = + ClassSize.OBJECT + ClassSize.REFERENCE + Bytes.SIZEOF_BOOLEAN; private ByteBufferExtendedCell cell; private boolean lenAsVal; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java index 53198732e39c..ead0ee104470 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.ByteBufferUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.hadoop.hbase.util.Bytes; - /** * A long comparator which numerical compares against the specified byte array @@ -55,41 +53,38 @@ public int compareTo(ByteBuffer value, int offset, int length) { return Long.compare(longValue, that); } - /** - * @return The comparator serialized using pb - */ - @Override - public byte [] toByteArray() { - ComparatorProtos.LongComparator.Builder builder = - ComparatorProtos.LongComparator.newBuilder(); - builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); - return builder.build().toByteArray(); - } + /** + * @return The comparator serialized using pb + */ + @Override + public byte[] toByteArray() { + ComparatorProtos.LongComparator.Builder builder = ComparatorProtos.LongComparator.newBuilder(); + builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); + return builder.build().toByteArray(); + } - /** - * @param pbBytes A pb serialized {@link LongComparator} instance - * @return An instance of {@link LongComparator} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException - * @see #toByteArray - */ - public static LongComparator parseFrom(final byte [] pbBytes) - throws 
DeserializationException { - ComparatorProtos.LongComparator proto; - try { - proto = ComparatorProtos.LongComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray())); + /** + * @param pbBytes A pb serialized {@link LongComparator} instance + * @return An instance of {@link LongComparator} made from bytes + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @see #toByteArray + */ + public static LongComparator parseFrom(final byte[] pbBytes) throws DeserializationException { + ComparatorProtos.LongComparator proto; + try { + proto = ComparatorProtos.LongComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); } + return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray())); + } - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(LongComparator other) { - if (other == this) return true; - return super.areSerializedFieldsEqual(other); - } + /** + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(LongComparator other) { + if (other == this) return true; + return super.areSerializedFieldsEqual(other); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java index d1278021a397..1fc22cfcdf78 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; @@ -30,24 +29,25 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** * Filter to support scan multiple row key ranges. It can construct the row key ranges from the - * passed list which can be accessed by each region server. - * - * HBase is quite efficient when scanning only one small row key range. If user needs to specify - * multiple row key ranges in one scan, the typical solutions are: 1. through FilterList which is a - * list of row key Filters, 2. using the SQL layer over HBase to join with two table, such as hive, - * phoenix etc. However, both solutions are inefficient. Both of them can't utilize the range info - * to perform fast forwarding during scan which is quite time consuming. If the number of ranges - * are quite big (e.g. millions), join is a proper solution though it is slow. However, there are - * cases that user wants to specify a small number of ranges to scan (e.g. <1000 ranges). Both - * solutions can't provide satisfactory performance in such case. 
MultiRowRangeFilter is to support - * such usec ase (scan multiple row key ranges), which can construct the row key ranges from user - * specified list and perform fast-forwarding during scan. Thus, the scan will be quite efficient. + * passed list which can be accessed by each region server. HBase is quite efficient when scanning + * only one small row key range. If user needs to specify multiple row key ranges in one scan, the + * typical solutions are: 1. through FilterList which is a list of row key Filters, 2. using the SQL + * layer over HBase to join with two table, such as hive, phoenix etc. However, both solutions are + * inefficient. Both of them can't utilize the range info to perform fast forwarding during scan + * which is quite time consuming. If the number of ranges are quite big (e.g. millions), join is a + * proper solution though it is slow. However, there are cases that user wants to specify a small + * number of ranges to scan (e.g. <1000 ranges). Both solutions can't provide satisfactory + * performance in such case. MultiRowRangeFilter is to support such usec ase (scan multiple row key + * ranges), which can construct the row key ranges from user specified list and perform + * fast-forwarding during scan. Thus, the scan will be quite efficient. */ @InterfaceAudience.Public public class MultiRowRangeFilter extends FilterBase { @@ -73,13 +73,11 @@ public MultiRowRangeFilter(List list) { } /** - * Constructor for creating a MultiRowRangeFilter from multiple rowkey prefixes. - * - * As MultiRowRangeFilter javadoc says (See the solution 1 of the first statement), - * if you try to create a filter list that scans row keys corresponding to given prefixes (e.g., + * Constructor for creating a MultiRowRangeFilter from multiple rowkey prefixes. As + * MultiRowRangeFilter javadoc says (See the solution 1 of the first statement), if + * you try to create a filter list that scans row keys corresponding to given prefixes (e.g., * FilterList composed of multiple PrefixFilters), this constructor * provides a way to avoid creating an inefficient one. 
- * * @param rowKeyPrefixes the array of byte array */ public MultiRowRangeFilter(byte[][] rowKeyPrefixes) { @@ -92,7 +90,7 @@ private static List createRangeListFromRowKeyPrefixes(byte[][] rowKeyP } List list = new ArrayList<>(); - for (byte[] rowKeyPrefix: rowKeyPrefixes) { + for (byte[] rowKeyPrefix : rowKeyPrefixes) { byte[] stopRow = ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowKeyPrefix); list.add(new RowRange(rowKeyPrefix, true, stopRow, false)); } @@ -135,7 +133,7 @@ public boolean filterRowKey(Cell firstRowCell) { currentReturnCode = ReturnCode.NEXT_ROW; return false; } - if(index != ROW_BEFORE_FIRST_RANGE) { + if (index != ROW_BEFORE_FIRST_RANGE) { range = ranges.get(index); } else { range = ranges.get(0); @@ -146,7 +144,7 @@ public boolean filterRowKey(Cell firstRowCell) { return false; } if (!ranges.hasFoundFirstRange()) { - if(index != ROW_BEFORE_FIRST_RANGE) { + if (index != ROW_BEFORE_FIRST_RANGE) { currentReturnCode = ReturnCode.INCLUDE; } else { currentReturnCode = ReturnCode.SEEK_NEXT_USING_HINT; @@ -183,8 +181,8 @@ public Cell getNextCellHint(Cell currentKV) { */ @Override public byte[] toByteArray() { - FilterProtos.MultiRowRangeFilter.Builder builder = FilterProtos.MultiRowRangeFilter - .newBuilder(); + FilterProtos.MultiRowRangeFilter.Builder builder = + FilterProtos.MultiRowRangeFilter.newBuilder(); for (RowRange range : rangeList) { if (range != null) { FilterProtos.RowRange.Builder rangebuilder = FilterProtos.RowRange.newBuilder(); @@ -206,7 +204,7 @@ public byte[] toByteArray() { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException */ public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { FilterProtos.MultiRowRangeFilter proto; try { proto = FilterProtos.MultiRowRangeFilter.parseFrom(pbBytes); @@ -217,9 +215,11 @@ public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) List rangeProtos = proto.getRowRangeListList(); List rangeList = new ArrayList<>(length); for (FilterProtos.RowRange rangeProto : rangeProtos) { - RowRange range = new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow() - .toByteArray() : null, rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ? - rangeProto.getStopRow().toByteArray() : null, rangeProto.getStopRowInclusive()); + RowRange range = + new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow().toByteArray() : null, + rangeProto.getStartRowInclusive(), + rangeProto.hasStopRow() ? 
rangeProto.getStopRow().toByteArray() : null, + rangeProto.getStopRowInclusive()); rangeList.add(range); } return new MultiRowRangeFilter(rangeList); @@ -232,21 +232,20 @@ public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) - return true; - if (!(o instanceof MultiRowRangeFilter)) - return false; + if (o == this) return true; + if (!(o instanceof MultiRowRangeFilter)) return false; MultiRowRangeFilter other = (MultiRowRangeFilter) o; - if (this.rangeList.size() != other.rangeList.size()) - return false; + if (this.rangeList.size() != other.rangeList.size()) return false; for (int i = 0; i < rangeList.size(); ++i) { RowRange thisRange = this.rangeList.get(i); RowRange otherRange = other.rangeList.get(i); - if (!(Bytes.equals(thisRange.startRow, otherRange.startRow) && Bytes.equals( - thisRange.stopRow, otherRange.stopRow) && (thisRange.startRowInclusive == - otherRange.startRowInclusive) && (thisRange.stopRowInclusive == - otherRange.stopRowInclusive))) { + if ( + !(Bytes.equals(thisRange.startRow, otherRange.startRow) + && Bytes.equals(thisRange.stopRow, otherRange.stopRow) + && (thisRange.startRowInclusive == otherRange.startRowInclusive) + && (thisRange.stopRowInclusive == otherRange.stopRowInclusive)) + ) { return false; } } @@ -255,7 +254,6 @@ boolean areSerializedFieldsEqual(Filter o) { /** * sort the ranges and if the ranges with overlap, then merge them. - * * @param ranges the list of ranges to sort and merge. * @return the ranges after sort and merge. */ @@ -266,7 +264,7 @@ public static List sortAndMerge(List ranges) { List invalidRanges = new ArrayList<>(); List newRanges = new ArrayList<>(ranges.size()); Collections.sort(ranges); - if(ranges.get(0).isValid()) { + if (ranges.get(0).isValid()) { if (ranges.size() == 1) { newRanges.add(ranges.get(0)); } @@ -284,30 +282,32 @@ public static List sortAndMerge(List ranges) { if (!range.isValid()) { invalidRanges.add(range); } - if(Bytes.equals(lastStopRow, HConstants.EMPTY_BYTE_ARRAY)) { - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + if (Bytes.equals(lastStopRow, HConstants.EMPTY_BYTE_ARRAY)) { + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); break; } // with overlap in the ranges - if ((Bytes.compareTo(lastStopRow, range.startRow) > 0) || - (Bytes.compareTo(lastStopRow, range.startRow) == 0 && !(lastStopRowInclusive == false && - range.isStartRowInclusive() == false))) { - if(Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { + if ( + (Bytes.compareTo(lastStopRow, range.startRow) > 0) + || (Bytes.compareTo(lastStopRow, range.startRow) == 0 + && !(lastStopRowInclusive == false && range.isStartRowInclusive() == false)) + ) { + if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, range.stopRow, - range.stopRowInclusive)); + range.stopRowInclusive)); break; } // if first range contains second range, ignore the second range if (Bytes.compareTo(lastStopRow, range.stopRow) >= 0) { - if((Bytes.compareTo(lastStopRow, range.stopRow) == 0)) { - if(lastStopRowInclusive == true || range.stopRowInclusive == true) { + if ((Bytes.compareTo(lastStopRow, range.stopRow) == 0)) { + if (lastStopRowInclusive == true || range.stopRowInclusive == true) { lastStopRowInclusive = true; } } if ((i + 1) == ranges.size()) { - newRanges.add(new RowRange(lastStartRow, 
lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); } } else { lastStopRow = range.stopRow; @@ -319,19 +319,21 @@ public static List sortAndMerge(List ranges) { invalidRanges.add(range); } } else { - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); break; } - while ((Bytes.compareTo(lastStopRow, range.startRow) > 0) || - (Bytes.compareTo(lastStopRow, range.startRow) == 0 && - (lastStopRowInclusive == true || range.startRowInclusive==true))) { - if(Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { + while ( + (Bytes.compareTo(lastStopRow, range.startRow) > 0) + || (Bytes.compareTo(lastStopRow, range.startRow) == 0 + && (lastStopRowInclusive == true || range.startRowInclusive == true)) + ) { + if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { break; } // if this first range contain second range, ignore the second range if (Bytes.compareTo(lastStopRow, range.stopRow) >= 0) { - if(lastStopRowInclusive == true || range.stopRowInclusive == true) { + if (lastStopRowInclusive == true || range.stopRowInclusive == true) { lastStopRowInclusive = true; } i++; @@ -357,21 +359,23 @@ public static List sortAndMerge(List ranges) { } } } - if(Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { - if((Bytes.compareTo(lastStopRow, range.startRow) < 0) || - (Bytes.compareTo(lastStopRow, range.startRow) == 0 && - lastStopRowInclusive == false && range.startRowInclusive == false)) { + if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { + if ( + (Bytes.compareTo(lastStopRow, range.startRow) < 0) + || (Bytes.compareTo(lastStopRow, range.startRow) == 0 + && lastStopRowInclusive == false && range.startRowInclusive == false) + ) { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + lastStopRowInclusive)); newRanges.add(range); } else { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, range.stopRow, - range.stopRowInclusive)); + range.stopRowInclusive)); break; } } - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); if ((i + 1) == ranges.size()) { newRanges.add(range); } @@ -381,8 +385,8 @@ public static List sortAndMerge(List ranges) { lastStopRowInclusive = range.stopRowInclusive; } } else { - newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, - lastStopRowInclusive)); + newRanges.add( + new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); if ((i + 1) == ranges.size()) { newRanges.add(range); } @@ -393,8 +397,8 @@ public static List sortAndMerge(List ranges) { } } // check the remaining ranges - for(int j=i; j < ranges.size(); j++) { - if(!ranges.get(j).isValid()) { + for (int j = i; j < ranges.size(); j++) { + if (!ranges.get(j).isValid()) { invalidRanges.add(ranges.get(j)); } } @@ -403,21 +407,20 @@ public static List sortAndMerge(List ranges) { throwExceptionForInvalidRanges(invalidRanges, true); } // If no valid ranges found, throw the exception - if(newRanges.isEmpty()) { + if (newRanges.isEmpty()) { throw new IllegalArgumentException("No valid ranges found."); } return newRanges; } private static void 
throwExceptionForInvalidRanges(List invalidRanges, - boolean details) { + boolean details) { StringBuilder sb = new StringBuilder(); sb.append(invalidRanges.size()).append(" invaild ranges.\n"); if (details) { for (RowRange range : invalidRanges) { - sb.append( - "Invalid range: start row => " + Bytes.toString(range.startRow) + ", stop row => " - + Bytes.toString(range.stopRow)).append('\n'); + sb.append("Invalid range: start row => " + Bytes.toString(range.startRow) + ", stop row => " + + Bytes.toString(range.stopRow)).append('\n'); } } throw new IllegalArgumentException(sb.toString()); @@ -431,24 +434,30 @@ private static abstract class BasicRowRange implements Comparable public BasicRowRange() { } + /** * If the startRow is empty or null, set it to HConstants.EMPTY_BYTE_ARRAY, means begin at the * start row of the table. If the stopRow is empty or null, set it to * HConstants.EMPTY_BYTE_ARRAY, means end of the last row of table. */ public BasicRowRange(String startRow, boolean startRowInclusive, String stopRow, - boolean stopRowInclusive) { - this((startRow == null || startRow.isEmpty()) ? HConstants.EMPTY_BYTE_ARRAY : - Bytes.toBytes(startRow), startRowInclusive, - (stopRow == null || stopRow.isEmpty()) ? HConstants.EMPTY_BYTE_ARRAY : - Bytes.toBytes(stopRow), stopRowInclusive); - } - - public BasicRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, - boolean stopRowInclusive) { + boolean stopRowInclusive) { + this( + (startRow == null || startRow.isEmpty()) + ? HConstants.EMPTY_BYTE_ARRAY + : Bytes.toBytes(startRow), + startRowInclusive, + (stopRow == null || stopRow.isEmpty()) + ? HConstants.EMPTY_BYTE_ARRAY + : Bytes.toBytes(stopRow), + stopRowInclusive); + } + + public BasicRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, + boolean stopRowInclusive) { this.startRow = (startRow == null) ? HConstants.EMPTY_BYTE_ARRAY : startRow; this.startRowInclusive = startRowInclusive; - this.stopRow = (stopRow == null) ? HConstants.EMPTY_BYTE_ARRAY :stopRow; + this.stopRow = (stopRow == null) ? 
HConstants.EMPTY_BYTE_ARRAY : stopRow; this.stopRowInclusive = stopRowInclusive; } @@ -479,38 +488,38 @@ public boolean contains(byte[] row) { } public boolean contains(byte[] buffer, int offset, int length) { - if(startRowInclusive) { - if(stopRowInclusive) { + if (startRowInclusive) { + if (stopRowInclusive) { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) >= 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); } else { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) >= 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); } } else { - if(stopRowInclusive) { + if (stopRowInclusive) { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) > 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) <= 0); } else { return Bytes.compareTo(buffer, offset, length, startRow, 0, startRow.length) > 0 - && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) || - Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); + && (Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(buffer, offset, length, stopRow, 0, stopRow.length) < 0); } } } public boolean isValid() { return Bytes.equals(startRow, HConstants.EMPTY_BYTE_ARRAY) - || Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) - || Bytes.compareTo(startRow, stopRow) < 0 - || (Bytes.compareTo(startRow, stopRow) == 0 && stopRowInclusive == true); + || Bytes.equals(stopRow, HConstants.EMPTY_BYTE_ARRAY) + || Bytes.compareTo(startRow, stopRow) < 0 + || (Bytes.compareTo(startRow, stopRow) == 0 && stopRowInclusive == true); } @Override - public boolean equals(Object obj){ + public boolean equals(Object obj) { if (!(obj instanceof BasicRowRange)) { return false; } @@ -518,18 +527,16 @@ public boolean equals(Object obj){ return true; } BasicRowRange rr = (BasicRowRange) obj; - return Bytes.equals(this.stopRow, rr.getStopRow()) && - Bytes.equals(this.startRow, this.getStartRow()) && - this.startRowInclusive == rr.isStartRowInclusive() && - this.stopRowInclusive == rr.isStopRowInclusive(); + return Bytes.equals(this.stopRow, rr.getStopRow()) + && Bytes.equals(this.startRow, this.getStartRow()) + && this.startRowInclusive == rr.isStartRowInclusive() + && this.stopRowInclusive == rr.isStopRowInclusive(); } @Override public int hashCode() { - return Objects.hash(Bytes.hashCode(this.stopRow), - Bytes.hashCode(this.startRow), - this.startRowInclusive, - this.stopRowInclusive); + return Objects.hash(Bytes.hashCode(this.stopRow), Bytes.hashCode(this.startRow), + this.startRowInclusive, this.stopRowInclusive); } /** @@ -538,10 +545,8 @@ public int hashCode() { public abstract byte[] getComparisonData(); /** - * Returns whether the bounding row used for binary-search is inclusive or not. - * - * For forward scans, we would check the starRow, but we would check the stopRow for - * the reverse scan case. 
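And a sketch of explicit RowRange construction together with the sortAndMerge() normalization reformatted above: overlapping ranges are sorted and collapsed before use. The range bounds are illustrative.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
    import org.apache.hadoop.hbase.util.Bytes;

    List<RowRange> ranges = new ArrayList<>();
    ranges.add(new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("f"), false));
    ranges.add(new RowRange(Bytes.toBytes("d"), true, Bytes.toBytes("p"), false));
    // Overlapping [a,f) and [d,p) collapse into a single [a,p) range.
    List<RowRange> merged = MultiRowRangeFilter.sortAndMerge(ranges);
    MultiRowRangeFilter filter = new MultiRowRangeFilter(merged);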
+ * Returns whether the bounding row used for binary-search is inclusive or not. For forward + * scans, we would check the starRow, but we would check the stopRow for the reverse scan case. */ public abstract boolean isSearchRowInclusive(); @@ -567,8 +572,8 @@ public int compareTo(BasicRowRange other) { */ @InterfaceAudience.Private private static class ReversedRowRange extends BasicRowRange { - public ReversedRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, - boolean stopRowInclusive) { + public ReversedRowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, + boolean stopRowInclusive) { super(startRow, startRowInclusive, stopRow, stopRowInclusive); } @@ -592,18 +597,19 @@ public boolean isAscendingOrder() { public static class RowRange extends BasicRowRange { public RowRange() { } + /** * If the startRow is empty or null, set it to HConstants.EMPTY_BYTE_ARRAY, means begin at the * start row of the table. If the stopRow is empty or null, set it to * HConstants.EMPTY_BYTE_ARRAY, means end of the last row of table. */ public RowRange(String startRow, boolean startRowInclusive, String stopRow, - boolean stopRowInclusive) { + boolean stopRowInclusive) { super(startRow, startRowInclusive, stopRow, stopRowInclusive); } - public RowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, - boolean stopRowInclusive) { + public RowRange(byte[] startRow, boolean startRowInclusive, byte[] stopRow, + boolean stopRowInclusive) { super(startRow, startRowInclusive, stopRow, stopRowInclusive); } @@ -625,8 +631,8 @@ public boolean isAscendingOrder() { /** * Abstraction over the ranges of rows to return from this filter, regardless of forward or - * reverse scans being used. This Filter can use this class, agnostic of iteration direction, - * as the same algorithm can be applied in both cases. + * reverse scans being used. This Filter can use this class, agnostic of iteration direction, as + * the same algorithm can be applied in both cases. */ @InterfaceAudience.Private private static class RangeIteration { @@ -657,16 +663,15 @@ void initialize(boolean reversed) { /** * Rebuilds the sorted ranges (by startKey) into an equivalent sorted list of ranges, only by - * stopKey instead. Descending order and the ReversedRowRange compareTo implementation make - * sure that we can use Collections.binarySearch(). + * stopKey instead. Descending order and the ReversedRowRange compareTo implementation make sure + * that we can use Collections.binarySearch(). */ static List flipAndReverseRanges(List ranges) { List flippedRanges = new ArrayList<>(ranges.size()); for (int i = ranges.size() - 1; i >= 0; i--) { RowRange origRange = ranges.get(i); - ReversedRowRange newRowRange = new ReversedRowRange( - origRange.startRow, origRange.startRowInclusive, origRange.stopRow, - origRange.isStopRowInclusive()); + ReversedRowRange newRowRange = new ReversedRowRange(origRange.startRow, + origRange.startRowInclusive, origRange.stopRow, origRange.isStopRowInclusive()); flippedRanges.add(newRowRange); } return flippedRanges; @@ -674,7 +679,6 @@ static List flipAndReverseRanges(List ranges) { /** * Calculates the position where the given rowkey fits in the ranges list. 
- * * @param rowKey the row key to calculate * @return index the position of the row key */ @@ -705,7 +709,7 @@ public int getNextRangeIndex(byte[] rowKey) { return insertionPosition; } // the row key equals one of the start keys, and the the range exclude the start key - if(ranges.get(index).isSearchRowInclusive() == false) { + if (ranges.get(index).isSearchRowInclusive() == false) { exclusive = true; } return index; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java index fd2eb27c23f2..0d75d16bb3b8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -22,32 +22,33 @@ import java.util.Comparator; import java.util.Objects; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * This filter is used for selecting only those keys with columns that matches - * a particular prefix. For example, if prefix is 'an', it will pass keys will - * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. + * This filter is used for selecting only those keys with columns that matches a particular prefix. + * For example, if prefix is 'an', it will pass keys will columns like 'and', 'anti' but not keys + * with columns like 'ball', 'act'. 
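A short usage sketch of the behaviour described in the javadoc above; the prefixes are invented:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Keep only cells whose column qualifier starts with 'an' or 'co' (e.g. 'and', 'anti', 'cost'),
// dropping qualifiers such as 'ball' or 'act'.
byte[][] prefixes = { Bytes.toBytes("an"), Bytes.toBytes("co") };
Scan scan = new Scan();
scan.setFilter(new MultipleColumnPrefixFilter(prefixes));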
*/ @InterfaceAudience.Public public class MultipleColumnPrefixFilter extends FilterBase { private static final Logger LOG = LoggerFactory.getLogger(MultipleColumnPrefixFilter.class); - protected byte [] hint = null; - protected TreeSet sortedPrefixes = createTreeSet(); + protected byte[] hint = null; + protected TreeSet sortedPrefixes = createTreeSet(); private final static int MAX_LOG_PREFIXES = 5; - public MultipleColumnPrefixFilter(final byte [][] prefixes) { + public MultipleColumnPrefixFilter(final byte[][] prefixes) { if (prefixes != null) { for (byte[] prefix : prefixes) { if (!sortedPrefixes.add(prefix)) { @@ -58,11 +59,11 @@ public MultipleColumnPrefixFilter(final byte [][] prefixes) { } } - public byte [][] getPrefix() { + public byte[][] getPrefix() { int count = 0; - byte [][] temp = new byte [sortedPrefixes.size()][]; - for (byte [] prefixes : sortedPrefixes) { - temp [count++] = prefixes; + byte[][] temp = new byte[sortedPrefixes.size()][]; + for (byte[] prefixes : sortedPrefixes) { + temp[count++] = prefixes; } return temp; } @@ -83,17 +84,17 @@ public ReturnCode filterCell(final Cell c) { } public ReturnCode filterColumn(Cell cell) { - byte [] qualifier = CellUtil.cloneQualifier(cell); - TreeSet lesserOrEqualPrefixes = - (TreeSet) sortedPrefixes.headSet(qualifier, true); + byte[] qualifier = CellUtil.cloneQualifier(cell); + TreeSet lesserOrEqualPrefixes = + (TreeSet) sortedPrefixes.headSet(qualifier, true); if (lesserOrEqualPrefixes.size() != 0) { - byte [] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last(); - + byte[] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last(); + if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) { return ReturnCode.INCLUDE; } - + if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) { return ReturnCode.NEXT_ROW; } else { @@ -106,10 +107,10 @@ public ReturnCode filterColumn(Cell cell) { } } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - byte [][] prefixes = new byte [filterArguments.size()][]; - for (int i = 0 ; i < filterArguments.size(); i++) { - byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + byte[][] prefixes = new byte[filterArguments.size()][]; + for (int i = 0; i < filterArguments.size(); i++) { + byte[] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i)); prefixes[i] = columnPrefix; } return new MultipleColumnPrefixFilter(prefixes); @@ -119,10 +120,10 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.MultipleColumnPrefixFilter.Builder builder = FilterProtos.MultipleColumnPrefixFilter.newBuilder(); - for (byte [] element : sortedPrefixes) { + for (byte[] element : sortedPrefixes) { if (element != null) builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element)); } return builder.build().toByteArray(); @@ -130,12 +131,11 @@ public static Filter createFilterFromArguments(ArrayList filterArgument /** * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance - * @return An instance of {@link MultipleColumnPrefixFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link MultipleColumnPrefixFilter} made from bytes n * @see + * #toByteArray */ - public static MultipleColumnPrefixFilter 
parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.MultipleColumnPrefixFilter proto; try { proto = FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes); @@ -143,7 +143,7 @@ public static MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes) throw new DeserializationException(e); } int numPrefixes = proto.getSortedPrefixesCount(); - byte [][] prefixes = new byte[numPrefixes][]; + byte[][] prefixes = new byte[numPrefixes][]; for (int i = 0; i < numPrefixes; ++i) { prefixes[i] = proto.getSortedPrefixes(i).toByteArray(); } @@ -153,15 +153,15 @@ public static MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof MultipleColumnPrefixFilter)) return false; - MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter)o; + MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter) o; return this.sortedPrefixes.equals(other.sortedPrefixes); } @@ -170,18 +170,17 @@ public Cell getNextCellHint(Cell cell) { return PrivateCellUtil.createFirstOnRowCol(cell, hint, 0, hint.length); } - public TreeSet createTreeSet() { + public TreeSet createTreeSet() { return new TreeSet<>(new Comparator() { - @Override - public int compare (Object o1, Object o2) { - if (o1 == null || o2 == null) - throw new IllegalArgumentException ("prefixes can't be null"); - - byte [] b1 = (byte []) o1; - byte [] b2 = (byte []) o2; - return Bytes.compareTo (b1, 0, b1.length, b2, 0, b2.length); - } - }); + @Override + public int compare(Object o1, Object o2) { + if (o1 == null || o2 == null) throw new IllegalArgumentException("prefixes can't be null"); + + byte[] b1 = (byte[]) o1; + byte[] b2 = (byte[]) o2; + return Bytes.compareTo(b1, 0, b1.length, b2, 0, b2.length); + } + }); } @Override @@ -204,8 +203,8 @@ protected String toString(int maxPrefixes) { } } - return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), - count, this.sortedPrefixes.size(), prefixes.toString()); + return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), count, + this.sortedPrefixes.size(), prefixes.toString()); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java index de3edb9d7c19..fc0562ecb3e9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * A binary comparator which lexicographically compares against the specified - * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. + * A binary comparator which lexicographically compares against the specified byte array using + * {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. */ @InterfaceAudience.Public @SuppressWarnings("ComparableType") // Should this move to Comparator usage? @@ -45,7 +43,7 @@ public int compareTo(byte[] value) { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="EQ_UNUSUAL", justification="") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_UNUSUAL", justification = "") public boolean equals(Object obj) { return obj == null; } @@ -69,22 +67,19 @@ public int compareTo(ByteBuffer value, int offset, int length) { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { - ComparatorProtos.NullComparator.Builder builder = - ComparatorProtos.NullComparator.newBuilder(); + public byte[] toByteArray() { + ComparatorProtos.NullComparator.Builder builder = ComparatorProtos.NullComparator.newBuilder(); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link NullComparator} instance - * @return An instance of {@link NullComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link NullComparator} made from bytes n * @see + * #toByteArray */ - public static NullComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static NullComparator parseFrom(final byte[] pbBytes) throws DeserializationException { try { - // Just parse. Don't use what we parse since on end we are returning new NullComparator. + // Just parse. Don't use what we parse since on end we are returning new NullComparator. ComparatorProtos.NullComparator.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); @@ -93,9 +88,8 @@ public static NullComparator parseFrom(final byte [] pbBytes) } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java index 98831c6f5baf..445adf2129e9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,25 +20,23 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * Implementation of Filter interface that limits results to a specific page - * size. It terminates scanning once the number of filter-passed rows is > - * the given page size. + * Implementation of Filter interface that limits results to a specific page size. It terminates + * scanning once the number of filter-passed rows is > the given page size. *

      - * Note that this filter cannot guarantee that the number of results returned - * to a client are <= page size. This is because the filter is applied - * separately on different region servers. It does however optimize the scan of - * individual HRegions by making sure that the page size is never exceeded - * locally. + * Note that this filter cannot guarantee that the number of results returned to a client are <= + * page size. This is because the filter is applied separately on different region servers. It does + * however optimize the scan of individual HRegions by making sure that the page size is never + * exceeded locally. */ @InterfaceAudience.Public public class PageFilter extends FilterBase { @@ -48,7 +45,6 @@ public class PageFilter extends FilterBase { /** * Constructor that takes a maximum page size. - * * @param pageSize Maximum result size. */ public PageFilter(final long pageSize) { @@ -82,15 +78,15 @@ public boolean filterRow() { this.rowsAccepted++; return this.rowsAccepted > this.pageSize; } - + @Override public boolean hasFilterRow() { return true; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); long pageSize = ParseFilter.convertByteArrayToLong(filterArguments.get(0)); return new PageFilter(pageSize); } @@ -99,21 +95,17 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.PageFilter.Builder builder = - FilterProtos.PageFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.PageFilter.Builder builder = FilterProtos.PageFilter.newBuilder(); builder.setPageSize(this.pageSize); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link PageFilter} instance - * @return An instance of {@link PageFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link PageFilter} made from bytes n * @see #toByteArray */ - public static PageFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.PageFilter proto; try { proto = FilterProtos.PageFilter.parseFrom(pbBytes); @@ -126,7 +118,7 @@ public static PageFilter parseFrom(final byte [] pbBytes) /** * @param o other Filter to compare with * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * corresponding fields in other. Used for testing. 
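A hedged usage sketch matching the caveat in the PageFilter javadoc above: the page size is only enforced per region server, so the client still has to cap the total itself. The page size of 25 is arbitrary:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;

Scan scan = new Scan();
scan.setFilter(new PageFilter(25));
// Each region server stops after 25 filter-passed rows, but several region servers may
// contribute results, so stop reading on the client once 25 rows have been collected.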
*/ @Override boolean areSerializedFieldsEqual(Filter o) { @@ -137,7 +129,7 @@ boolean areSerializedFieldsEqual(Filter o) { return false; } - PageFilter other = (PageFilter)o; + PageFilter other = (PageFilter) o; return this.getPageSize() == other.getPageSize(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index b9132a3ba295..0a304481ec18 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,11 @@ package org.apache.hadoop.hbase.filter; import java.nio.ByteBuffer; - import org.apache.yetus.audience.InterfaceAudience; /** - * ParseConstants holds a bunch of constants related to parsing Filter Strings - * Used by {@link ParseFilter} + * ParseConstants holds a bunch of constants related to parsing Filter Strings Used by + * {@link ParseFilter} */ @InterfaceAudience.Public public final class ParseConstants { @@ -97,7 +95,7 @@ public final class ParseConstants { /** * SKIP Array */ - public static final byte [] SKIP_ARRAY = new byte [ ] {'S', 'K', 'I', 'P'}; + public static final byte[] SKIP_ARRAY = new byte[] { 'S', 'K', 'I', 'P' }; public static final ByteBuffer SKIP_BUFFER = ByteBuffer.wrap(SKIP_ARRAY); /** @@ -123,19 +121,19 @@ public final class ParseConstants { /** * WHILE Array */ - public static final byte [] WHILE_ARRAY = new byte [] {'W', 'H', 'I', 'L', 'E'}; + public static final byte[] WHILE_ARRAY = new byte[] { 'W', 'H', 'I', 'L', 'E' }; public static final ByteBuffer WHILE_BUFFER = ByteBuffer.wrap(WHILE_ARRAY); /** * OR Array */ - public static final byte [] OR_ARRAY = new byte [] {'O','R'}; + public static final byte[] OR_ARRAY = new byte[] { 'O', 'R' }; public static final ByteBuffer OR_BUFFER = ByteBuffer.wrap(OR_ARRAY); /** * AND Array */ - public static final byte [] AND_ARRAY = new byte [] {'A','N', 'D'}; + public static final byte[] AND_ARRAY = new byte[] { 'A', 'N', 'D' }; public static final ByteBuffer AND_BUFFER = ByteBuffer.wrap(AND_ARRAY); /** @@ -156,39 +154,39 @@ public final class ParseConstants { /** * LESS_THAN Array */ - public static final byte [] LESS_THAN_ARRAY = new byte [] {'<'}; + public static final byte[] LESS_THAN_ARRAY = new byte[] { '<' }; public static final ByteBuffer LESS_THAN_BUFFER = ByteBuffer.wrap(LESS_THAN_ARRAY); /** * LESS_THAN_OR_EQUAL_TO Array */ - public static final byte [] LESS_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'<', '='}; + public static final byte[] LESS_THAN_OR_EQUAL_TO_ARRAY = new byte[] { '<', '=' }; public static final ByteBuffer LESS_THAN_OR_EQUAL_TO_BUFFER = ByteBuffer.wrap(LESS_THAN_OR_EQUAL_TO_ARRAY); /** * GREATER_THAN Array */ - public static final byte [] GREATER_THAN_ARRAY = new byte [] {'>'}; + public static final byte[] GREATER_THAN_ARRAY = new byte[] { '>' }; public static final ByteBuffer GREATER_THAN_BUFFER = ByteBuffer.wrap(GREATER_THAN_ARRAY); /** * GREATER_THAN_OR_EQUAL_TO Array */ - public static final byte [] GREATER_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'>', '='}; + public static final byte[] GREATER_THAN_OR_EQUAL_TO_ARRAY = new byte[] { '>', '=' }; public static final ByteBuffer GREATER_THAN_OR_EQUAL_TO_BUFFER = 
ByteBuffer.wrap(GREATER_THAN_OR_EQUAL_TO_ARRAY); /** * EQUAL_TO Array */ - public static final byte [] EQUAL_TO_ARRAY = new byte [] {'='}; + public static final byte[] EQUAL_TO_ARRAY = new byte[] { '=' }; public static final ByteBuffer EQUAL_TO_BUFFER = ByteBuffer.wrap(EQUAL_TO_ARRAY); /** * NOT_EQUAL_TO Array */ - public static final byte [] NOT_EQUAL_TO_ARRAY = new byte [] {'!', '='}; + public static final byte[] NOT_EQUAL_TO_ARRAY = new byte[] { '!', '=' }; public static final ByteBuffer NOT_EQUAL_TO_BUFFER = ByteBuffer.wrap(NOT_EQUAL_TO_ARRAY); /** @@ -199,17 +197,17 @@ public final class ParseConstants { /** * AND Byte Array */ - public static final byte [] AND = new byte [] {'A','N','D'}; + public static final byte[] AND = new byte[] { 'A', 'N', 'D' }; /** * OR Byte Array */ - public static final byte [] OR = new byte [] {'O', 'R'}; + public static final byte[] OR = new byte[] { 'O', 'R' }; /** * LPAREN Array */ - public static final byte [] LPAREN_ARRAY = new byte [] {'('}; + public static final byte[] LPAREN_ARRAY = new byte[] { '(' }; public static final ByteBuffer LPAREN_BUFFER = ByteBuffer.wrap(LPAREN_ARRAY); /** @@ -230,31 +228,31 @@ public final class ParseConstants { /** * BinaryType byte array */ - public static final byte [] binaryType = new byte [] {'b','i','n','a','r','y'}; + public static final byte[] binaryType = new byte[] { 'b', 'i', 'n', 'a', 'r', 'y' }; /** * BinaryPrefixType byte array */ - public static final byte [] binaryPrefixType = new byte [] {'b','i','n','a','r','y', - 'p','r','e','f','i','x'}; + public static final byte[] binaryPrefixType = + new byte[] { 'b', 'i', 'n', 'a', 'r', 'y', 'p', 'r', 'e', 'f', 'i', 'x' }; /** * RegexStringType byte array */ - public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', - 's','t','r','i','n','g'}; + public static final byte[] regexStringType = + new byte[] { 'r', 'e', 'g', 'e', 'x', 's', 't', 'r', 'i', 'n', 'g' }; /** * RegexStringNoCaseType byte array */ - public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', - 's','t','r','i','n','g', - 'n','o','c','a','s','e'}; + public static final byte[] regexStringNoCaseType = new byte[] { 'r', 'e', 'g', 'e', 'x', 's', 't', + 'r', 'i', 'n', 'g', 'n', 'o', 'c', 'a', 's', 'e' }; /** * SubstringType byte array */ - public static final byte [] substringType = new byte [] {'s','u','b','s','t','r','i','n','g'}; + public static final byte[] substringType = + new byte[] { 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g' }; /** * ASCII for Minus Sign diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index e06c6b5c4139..b08ce971c213 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,10 +27,9 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; -import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; - +import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -39,13 +37,12 @@ import org.slf4j.LoggerFactory; /** - * This class allows a user to specify a filter via a string - * The string is parsed using the methods of this class and - * a filter object is constructed. This filter object is then wrapped - * in a scanner object which is then returned + * This class allows a user to specify a filter via a string The string is parsed using the methods + * of this class and a filter object is constructed. This filter object is then wrapped in a scanner + * object which is then returned *
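A minimal sketch of the string-based entry point this class provides; the filter string is an invented example in the HBASE-4176 filter language:

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;

// parseFilterString throws CharacterCodingException; the string combines two simple
// filter expressions with the AND operator, as the grammar described below allows.
ParseFilter parser = new ParseFilter();
Filter filter = parser.parseFilterString("PrefixFilter('row-') AND PageFilter(10)");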

      - * This class addresses the HBASE-4168 JIRA. More documentation on this - * Filter Language can be found at: https://issues.apache.org/jira/browse/HBASE-4176 + * This class addresses the HBASE-4168 JIRA. More documentation on this Filter Language can be found + * at: https://issues.apache.org/jira/browse/HBASE-4176 */ @InterfaceAudience.Public public class ParseFilter { @@ -57,44 +54,36 @@ public class ParseFilter { static { // Registers all the filter supported by the Filter Language filterHashMap = new HashMap<>(); - filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + - "KeyOnlyFilter"); - filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + - "FirstKeyOnlyFilter"); - filterHashMap.put("PrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "PrefixFilter"); - filterHashMap.put("ColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnPrefixFilter"); - filterHashMap.put("MultipleColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "MultipleColumnPrefixFilter"); - filterHashMap.put("ColumnCountGetFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnCountGetFilter"); - filterHashMap.put("PageFilter", ParseConstants.FILTER_PACKAGE + "." + - "PageFilter"); - filterHashMap.put("ColumnPaginationFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnPaginationFilter"); - filterHashMap.put("InclusiveStopFilter", ParseConstants.FILTER_PACKAGE + "." + - "InclusiveStopFilter"); - filterHashMap.put("TimestampsFilter", ParseConstants.FILTER_PACKAGE + "." + - "TimestampsFilter"); - filterHashMap.put("RowFilter", ParseConstants.FILTER_PACKAGE + "." + - "RowFilter"); - filterHashMap.put("FamilyFilter", ParseConstants.FILTER_PACKAGE + "." + - "FamilyFilter"); - filterHashMap.put("QualifierFilter", ParseConstants.FILTER_PACKAGE + "." + - "QualifierFilter"); - filterHashMap.put("ValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "ValueFilter"); - filterHashMap.put("ColumnRangeFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnRangeFilter"); - filterHashMap.put("SingleColumnValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "SingleColumnValueFilter"); - filterHashMap.put("SingleColumnValueExcludeFilter", ParseConstants.FILTER_PACKAGE + "." + - "SingleColumnValueExcludeFilter"); - filterHashMap.put("DependentColumnFilter", ParseConstants.FILTER_PACKAGE + "." + - "DependentColumnFilter"); - filterHashMap.put("ColumnValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnValueFilter"); + filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + "KeyOnlyFilter"); + filterHashMap.put("FirstKeyOnlyFilter", + ParseConstants.FILTER_PACKAGE + "." + "FirstKeyOnlyFilter"); + filterHashMap.put("PrefixFilter", ParseConstants.FILTER_PACKAGE + "." + "PrefixFilter"); + filterHashMap.put("ColumnPrefixFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnPrefixFilter"); + filterHashMap.put("MultipleColumnPrefixFilter", + ParseConstants.FILTER_PACKAGE + "." + "MultipleColumnPrefixFilter"); + filterHashMap.put("ColumnCountGetFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnCountGetFilter"); + filterHashMap.put("PageFilter", ParseConstants.FILTER_PACKAGE + "." + "PageFilter"); + filterHashMap.put("ColumnPaginationFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnPaginationFilter"); + filterHashMap.put("InclusiveStopFilter", + ParseConstants.FILTER_PACKAGE + "." + "InclusiveStopFilter"); + filterHashMap.put("TimestampsFilter", ParseConstants.FILTER_PACKAGE + "." 
+ "TimestampsFilter"); + filterHashMap.put("RowFilter", ParseConstants.FILTER_PACKAGE + "." + "RowFilter"); + filterHashMap.put("FamilyFilter", ParseConstants.FILTER_PACKAGE + "." + "FamilyFilter"); + filterHashMap.put("QualifierFilter", ParseConstants.FILTER_PACKAGE + "." + "QualifierFilter"); + filterHashMap.put("ValueFilter", ParseConstants.FILTER_PACKAGE + "." + "ValueFilter"); + filterHashMap.put("ColumnRangeFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnRangeFilter"); + filterHashMap.put("SingleColumnValueFilter", + ParseConstants.FILTER_PACKAGE + "." + "SingleColumnValueFilter"); + filterHashMap.put("SingleColumnValueExcludeFilter", + ParseConstants.FILTER_PACKAGE + "." + "SingleColumnValueExcludeFilter"); + filterHashMap.put("DependentColumnFilter", + ParseConstants.FILTER_PACKAGE + "." + "DependentColumnFilter"); + filterHashMap.put("ColumnValueFilter", + ParseConstants.FILTER_PACKAGE + "." + "ColumnValueFilter"); // Creates the operatorPrecedenceHashMap operatorPrecedenceHashMap = new HashMap<>(); @@ -110,8 +99,7 @@ public class ParseFilter { * @param filterString filter string given by the user * @return filter object we constructed */ - public Filter parseFilterString (String filterString) - throws CharacterCodingException { + public Filter parseFilterString(String filterString) throws CharacterCodingException { return parseFilterString(Bytes.toBytes(filterString)); } @@ -121,20 +109,21 @@ public Filter parseFilterString (String filterString) * @param filterStringAsByteArray filter string given by the user * @return filter object we constructed */ - public Filter parseFilterString (byte [] filterStringAsByteArray) - throws CharacterCodingException { + public Filter parseFilterString(byte[] filterStringAsByteArray) throws CharacterCodingException { // stack for the operators and parenthesis - Stack operatorStack = new Stack<>(); + Stack operatorStack = new Stack<>(); // stack for the filter objects - Stack filterStack = new Stack<>(); + Stack filterStack = new Stack<>(); Filter filter = null; - for (int i=0; i - * A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') - * The user given filter string can have many simpleFilterExpressions combined - * using operators. - *

      - * This function extracts a simpleFilterExpression from the - * larger filterString given the start offset of the simpler expression - *

      - * @param filterStringAsByteArray filter string given by the user - * @param filterExpressionStartOffset start index of the simple filter expression - * @return byte array containing the simple filter expression - */ - public byte [] extractFilterSimpleExpression (byte [] filterStringAsByteArray, - int filterExpressionStartOffset) - throws CharacterCodingException { + /** + * Extracts a simple filter expression from the filter string given by the user + *

      + * A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') The user given filter + * string can have many simpleFilterExpressions combined using operators. + *

      + * This function extracts a simpleFilterExpression from the larger filterString given the start + * offset of the simpler expression + *

      + * @param filterStringAsByteArray filter string given by the user + * @param filterExpressionStartOffset start index of the simple filter expression + * @return byte array containing the simple filter expression + */ + public byte[] extractFilterSimpleExpression(byte[] filterStringAsByteArray, + int filterExpressionStartOffset) throws CharacterCodingException { int quoteCount = 0; - for (int i=filterExpressionStartOffset; i - * @param filterStringAsByteArray filter string given by the user - * @return filter object we constructed - */ - public Filter parseSimpleFilterExpression (byte [] filterStringAsByteArray) + /** + * Constructs a filter object given a simple filter expression + *

      + * @param filterStringAsByteArray filter string given by the user + * @return filter object we constructed + */ + public Filter parseSimpleFilterExpression(byte[] filterStringAsByteArray) throws CharacterCodingException { String filterName = Bytes.toString(getFilterName(filterStringAsByteArray)); @@ -265,22 +252,24 @@ public Filter parseSimpleFilterExpression (byte [] filterStringAsByteArray) LOG.error("Method {} threw an exception for {}", methodName, filterName, e); } throw new IllegalArgumentException( - "Incorrect filter string " + new String(filterStringAsByteArray, StandardCharsets.UTF_8)); + "Incorrect filter string " + new String(filterStringAsByteArray, StandardCharsets.UTF_8)); } -/** - * Returns the filter name given a simple filter expression - *

      - * @param filterStringAsByteArray a simple filter expression - * @return name of filter in the simple filter expression - */ - public static byte [] getFilterName (byte [] filterStringAsByteArray) { + /** + * Returns the filter name given a simple filter expression + *

      + * @param filterStringAsByteArray a simple filter expression + * @return name of filter in the simple filter expression + */ + public static byte[] getFilterName(byte[] filterStringAsByteArray) { int filterNameStartIndex = 0; int filterNameEndIndex = 0; - for (int i=filterNameStartIndex; i - * @param filterStringAsByteArray filter string given by the user - * @return an ArrayList containing the arguments of the filter in the filter string - */ - public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { + /** + * Returns the arguments of the filter from the filter string + *

      + * @param filterStringAsByteArray filter string given by the user + * @return an ArrayList containing the arguments of the filter in the filter string + */ + public static ArrayList getFilterArguments(byte[] filterStringAsByteArray) { int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0, - filterStringAsByteArray.length, - ParseConstants.LPAREN); + filterStringAsByteArray.length, ParseConstants.LPAREN); if (argumentListStartIndex == -1) { throw new IllegalArgumentException("Incorrect argument list"); } int argumentStartIndex = 0; int argumentEndIndex = 0; - ArrayList filterArguments = new ArrayList<>(); + ArrayList filterArguments = new ArrayList<>(); - for (int i = argumentListStartIndex + 1; i, != etc argumentStartIndex = i; for (int j = argumentStartIndex; j < filterStringAsByteArray.length; j++) { - if (filterStringAsByteArray[j] == ParseConstants.WHITESPACE || - filterStringAsByteArray[j] == ParseConstants.COMMA || - filterStringAsByteArray[j] == ParseConstants.RPAREN) { + if ( + filterStringAsByteArray[j] == ParseConstants.WHITESPACE + || filterStringAsByteArray[j] == ParseConstants.COMMA + || filterStringAsByteArray[j] == ParseConstants.RPAREN + ) { argumentEndIndex = j - 1; i = j; - byte [] filterArgument = new byte [argumentEndIndex - argumentStartIndex + 1]; - Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, - argumentStartIndex, argumentEndIndex - argumentStartIndex + 1); + byte[] filterArgument = new byte[argumentEndIndex - argumentStartIndex + 1]; + Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, argumentStartIndex, + argumentEndIndex - argumentStartIndex + 1); filterArguments.add(filterArgument); break; } else if (j == filterStringAsByteArray.length - 1) { @@ -365,19 +357,19 @@ public Filter parseSimpleFilterExpression (byte [] filterStringAsByteArray) return filterArguments; } -/** - * This function is called while parsing the filterString and an operator is parsed - *

      - * @param operatorStack the stack containing the operators and parenthesis - * @param filterStack the stack containing the filters - * @param operator the operator found while parsing the filterString - */ - public void reduce(Stack operatorStack, - Stack filterStack, - ByteBuffer operator) { - while (!operatorStack.empty() && - !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) && - hasHigherPriority(operatorStack.peek(), operator)) { + /** + * This function is called while parsing the filterString and an operator is parsed + *

      + * @param operatorStack the stack containing the operators and parenthesis + * @param filterStack the stack containing the filters + * @param operator the operator found while parsing the filterString + */ + public void reduce(Stack operatorStack, Stack filterStack, + ByteBuffer operator) { + while ( + !operatorStack.empty() && !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) + && hasHigherPriority(operatorStack.peek(), operator) + ) { filterStack.push(popArguments(operatorStack, filterStack)); } } @@ -387,10 +379,10 @@ public void reduce(Stack operatorStack, * from the filterStack and evaluates them *

      * @param operatorStack the stack containing the operators - * @param filterStack the stack containing the filters + * @param filterStack the stack containing the filters * @return the evaluated filter */ - public static Filter popArguments (Stack operatorStack, Stack filterStack) { + public static Filter popArguments(Stack operatorStack, Stack filterStack) { ByteBuffer argumentOnTopOfStack = operatorStack.peek(); if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) { @@ -452,7 +444,7 @@ public static Filter popArguments (Stack operatorStack, Stack operatorStack, Stack - * If a has higher precedence than b, it returns true - * If they have the same precedence, it returns false - */ + /** + * Returns which operator has higher precedence + *

      + * If a has higher precedence than b, it returns true If they have the same precedence, it returns + * false + */ public boolean hasHigherPriority(ByteBuffer a, ByteBuffer b) { if ((operatorPrecedenceHashMap.get(a) - operatorPrecedenceHashMap.get(b)) < 0) { return true; @@ -477,62 +469,65 @@ public boolean hasHigherPriority(ByteBuffer a, ByteBuffer b) { return false; } -/** - * Removes the single quote escaping a single quote - thus it returns an unescaped argument - *

      - * @param filterStringAsByteArray filter string given by user - * @param argumentStartIndex start index of the argument - * @param argumentEndIndex end index of the argument - * @return returns an unescaped argument - */ - public static byte [] createUnescapdArgument (byte [] filterStringAsByteArray, - int argumentStartIndex, int argumentEndIndex) { + /** + * Removes the single quote escaping a single quote - thus it returns an unescaped argument + *

      + * @param filterStringAsByteArray filter string given by user + * @param argumentStartIndex start index of the argument + * @param argumentEndIndex end index of the argument + * @return returns an unescaped argument + */ + public static byte[] createUnescapdArgument(byte[] filterStringAsByteArray, + int argumentStartIndex, int argumentEndIndex) { int unescapedArgumentLength = 2; for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { - unescapedArgumentLength ++; - if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && - i != (argumentEndIndex - 1) && - filterStringAsByteArray[i+1] == ParseConstants.SINGLE_QUOTE) { + unescapedArgumentLength++; + if ( + filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && i != (argumentEndIndex - 1) + && filterStringAsByteArray[i + 1] == ParseConstants.SINGLE_QUOTE + ) { i++; continue; } } - byte [] unescapedArgument = new byte [unescapedArgumentLength]; + byte[] unescapedArgument = new byte[unescapedArgumentLength]; int count = 1; unescapedArgument[0] = '\''; for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { - if (filterStringAsByteArray [i] == ParseConstants.SINGLE_QUOTE && - i != (argumentEndIndex - 1) && - filterStringAsByteArray [i+1] == ParseConstants.SINGLE_QUOTE) { - unescapedArgument[count++] = filterStringAsByteArray [i+1]; + if ( + filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && i != (argumentEndIndex - 1) + && filterStringAsByteArray[i + 1] == ParseConstants.SINGLE_QUOTE + ) { + unescapedArgument[count++] = filterStringAsByteArray[i + 1]; i++; - } - else { - unescapedArgument[count++] = filterStringAsByteArray [i]; + } else { + unescapedArgument[count++] = filterStringAsByteArray[i]; } } unescapedArgument[unescapedArgumentLength - 1] = '\''; return unescapedArgument; } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'OR' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfOr index at which an 'O' was read - * @return true if the keyword 'OR' is at the current index - */ - public static boolean checkForOr (byte [] filterStringAsByteArray, int indexOfOr) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'OR' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfOr index at which an 'O' was read + * @return true if the keyword 'OR' is at the current index + */ + public static boolean checkForOr(byte[] filterStringAsByteArray, int indexOfOr) throws CharacterCodingException, ArrayIndexOutOfBoundsException { try { - if (filterStringAsByteArray[indexOfOr] == ParseConstants.O && - filterStringAsByteArray[indexOfOr+1] == ParseConstants.R && - (filterStringAsByteArray[indexOfOr-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfOr-1] == ParseConstants.RPAREN) && - (filterStringAsByteArray[indexOfOr+2] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfOr+2] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfOr] == ParseConstants.O + && filterStringAsByteArray[indexOfOr + 1] == ParseConstants.R + && (filterStringAsByteArray[indexOfOr - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfOr - 1] == ParseConstants.RPAREN) + && (filterStringAsByteArray[indexOfOr + 2] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfOr + 2] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -542,24 +537,26 @@ public static boolean checkForOr (byte [] filterStringAsByteArray, int indexOfOr } } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'AND' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfAnd index at which an 'A' was read - * @return true if the keyword 'AND' is at the current index - */ - public static boolean checkForAnd (byte [] filterStringAsByteArray, int indexOfAnd) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'AND' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfAnd index at which an 'A' was read + * @return true if the keyword 'AND' is at the current index + */ + public static boolean checkForAnd(byte[] filterStringAsByteArray, int indexOfAnd) throws CharacterCodingException { try { - if (filterStringAsByteArray[indexOfAnd] == ParseConstants.A && - filterStringAsByteArray[indexOfAnd+1] == ParseConstants.N && - filterStringAsByteArray[indexOfAnd+2] == ParseConstants.D && - (filterStringAsByteArray[indexOfAnd-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfAnd-1] == ParseConstants.RPAREN) && - (filterStringAsByteArray[indexOfAnd+3] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfAnd+3] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfAnd] == ParseConstants.A + && filterStringAsByteArray[indexOfAnd + 1] == ParseConstants.N + && filterStringAsByteArray[indexOfAnd + 2] == ParseConstants.D + && (filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.RPAREN) + && (filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -569,27 +566,29 @@ public static boolean checkForAnd (byte [] filterStringAsByteArray, int indexOfA } } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfSkip index at which an 'S' was read - * @return true if the keyword 'SKIP' is at the current index - */ - public static boolean checkForSkip (byte [] filterStringAsByteArray, int indexOfSkip) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfSkip index at which an 'S' was read + * @return true if the keyword 'SKIP' is at the current index + */ + public static boolean checkForSkip(byte[] filterStringAsByteArray, int indexOfSkip) throws CharacterCodingException { try { - if (filterStringAsByteArray[indexOfSkip] == ParseConstants.S && - filterStringAsByteArray[indexOfSkip+1] == ParseConstants.K && - filterStringAsByteArray[indexOfSkip+2] == ParseConstants.I && - filterStringAsByteArray[indexOfSkip+3] == ParseConstants.P && - (indexOfSkip == 0 || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.RPAREN || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.LPAREN) && - (filterStringAsByteArray[indexOfSkip+4] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfSkip+4] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfSkip] == ParseConstants.S + && filterStringAsByteArray[indexOfSkip + 1] == ParseConstants.K + && filterStringAsByteArray[indexOfSkip + 2] == ParseConstants.I + && filterStringAsByteArray[indexOfSkip + 3] == ParseConstants.P + && (indexOfSkip == 0 + || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.RPAREN + || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.LPAREN) + && (filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -599,27 +598,30 @@ public static boolean checkForSkip (byte [] filterStringAsByteArray, int indexOf } } -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE' - *

      - * @param filterStringAsByteArray filter string given by the user - * @param indexOfWhile index at which an 'W' was read - * @return true if the keyword 'WHILE' is at the current index - */ - public static boolean checkForWhile (byte [] filterStringAsByteArray, int indexOfWhile) + /** + * Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfWhile index at which an 'W' was read + * @return true if the keyword 'WHILE' is at the current index + */ + public static boolean checkForWhile(byte[] filterStringAsByteArray, int indexOfWhile) throws CharacterCodingException { try { - if (filterStringAsByteArray[indexOfWhile] == ParseConstants.W && - filterStringAsByteArray[indexOfWhile+1] == ParseConstants.H && - filterStringAsByteArray[indexOfWhile+2] == ParseConstants.I && - filterStringAsByteArray[indexOfWhile+3] == ParseConstants.L && - filterStringAsByteArray[indexOfWhile+4] == ParseConstants.E && - (indexOfWhile == 0 || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.WHITESPACE - || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.RPAREN || - filterStringAsByteArray[indexOfWhile-1] == ParseConstants.LPAREN) && - (filterStringAsByteArray[indexOfWhile+5] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfWhile+5] == ParseConstants.LPAREN)) { + if ( + filterStringAsByteArray[indexOfWhile] == ParseConstants.W + && filterStringAsByteArray[indexOfWhile + 1] == ParseConstants.H + && filterStringAsByteArray[indexOfWhile + 2] == ParseConstants.I + && filterStringAsByteArray[indexOfWhile + 3] == ParseConstants.L + && filterStringAsByteArray[indexOfWhile + 4] == ParseConstants.E + && (indexOfWhile == 0 + || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.RPAREN + || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.LPAREN) + && (filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.LPAREN) + ) { return true; } else { return false; @@ -629,57 +631,56 @@ public static boolean checkForWhile (byte [] filterStringAsByteArray, int indexO } } -/** - * Returns a boolean indicating whether the quote was escaped or not - *

      - * @param array byte array in which the quote was found - * @param quoteIndex index of the single quote - * @return returns true if the quote was unescaped - */ - public static boolean isQuoteUnescaped (byte [] array, int quoteIndex) { + /** + * Returns a boolean indicating whether the quote was escaped or not + *

      + * @param array byte array in which the quote was found + * @param quoteIndex index of the single quote + * @return returns true if the quote was unescaped + */ + public static boolean isQuoteUnescaped(byte[] array, int quoteIndex) { if (array == null) { throw new IllegalArgumentException("isQuoteUnescaped called with a null array"); } - if (quoteIndex == array.length - 1 || array[quoteIndex+1] != ParseConstants.SINGLE_QUOTE) { + if (quoteIndex == array.length - 1 || array[quoteIndex + 1] != ParseConstants.SINGLE_QUOTE) { return true; - } - else { + } else { return false; } } -/** - * Takes a quoted byte array and converts it into an unquoted byte array - * For example: given a byte array representing 'abc', it returns a - * byte array representing abc - *

      - * @param quotedByteArray the quoted byte array - * @return Unquoted byte array - */ - public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) { - if (quotedByteArray == null || - quotedByteArray.length < 2 || - quotedByteArray[0] != ParseConstants.SINGLE_QUOTE || - quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE) { + /** + * Takes a quoted byte array and converts it into an unquoted byte array For example: given a byte + * array representing 'abc', it returns a byte array representing abc + *

      + * @param quotedByteArray the quoted byte array + * @return Unquoted byte array + */ + public static byte[] removeQuotesFromByteArray(byte[] quotedByteArray) { + if ( + quotedByteArray == null || quotedByteArray.length < 2 + || quotedByteArray[0] != ParseConstants.SINGLE_QUOTE + || quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE + ) { throw new IllegalArgumentException("removeQuotesFromByteArray needs a quoted byte array"); } else { - byte [] targetString = new byte [quotedByteArray.length - 2]; + byte[] targetString = new byte[quotedByteArray.length - 2]; Bytes.putBytes(targetString, 0, quotedByteArray, 1, quotedByteArray.length - 2); return targetString; } } -/** - * Converts an int expressed in a byte array to an actual int - *

      - * This doesn't use Bytes.toInt because that assumes - * that there will be {@link Bytes#SIZEOF_INT} bytes available. - *

      - * @param numberAsByteArray the int value expressed as a byte array - * @return the int value - */ - public static int convertByteArrayToInt (byte [] numberAsByteArray) { + /** + * Converts an int expressed in a byte array to an actual int + *

      + * This doesn't use Bytes.toInt because that assumes that there will be {@link Bytes#SIZEOF_INT} + * bytes available. + *

      + * @param numberAsByteArray the int value expressed as a byte array + * @return the int value + */ + public static int convertByteArrayToInt(byte[] numberAsByteArray) { long tempResult = ParseFilter.convertByteArrayToLong(numberAsByteArray); @@ -693,16 +694,16 @@ public static int convertByteArrayToInt (byte [] numberAsByteArray) { return result; } -/** - * Converts a long expressed in a byte array to an actual long - *

      - * This doesn't use Bytes.toLong because that assumes - * that there will be {@link Bytes#SIZEOF_INT} bytes available. - *

      - * @param numberAsByteArray the long value expressed as a byte array - * @return the long value - */ - public static long convertByteArrayToLong (byte [] numberAsByteArray) { + /** + * Converts a long expressed in a byte array to an actual long + *

      + * This doesn't use Bytes.toLong because that assumes that there will be {@link Bytes#SIZEOF_INT} + * bytes available. + *

      + * @param numberAsByteArray the long value expressed as a byte array + * @return the long value + */ + public static long convertByteArrayToLong(byte[] numberAsByteArray) { if (numberAsByteArray == null) { throw new IllegalArgumentException("convertByteArrayToLong called with a null array"); } @@ -717,11 +718,12 @@ public static long convertByteArrayToLong (byte [] numberAsByteArray) { } while (i != numberAsByteArray.length) { - if (numberAsByteArray[i] < ParseConstants.ZERO || - numberAsByteArray[i] > ParseConstants.NINE) { + if ( + numberAsByteArray[i] < ParseConstants.ZERO || numberAsByteArray[i] > ParseConstants.NINE + ) { throw new IllegalArgumentException("Byte Array should only contain digits"); } - result = result*10 + (numberAsByteArray[i] - ParseConstants.ZERO); + result = result * 10 + (numberAsByteArray[i] - ParseConstants.ZERO); if (result < 0) { throw new IllegalArgumentException("Long Argument too large"); } @@ -735,37 +737,38 @@ public static long convertByteArrayToLong (byte [] numberAsByteArray) { } } -/** - * Converts a boolean expressed in a byte array to an actual boolean - *

      - * This doesn't used Bytes.toBoolean because Bytes.toBoolean(byte []) - * assumes that 1 stands for true and 0 for false. - * Here, the byte array representing "true" and "false" is parsed - *

      - * @param booleanAsByteArray the boolean value expressed as a byte array - * @return the boolean value - */ - public static boolean convertByteArrayToBoolean (byte [] booleanAsByteArray) { + /** + * Converts a boolean expressed in a byte array to an actual boolean + *

      + * This doesn't used Bytes.toBoolean because Bytes.toBoolean(byte []) assumes that 1 stands for + * true and 0 for false. Here, the byte array representing "true" and "false" is parsed + *

      + * @param booleanAsByteArray the boolean value expressed as a byte array + * @return the boolean value + */ + public static boolean convertByteArrayToBoolean(byte[] booleanAsByteArray) { if (booleanAsByteArray == null) { throw new IllegalArgumentException("convertByteArrayToBoolean called with a null array"); } - if (booleanAsByteArray.length == 4 && - (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T') && - (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R') && - (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U') && - (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E')) { + if ( + booleanAsByteArray.length == 4 + && (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T') + && (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R') + && (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U') + && (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E') + ) { return true; - } - else if (booleanAsByteArray.length == 5 && - (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F') && - (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A') && - (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L') && - (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S') && - (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E')) { + } else if ( + booleanAsByteArray.length == 5 + && (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F') + && (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A') + && (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L') + && (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S') + && (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E') + ) { return false; - } - else { + } else { throw new IllegalArgumentException("Incorrect Boolean Expression"); } } @@ -775,37 +778,30 @@ else if (booleanAsByteArray.length == 5 && * @param compareOpAsByteArray the comparatorOperator symbol as a byte array * @return the Compare Operator */ - public static CompareOperator createCompareOperator (byte [] compareOpAsByteArray) { + public static CompareOperator createCompareOperator(byte[] compareOpAsByteArray) { ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray); - if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) - return CompareOperator.LESS; + if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) return CompareOperator.LESS; else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER)) return CompareOperator.LESS_OR_EQUAL; - else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) - return CompareOperator.GREATER; + else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) return CompareOperator.GREATER; else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER)) return CompareOperator.GREATER_OR_EQUAL; - else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) - return CompareOperator.NOT_EQUAL; - else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) - return CompareOperator.EQUAL; - else - throw new IllegalArgumentException("Invalid compare operator"); + else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) return CompareOperator.NOT_EQUAL; + else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) return CompareOperator.EQUAL; + else throw new IllegalArgumentException("Invalid compare operator"); } -/** - * Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator - *

      - * @param comparator the comparator in the form comparatorType:comparatorValue - * @return the parsed comparator - */ - public static ByteArrayComparable createComparator (byte [] comparator) { - if (comparator == null) - throw new IllegalArgumentException("Incorrect Comparator"); - byte [][] parsedComparator = ParseFilter.parseComparator(comparator); - byte [] comparatorType = parsedComparator[0]; - byte [] comparatorValue = parsedComparator[1]; - + /** + * Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator + *

      + * @param comparator the comparator in the form comparatorType:comparatorValue + * @return the parsed comparator + */ + public static ByteArrayComparable createComparator(byte[] comparator) { + if (comparator == null) throw new IllegalArgumentException("Incorrect Comparator"); + byte[][] parsedComparator = ParseFilter.parseComparator(comparator); + byte[] comparatorType = parsedComparator[0]; + byte[] comparatorValue = parsedComparator[1]; if (Bytes.equals(comparatorType, ParseConstants.binaryType)) return new BinaryComparator(comparatorValue); @@ -815,28 +811,27 @@ else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), - Pattern.CASE_INSENSITIVE | Pattern.DOTALL); + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); - else - throw new IllegalArgumentException("Incorrect comparatorType"); + else throw new IllegalArgumentException("Incorrect comparatorType"); } -/** - * Splits a column in comparatorType:comparatorValue form into separate byte arrays - *

      - * @param comparator the comparator - * @return the parsed arguments of the comparator as a 2D byte array - */ - public static byte [][] parseComparator (byte [] comparator) { - final int index = Bytes.searchDelimiterIndex(comparator, 0, comparator.length, - ParseConstants.COLON); + /** + * Splits a column in comparatorType:comparatorValue form into separate byte arrays + *
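A rough usage sketch for the ParseFilter helpers above (the literal operator, boolean, and comparator strings are illustrative, not taken from this patch):

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.filter.ByteArrayComparable;
    import org.apache.hadoop.hbase.filter.ParseFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ParseFilterSketch {
      public static void main(String[] args) {
        // "<=" resolves to CompareOperator.LESS_OR_EQUAL in createCompareOperator
        CompareOperator op = ParseFilter.createCompareOperator(Bytes.toBytes("<="));
        // only "true"/"false" (any letter case) are accepted boolean literals
        boolean flag = ParseFilter.convertByteArrayToBoolean(Bytes.toBytes("true"));
        // comparators are written as comparatorType:comparatorValue, e.g. binary:abc
        ByteArrayComparable cmp = ParseFilter.createComparator(Bytes.toBytes("binary:abc"));
        System.out.println(op + " " + flag + " " + cmp.getClass().getSimpleName());
      }
    }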

      + * @param comparator the comparator + * @return the parsed arguments of the comparator as a 2D byte array + */ + public static byte[][] parseComparator(byte[] comparator) { + final int index = + Bytes.searchDelimiterIndex(comparator, 0, comparator.length, ParseConstants.COLON); if (index == -1) { throw new IllegalArgumentException("Incorrect comparator"); } - byte [][] result = new byte [2][0]; - result[0] = new byte [index]; + byte[][] result = new byte[2][0]; + result[0] = new byte[index]; System.arraycopy(comparator, 0, result[0], 0, index); final int len = comparator.length - (index + 1); @@ -846,10 +841,10 @@ else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return result; } -/** - * Return a Set of filters supported by the Filter Language - */ - public Set getSupportedFilters () { + /** + * Return a Set of filters supported by the Filter Language + */ + public Set getSupportedFilters() { return filterHashMap.keySet(); } @@ -862,15 +857,13 @@ public static Map getAllFilters() { } /** - * Register a new filter with the parser. If the filter is already registered, - * an IllegalArgumentException will be thrown. - * - * @param name a name for the filter + * Register a new filter with the parser. If the filter is already registered, an + * IllegalArgumentException will be thrown. + * @param name a name for the filter * @param filterClass fully qualified class name */ public static void registerFilter(String name, String filterClass) { - if(LOG.isInfoEnabled()) - LOG.info("Registering new filter " + name); + if (LOG.isInfoEnabled()) LOG.info("Registering new filter " + name); filterHashMap.put(name, filterClass); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java index 8300005edb35..3b40388a06f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,33 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.util.ArrayList; - import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** * Pass results that have same row prefix. 
*/ @InterfaceAudience.Public public class PrefixFilter extends FilterBase { - protected byte [] prefix = null; + protected byte[] prefix = null; protected boolean passedPrefix = false; protected boolean filterRow = true; - public PrefixFilter(final byte [] prefix) { + public PrefixFilter(final byte[] prefix) { this.prefix = prefix; } @@ -52,8 +50,7 @@ public byte[] getPrefix() { @Override public boolean filterRowKey(Cell firstRowCell) { - if (firstRowCell == null || this.prefix == null) - return true; + if (firstRowCell == null || this.prefix == null) return true; if (filterAllRemaining()) return true; int length = firstRowCell.getRowLength(); if (length < prefix.length) return true; @@ -63,11 +60,11 @@ public boolean filterRowKey(Cell firstRowCell) { int cmp; if (firstRowCell instanceof ByteBufferExtendedCell) { cmp = ByteBufferUtils.compareTo(((ByteBufferExtendedCell) firstRowCell).getRowByteBuffer(), - ((ByteBufferExtendedCell) firstRowCell).getRowPosition(), this.prefix.length, - this.prefix, 0, this.prefix.length); + ((ByteBufferExtendedCell) firstRowCell).getRowPosition(), this.prefix.length, this.prefix, + 0, this.prefix.length); } else { cmp = Bytes.compareTo(firstRowCell.getRowArray(), firstRowCell.getRowOffset(), - this.prefix.length, this.prefix, 0, this.prefix.length); + this.prefix.length, this.prefix, 0, this.prefix.length); } if ((!isReversed() && cmp > 0) || (isReversed() && cmp < 0)) { passedPrefix = true; @@ -97,10 +94,10 @@ public boolean filterAllRemaining() { return passedPrefix; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, "Expected 1 but got: %s", + filterArguments.size()); + byte[] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); return new PrefixFilter(prefix); } @@ -108,9 +105,8 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.PrefixFilter.Builder builder = - FilterProtos.PrefixFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.PrefixFilter.Builder builder = FilterProtos.PrefixFilter.newBuilder(); if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix)); return builder.build().toByteArray(); } @@ -121,28 +117,27 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static PrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static PrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.PrefixFilter proto; try { proto = FilterProtos.PrefixFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } - return new PrefixFilter(proto.hasPrefix()?proto.getPrefix().toByteArray():null); + return new PrefixFilter(proto.hasPrefix() ? proto.getPrefix().toByteArray() : null); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
+ * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof PrefixFilter)) return false; - PrefixFilter other = (PrefixFilter)o; + PrefixFilter other = (PrefixFilter) o; return Bytes.equals(this.getPrefix(), other.getPrefix()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java index 497f63314e60..2d52b300ff87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,38 +19,36 @@ import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * This filter is used to filter based on the column qualifier. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * column qualifier portion of a key. + * This filter is used to filter based on the column qualifier. It takes an operator (equal, + * greater, not equal, etc) and a byte [] comparator for the column qualifier portion of a key. *
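For context on the PrefixFilter changes above, a minimal usage sketch (the connection/table setup is omitted and the "row-" prefix is an assumed placeholder):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixFilterSketch {
      // Keep only rows whose key starts with "row-"; once the scan moves past
      // that range, filterAllRemaining() reports true via passedPrefix.
      static Scan prefixScan() {
        Scan scan = new Scan();
        scan.setFilter(new PrefixFilter(Bytes.toBytes("row-")));
        return scan;
      }
    }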

      - * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} - * to add more control. + * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} to add more + * control. *

      * Multiple filters can be combined using {@link FilterList}. *

      - * If an already known column qualifier is looked for, - * use {@link org.apache.hadoop.hbase.client.Get#addColumn} - * directly rather than a filter. + * If an already known column qualifier is looked for, use + * {@link org.apache.hadoop.hbase.client.Get#addColumn} directly rather than a filter. */ @InterfaceAudience.Public public class QualifierFilter extends CompareFilter { /** * Constructor. - * @param op the compare op for column qualifier matching + * @param op the compare op for column qualifier matching * @param qualifierComparator the comparator for column qualifier matching */ - public QualifierFilter(final CompareOperator op, - final ByteArrayComparable qualifierComparator) { + public QualifierFilter(final CompareOperator op, final ByteArrayComparable qualifierComparator) { super(op, qualifierComparator); } @@ -63,10 +60,10 @@ public ReturnCode filterCell(final Cell c) { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new QualifierFilter(compareOp, comparator); } @@ -74,9 +71,8 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.QualifierFilter.Builder builder = - FilterProtos.QualifierFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.QualifierFilter.Builder builder = FilterProtos.QualifierFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } @@ -87,8 +83,7 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static QualifierFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static QualifierFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.QualifierFilter proto; try { proto = FilterProtos.QualifierFilter.parseFrom(pbBytes); @@ -105,12 +100,12 @@ public static QualifierFilter parseFrom(final byte [] pbBytes) } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new QualifierFilter(valueCompareOp,valueComparator); + return new QualifierFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
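A sketch of how the QualifierFilter above is typically paired with a comparator (the "status" qualifier is a placeholder):

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.QualifierFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class QualifierFilterSketch {
      // Keep only cells whose qualifier equals "status"; for one known qualifier,
      // Scan#addColumn is cheaper than a filter, as the javadoc notes.
      static Scan statusCellsOnly() {
        Scan scan = new Scan();
        scan.setFilter(
          new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("status"))));
        return scan;
      }
    }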
*/ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java index f89182a704fb..d54d7575f254 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.util.Objects; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** * A filter that includes rows based on a chance. - * */ @InterfaceAudience.Public public class RandomRowFilter extends FilterBase { @@ -40,9 +37,7 @@ public class RandomRowFilter extends FilterBase { protected boolean filterOutRow; /** - * Create a new filter with a specified chance for a row to be included. - * - * @param chance + * Create a new filter with a specified chance for a row to be included. n */ public RandomRowFilter(float chance) { this.chance = chance; @@ -56,9 +51,7 @@ public float getChance() { } /** - * Set the chance that a row is included. - * - * @param chance + * Set the chance that a row is included. n */ public void setChance(float chance) { this.chance = chance; @@ -111,21 +104,18 @@ public void reset() { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.RandomRowFilter.Builder builder = - FilterProtos.RandomRowFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.RandomRowFilter.Builder builder = FilterProtos.RandomRowFilter.newBuilder(); builder.setChance(this.chance); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link RandomRowFilter} instance - * @return An instance of {@link RandomRowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link RandomRowFilter} made from bytes n * @see + * #toByteArray */ - public static RandomRowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.RandomRowFilter proto; try { proto = FilterProtos.RandomRowFilter.parseFrom(pbBytes); @@ -137,15 +127,15 @@ public static RandomRowFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
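The RandomRowFilter above samples rows by chance; a minimal sketch (the 1% rate is an arbitrary illustration):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.RandomRowFilter;

    public class RandomRowFilterSketch {
      // Include roughly 1% of rows; the decision is random per row,
      // so repeated scans return different samples.
      static Scan sampledScan() {
        Scan scan = new Scan();
        scan.setFilter(new RandomRowFilter(0.01f));
        return scan;
      }
    }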
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof RandomRowFilter)) return false; - RandomRowFilter other = (RandomRowFilter)o; + RandomRowFilter other = (RandomRowFilter) o; return this.getChance() == other.getChance(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java index d278e7ac7baa..75272c5f2413 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +21,9 @@ import java.nio.charset.IllegalCharsetNameException; import java.util.Arrays; import java.util.regex.Pattern; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.jcodings.Encoding; import org.jcodings.EncodingDB; import org.jcodings.specific.UTF8Encoding; @@ -36,36 +33,39 @@ import org.joni.Syntax; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; + /** - * This comparator is for use with {@link CompareFilter} implementations, such - * as {@link RowFilter}, {@link QualifierFilter}, and {@link ValueFilter}, for - * filtering based on the value of a given column. Use it to test if a given - * regular expression matches a cell value in the column. + * This comparator is for use with {@link CompareFilter} implementations, such as {@link RowFilter}, + * {@link QualifierFilter}, and {@link ValueFilter}, for filtering based on the value of a given + * column. Use it to test if a given regular expression matches a cell value in the column. *

      * Only EQUAL or NOT_EQUAL comparisons are valid with this comparator. *

      * For example: *

      + * *

      - * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
      - *     new RegexStringComparator(
      - *       // v4 IP address
      - *       "(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3,3}" +
      - *         "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(\\/[0-9]+)?" +
      - *         "|" +
      - *       // v6 IP address
      - *       "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)" +
      - *         "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
      + * ValueFilter vf = new ValueFilter(CompareOp.EQUAL, new RegexStringComparator(
      + *   // v4 IP address
      + *   "(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3,3}"
      + *     + "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(\\/[0-9]+)?" + "|" +
      + *     // v6 IP address
      + *     "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)"
      + *     + "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
        * 
      *

      * Supports {@link java.util.regex.Pattern} flags as well: *

      + * *

        * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
      - *     new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
      + *   new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
        * 
      + * * @see java.util.regex.Pattern */ @InterfaceAudience.Public @@ -84,8 +84,7 @@ public enum EngineType { } /** - * Constructor - * Adds Pattern.DOTALL to the underlying Pattern + * Constructor Adds Pattern.DOTALL to the underlying Pattern * @param expr a valid regular expression */ public RegexStringComparator(String expr) { @@ -93,9 +92,8 @@ public RegexStringComparator(String expr) { } /** - * Constructor - * Adds Pattern.DOTALL to the underlying Pattern - * @param expr a valid regular expression + * Constructor Adds Pattern.DOTALL to the underlying Pattern + * @param expr a valid regular expression * @param engine engine implementation type */ public RegexStringComparator(String expr, EngineType engine) { @@ -104,7 +102,7 @@ public RegexStringComparator(String expr, EngineType engine) { /** * Constructor - * @param expr a valid regular expression + * @param expr a valid regular expression * @param flags java.util.regex.Pattern flags */ public RegexStringComparator(String expr, int flags) { @@ -113,8 +111,8 @@ public RegexStringComparator(String expr, int flags) { /** * Constructor - * @param expr a valid regular expression - * @param flags java.util.regex.Pattern flags + * @param expr a valid regular expression + * @param flags java.util.regex.Pattern flags * @param engine engine implementation type */ public RegexStringComparator(String expr, int flags, EngineType engine) { @@ -132,12 +130,10 @@ public RegexStringComparator(String expr, int flags, EngineType engine) { /** * Specifies the {@link Charset} to use to convert the row key to a String. *

      - * The row key needs to be converted to a String in order to be matched - * against the regular expression. This method controls which charset is - * used to do this conversion. + * The row key needs to be converted to a String in order to be matched against the regular + * expression. This method controls which charset is used to do this conversion. *

      - * If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} - * is recommended. + * If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} is recommended. * @param charset The charset to use. */ public void setCharset(final Charset charset) { @@ -153,18 +149,17 @@ public int compareTo(byte[] value, int offset, int length) { * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { return engine.toByteArray(); } /** * @param pbBytes A pb serialized {@link RegexStringComparator} instance - * @return An instance of {@link RegexStringComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link RegexStringComparator} made from bytes n * @see + * #toByteArray */ - public static RegexStringComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static RegexStringComparator parseFrom(final byte[] pbBytes) + throws DeserializationException { ComparatorProtos.RegexStringComparator proto; try { proto = ComparatorProtos.RegexStringComparator.parseFrom(pbBytes); @@ -174,8 +169,7 @@ public static RegexStringComparator parseFrom(final byte [] pbBytes) RegexStringComparator comparator; if (proto.hasEngine()) { EngineType engine = EngineType.valueOf(proto.getEngine()); - comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(), - engine); + comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(), engine); } else { comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags()); } @@ -191,15 +185,14 @@ public static RegexStringComparator parseFrom(final byte [] pbBytes) } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof RegexStringComparator)) return false; - RegexStringComparator comparator = (RegexStringComparator)other; + RegexStringComparator comparator = (RegexStringComparator) other; return super.areSerializedFieldsEqual(comparator) && engine.getClass().isInstance(comparator.getEngine()) && engine.getPattern().equals(comparator.getEngine().getPattern()) @@ -212,19 +205,17 @@ Engine getEngine() { } /** - * This is an internal interface for abstracting access to different regular - * expression matching engines. + * This is an internal interface for abstracting access to different regular expression matching + * engines. 
*/ static interface Engine { /** - * Returns the string representation of the configured regular expression - * for matching + * Returns the string representation of the configured regular expression for matching */ String getPattern(); /** - * Returns the set of configured match flags, a bit mask that may include - * {@link Pattern} flags + * Returns the set of configured match flags, a bit mask that may include {@link Pattern} flags */ int getFlags(); @@ -242,11 +233,11 @@ static interface Engine { /** * Return the serialized form of the configured matcher */ - byte [] toByteArray(); + byte[] toByteArray(); /** * Match the given input against the configured pattern - * @param value the data to be matched + * @param value the data to be matched * @param offset offset of the data to be matched * @param length length of the data to be matched * @return 0 if a match was made, 1 otherwise @@ -305,7 +296,7 @@ public int compareTo(byte[] value, int offset, int length) { @Override public byte[] toByteArray() { ComparatorProtos.RegexStringComparator.Builder builder = - ComparatorProtos.RegexStringComparator.newBuilder(); + ComparatorProtos.RegexStringComparator.newBuilder(); builder.setPattern(pattern.pattern()); builder.setPatternFlags(pattern.flags()); builder.setCharset(charset.name()); @@ -317,11 +308,10 @@ public byte[] toByteArray() { /** * Implementation of the Engine interface using Jruby's joni regex engine. *

      - * This engine operates on byte arrays directly so is expected to be more GC - * friendly, and reportedly is twice as fast as Java's Pattern engine. + * This engine operates on byte arrays directly so is expected to be more GC friendly, and + * reportedly is twice as fast as Java's Pattern engine. *

      - * NOTE: Only the {@link Pattern} flags CASE_INSENSITIVE, DOTALL, and - * MULTILINE are supported. + * NOTE: Only the {@link Pattern} flags CASE_INSENSITIVE, DOTALL, and MULTILINE are supported. */ static class JoniRegexEngine implements Engine { private Encoding encoding = UTF8Encoding.INSTANCE; @@ -365,12 +355,12 @@ public int compareTo(byte[] value, int offset, int length) { @Override public byte[] toByteArray() { ComparatorProtos.RegexStringComparator.Builder builder = - ComparatorProtos.RegexStringComparator.newBuilder(); - builder.setPattern(regex); - builder.setPatternFlags(joniToPatternFlags(pattern.getOptions())); - builder.setCharset(encoding.getCharsetName()); - builder.setEngine(EngineType.JONI.name()); - return builder.build().toByteArray(); + ComparatorProtos.RegexStringComparator.newBuilder(); + builder.setPattern(regex); + builder.setPatternFlags(joniToPatternFlags(pattern.getOptions())); + builder.setCharset(encoding.getCharsetName()); + builder.setEngine(EngineType.JONI.name()); + return builder.build().toByteArray(); } private int patternToJoniFlags(int flags) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java index 19c934808ea8..dee91657f745 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,27 +19,26 @@ import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * This filter is used to filter based on the key. It takes an operator - * (equal, greater, not equal, etc) and a byte [] comparator for the row, - * and column qualifier portions of a key. + * This filter is used to filter based on the key. It takes an operator (equal, greater, not equal, + * etc) and a byte [] comparator for the row, and column qualifier portions of a key. *

      * This filter can be wrapped with {@link WhileMatchFilter} to add more control. *

      * Multiple filters can be combined using {@link FilterList}. *

      - * If an already known row range needs to be scanned, - * use {@link org.apache.hadoop.hbase.CellScanner} start - * and stop rows directly rather than a filter. + * If an already known row range needs to be scanned, use + * {@link org.apache.hadoop.hbase.CellScanner} start and stop rows directly rather than a filter. */ @InterfaceAudience.Public public class RowFilter extends CompareFilter { @@ -48,11 +46,10 @@ public class RowFilter extends CompareFilter { /** * Constructor. - * @param op the compare op for row matching + * @param op the compare op for row matching * @param rowComparator the comparator for row matching */ - public RowFilter(final CompareOperator op, - final ByteArrayComparable rowComparator) { + public RowFilter(final CompareOperator op, final ByteArrayComparable rowComparator) { super(op, rowComparator); } @@ -63,7 +60,7 @@ public void reset() { @Override public ReturnCode filterCell(final Cell v) { - if(this.filterOutRow) { + if (this.filterOutRow) { return ReturnCode.NEXT_ROW; } return ReturnCode.INCLUDE; @@ -82,33 +79,29 @@ public boolean filterRow() { return this.filterOutRow; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { @SuppressWarnings("rawtypes") // for arguments ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new RowFilter(compareOp, comparator); } - /** - * @return The filter serialized using pb - */ + /** + * @return The filter serialized using pb + */ @Override - public byte [] toByteArray() { - FilterProtos.RowFilter.Builder builder = - FilterProtos.RowFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.RowFilter.Builder builder = FilterProtos.RowFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link RowFilter} instance - * @return An instance of {@link RowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link RowFilter} made from bytes n * @see #toByteArray */ - public static RowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.RowFilter proto; try { proto = FilterProtos.RowFilter.parseFrom(pbBytes); @@ -125,12 +118,12 @@ public static RowFilter parseFrom(final byte [] pbBytes) } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new RowFilter(valueCompareOp,valueComparator); + return new RowFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
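A usage sketch for the RowFilter above, pairing it with the RegexStringComparator from the same patch (the row-key pattern is illustrative):

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.RegexStringComparator;
    import org.apache.hadoop.hbase.filter.RowFilter;

    public class RowFilterSketch {
      // Keep only rows whose key matches the pattern; for a known row range,
      // start/stop rows on the Scan are cheaper than a filter, as the javadoc notes.
      static Scan matchingRows() {
        Scan scan = new Scan();
        scan.setFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("user-[0-9]+")));
        return scan;
      }
    }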
*/ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java index dbbd59e90b1f..f1b9413718f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,79 +21,68 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * A {@link Filter} that checks a single column value, but does not emit the - * tested column. This will enable a performance boost over - * {@link SingleColumnValueFilter}, if the tested column value is not actually - * needed as input (besides for the filtering itself). + * A {@link Filter} that checks a single column value, but does not emit the tested column. This + * will enable a performance boost over {@link SingleColumnValueFilter}, if the tested column value + * is not actually needed as input (besides for the filtering itself). */ @InterfaceAudience.Public public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the column is not found or - * the condition fails, the row will not be emitted. - * - * @param family name of column family + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted; except for the tested column value. + * If the column is not found or the condition fails, the row will not be emitted. + * @param family name of column family * @param qualifier name of column qualifier - * @param op operator - * @param value value to compare column values against + * @param op operator + * @param value value to compare column values against */ - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOperator op, byte[] value) { + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOperator op, + byte[] value) { super(family, qualifier, op, value); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the condition fails, the - * row will not be emitted. + * Constructor for binary compare of the value of a single column. 
If the column is found and the + * condition passes, all columns of the row will be emitted; except for the tested column value. + * If the condition fails, the row will not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param op operator + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family + * @param qualifier name of column qualifier + * @param op operator * @param comparator Comparator to use. */ - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOperator op, ByteArrayComparable comparator) { + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOperator op, + ByteArrayComparable comparator) { super(family, qualifier, op, comparator); } /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param op - * @param comparator - * @param filterIfMissing - * @param latestVersionOnly + * Constructor for protobuf deserialization only. nnnnnn */ protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing, - final boolean latestVersionOnly) { + final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing, + final boolean latestVersionOnly) { super(family, qualifier, op, comparator, filterIfMissing, latestVersionOnly); } // We cleaned result row in FilterRow to be consistent with scanning process. @Override public boolean hasFilterRow() { - return true; + return true; } // Here we remove from row all key values from testing column @@ -110,12 +98,12 @@ public void filterRowCells(List kvs) { } } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - SingleColumnValueFilter tempFilter = (SingleColumnValueFilter) - SingleColumnValueFilter.createFilterFromArguments(filterArguments); - SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter ( - tempFilter.getFamily(), tempFilter.getQualifier(), - tempFilter.getCompareOperator(), tempFilter.getComparator()); + public static Filter createFilterFromArguments(ArrayList filterArguments) { + SingleColumnValueFilter tempFilter = + (SingleColumnValueFilter) SingleColumnValueFilter.createFilterFromArguments(filterArguments); + SingleColumnValueExcludeFilter filter = + new SingleColumnValueExcludeFilter(tempFilter.getFamily(), tempFilter.getQualifier(), + tempFilter.getCompareOperator(), tempFilter.getComparator()); if (filterArguments.size() == 6) { filter.setFilterIfMissing(tempFilter.getFilterIfMissing()); @@ -128,7 +116,7 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { FilterProtos.SingleColumnValueExcludeFilter.Builder builder = FilterProtos.SingleColumnValueExcludeFilter.newBuilder(); builder.setSingleColumnValueFilter(super.convert()); @@ -137,12 +125,11 @@ public static Filter createFilterFromArguments(ArrayList filterArgument /** * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance - * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes n + 
* * @see #toByteArray */ - public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.SingleColumnValueExcludeFilter proto; try { proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes); @@ -151,8 +138,7 @@ public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes) } FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter(); - final CompareOperator compareOp = - CompareOperator.valueOf(parentProto.getCompareOp().name()); + final CompareOperator compareOp = CompareOperator.valueOf(parentProto.getCompareOp().name()); final ByteArrayComparable comparator; try { comparator = ProtobufUtil.toComparator(parentProto.getComparator()); @@ -160,15 +146,15 @@ public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes) throw new DeserializationException(ioe); } - return new SingleColumnValueExcludeFilter(parentProto.hasColumnFamily() ? parentProto - .getColumnFamily().toByteArray() : null, parentProto.hasColumnQualifier() ? parentProto - .getColumnQualifier().toByteArray() : null, compareOp, comparator, parentProto - .getFilterIfMissing(), parentProto.getLatestVersionOnly()); + return new SingleColumnValueExcludeFilter( + parentProto.hasColumnFamily() ? parentProto.getColumnFamily().toByteArray() : null, + parentProto.hasColumnQualifier() ? parentProto.getColumnQualifier().toByteArray() : null, + compareOp, comparator, parentProto.getFilterIfMissing(), parentProto.getLatestVersionOnly()); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java index e89616a64319..365ee06b904b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,56 +20,52 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType; -import org.apache.hadoop.hbase.util.Bytes; - -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * This filter is used to filter cells based on value. It takes a {@link CompareOperator} - * operator (equal, greater, not equal, etc), and either a byte [] value or - * a ByteArrayComparable. + * This filter is used to filter cells based on value. It takes a {@link CompareOperator} operator + * (equal, greater, not equal, etc), and either a byte [] value or a ByteArrayComparable. *

      - * If we have a byte [] value then we just do a lexicographic compare. For - * example, if passed value is 'b' and cell has 'a' and the compare operator - * is LESS, then we will filter out this cell (return true). If this is not - * sufficient (eg you want to deserialize a long and then compare it to a fixed - * long value), then you can pass in your own comparator instead. + * If we have a byte [] value then we just do a lexicographic compare. For example, if passed value + * is 'b' and cell has 'a' and the compare operator is LESS, then we will filter out this cell + * (return true). If this is not sufficient (eg you want to deserialize a long and then compare it + * to a fixed long value), then you can pass in your own comparator instead. *

      - * You must also specify a family and qualifier. Only the value of this column - * will be tested. When using this filter on a - * {@link org.apache.hadoop.hbase.CellScanner} with specified - * inputs, the column to be tested should also be added as input (otherwise - * the filter will regard the column as missing). + * You must also specify a family and qualifier. Only the value of this column will be tested. When + * using this filter on a {@link org.apache.hadoop.hbase.CellScanner} with specified inputs, the + * column to be tested should also be added as input (otherwise the filter will regard the column as + * missing). *

      - * To prevent the entire row from being emitted if the column is not found - * on a row, use {@link #setFilterIfMissing}. - * Otherwise, if the column is found, the entire row will be emitted only if - * the value passes. If the value fails, the row will be filtered out. + * To prevent the entire row from being emitted if the column is not found on a row, use + * {@link #setFilterIfMissing}. Otherwise, if the column is found, the entire row will be emitted + * only if the value passes. If the value fails, the row will be filtered out. *

      - * In order to test values of previous versions (timestamps), set - * {@link #setLatestVersionOnly} to false. The default is true, meaning that - * only the latest version's value is tested and all previous versions are ignored. + * In order to test values of previous versions (timestamps), set {@link #setLatestVersionOnly} to + * false. The default is true, meaning that only the latest version's value is tested and all + * previous versions are ignored. *

      * To filter based on the value of all scanned columns, use {@link ValueFilter}. */ @InterfaceAudience.Public public class SingleColumnValueFilter extends FilterBase { - protected byte [] columnFamily; - protected byte [] columnQualifier; + protected byte[] columnFamily; + protected byte[] columnQualifier; protected CompareOperator op; protected org.apache.hadoop.hbase.filter.ByteArrayComparable comparator; protected boolean foundColumn = false; @@ -79,42 +74,36 @@ public class SingleColumnValueFilter extends FilterBase { protected boolean latestVersionOnly = true; /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted. If the condition fails, the row will + * not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family * @param qualifier name of column qualifier - * @param op operator - * @param value value to compare column values against + * @param op operator + * @param value value to compare column values against */ - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOperator op, final byte[] value) { - this(family, qualifier, op, - new org.apache.hadoop.hbase.filter.BinaryComparator(value)); + public SingleColumnValueFilter(final byte[] family, final byte[] qualifier, + final CompareOperator op, final byte[] value) { + this(family, qualifier, op, new org.apache.hadoop.hbase.filter.BinaryComparator(value)); } /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. + * Constructor for binary compare of the value of a single column. If the column is found and the + * condition passes, all columns of the row will be emitted. If the condition fails, the row will + * not be emitted. *

      - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param op operator + * Use the filterIfColumnMissing flag to set whether the rest of the columns in a row will be + * emitted if the specified column to check is not found in the row. + * @param family name of column family + * @param qualifier name of column qualifier + * @param op operator * @param comparator Comparator to use. */ - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOperator op, - final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator) { + public SingleColumnValueFilter(final byte[] family, final byte[] qualifier, + final CompareOperator op, final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator) { this.columnFamily = family; this.columnQualifier = qualifier; this.op = op; @@ -122,17 +111,11 @@ public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, } /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param op - * @param comparator - * @param filterIfMissing - * @param latestVersionOnly + * Constructor for protobuf deserialization only. nnnnnn */ protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier, - final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator, - final boolean filterIfMissing, final boolean latestVersionOnly) { + final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator, + final boolean filterIfMissing, final boolean latestVersionOnly) { this(family, qualifier, op, comparator); this.filterIfMissing = filterIfMissing; this.latestVersionOnly = latestVersionOnly; @@ -171,7 +154,8 @@ public boolean filterRowKey(Cell cell) throws IOException { @Override public ReturnCode filterCell(final Cell c) { - // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + Bytes.toString(keyValue.getValue())); + // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + + // Bytes.toString(keyValue.getValue())); if (this.matchedColumn) { // We already found and matched the single column, all keys now pass return ReturnCode.INCLUDE; @@ -184,7 +168,7 @@ public ReturnCode filterCell(final Cell c) { } foundColumn = true; if (filterColumnValue(c)) { - return this.latestVersionOnly? ReturnCode.NEXT_ROW: ReturnCode.INCLUDE; + return this.latestVersionOnly ? ReturnCode.NEXT_ROW : ReturnCode.INCLUDE; } this.matchedColumn = true; return ReturnCode.INCLUDE; @@ -199,9 +183,9 @@ private boolean filterColumnValue(final Cell cell) { public boolean filterRow() { // If column was found, return false if it was matched, true if it was not // If column not found, return true if we filter if missing, false if not - return this.foundColumn? !this.matchedColumn: this.filterIfMissing; + return this.foundColumn ? !this.matchedColumn : this.filterIfMissing; } - + @Override public boolean hasFilterRow() { return true; @@ -215,8 +199,8 @@ public void reset() { /** * Get whether entire row should be filtered if column is not found. 
- * @return true if row should be skipped if column not found, false if row - * should be let through anyways + * @return true if row should be skipped if column not found, false if row should be let through + * anyways */ public boolean getFilterIfMissing() { return filterIfMissing; @@ -227,7 +211,7 @@ public boolean getFilterIfMissing() { *

      * If true, the entire row will be skipped if the column is not found. *

      - * If false, the row will pass if the column is not found. This is default. + * If false, the row will pass if the column is not found. This is default. * @param filterIfMissing flag */ public void setFilterIfMissing(boolean filterIfMissing) { @@ -235,10 +219,9 @@ public void setFilterIfMissing(boolean filterIfMissing) { } /** - * Get whether only the latest version of the column value should be compared. - * If true, the row will be returned if only the latest version of the column - * value matches. If false, the row will be returned if any version of the - * column value matches. The default is true. + * Get whether only the latest version of the column value should be compared. If true, the row + * will be returned if only the latest version of the column value matches. If false, the row will + * be returned if any version of the column value matches. The default is true. * @return return value */ public boolean getLatestVersionOnly() { @@ -246,36 +229,32 @@ public boolean getLatestVersionOnly() { } /** - * Set whether only the latest version of the column value should be compared. - * If true, the row will be returned if only the latest version of the column - * value matches. If false, the row will be returned if any version of the - * column value matches. The default is true. + * Set whether only the latest version of the column value should be compared. If true, the row + * will be returned if only the latest version of the column value matches. If false, the row will + * be returned if any version of the column value matches. The default is true. * @param latestVersionOnly flag */ public void setLatestVersionOnly(boolean latestVersionOnly) { this.latestVersionOnly = latestVersionOnly; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { Preconditions.checkArgument(filterArguments.size() == 4 || filterArguments.size() == 6, - "Expected 4 or 6 but got: %s", filterArguments.size()); - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + "Expected 4 or 6 but got: %s", filterArguments.size()); + byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(2)); - org.apache.hadoop.hbase.filter.ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); - - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (op != CompareOperator.EQUAL && - op != CompareOperator.NOT_EQUAL) { - throw new IllegalArgumentException ("A regexstring comparator and substring comparator " + - "can only be used with EQUAL and NOT_EQUAL"); + org.apache.hadoop.hbase.filter.ByteArrayComparable comparator = + ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); + + if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) { + if (op != CompareOperator.EQUAL && op != CompareOperator.NOT_EQUAL) { + throw new IllegalArgumentException("A regexstring comparator and substring comparator " + + "can only be used with EQUAL and NOT_EQUAL"); } } - SingleColumnValueFilter filter = new SingleColumnValueFilter(family, qualifier, - op, 
comparator); + SingleColumnValueFilter filter = new SingleColumnValueFilter(family, qualifier, op, comparator); if (filterArguments.size() == 6) { boolean filterIfMissing = ParseFilter.convertByteArrayToBoolean(filterArguments.get(4)); @@ -308,7 +287,7 @@ FilterProtos.SingleColumnValueFilter convert() { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { return convert().toByteArray(); } @@ -317,8 +296,8 @@ FilterProtos.SingleColumnValueFilter convert() { * @return An instance of {@link SingleColumnValueFilter} made from bytes * @see #toByteArray */ - public static SingleColumnValueFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SingleColumnValueFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { FilterProtos.SingleColumnValueFilter proto; try { proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes); @@ -326,8 +305,7 @@ public static SingleColumnValueFilter parseFrom(final byte [] pbBytes) throw new DeserializationException(e); } - final CompareOperator compareOp = - CompareOperator.valueOf(proto.getCompareOp().name()); + final CompareOperator compareOp = CompareOperator.valueOf(proto.getCompareOp().name()); final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator; try { comparator = ProtobufUtil.toComparator(proto.getComparator()); @@ -335,34 +313,33 @@ public static SingleColumnValueFilter parseFrom(final byte [] pbBytes) throw new DeserializationException(ioe); } - return new SingleColumnValueFilter(proto.hasColumnFamily() ? proto.getColumnFamily() - .toByteArray() : null, proto.hasColumnQualifier() ? proto.getColumnQualifier() - .toByteArray() : null, compareOp, comparator, proto.getFilterIfMissing(), proto - .getLatestVersionOnly()); + return new SingleColumnValueFilter( + proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, + proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null, compareOp, + comparator, proto.getFilterIfMissing(), proto.getLatestVersionOnly()); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof SingleColumnValueFilter)) return false; - SingleColumnValueFilter other = (SingleColumnValueFilter)o; + SingleColumnValueFilter other = (SingleColumnValueFilter) o; return Bytes.equals(this.getFamily(), other.getFamily()) - && Bytes.equals(this.getQualifier(), other.getQualifier()) - && this.op.equals(other.op) + && Bytes.equals(this.getQualifier(), other.getQualifier()) && this.op.equals(other.op) && this.getComparator().areSerializedFieldsEqual(other.getComparator()) && this.getFilterIfMissing() == other.getFilterIfMissing() && this.getLatestVersionOnly() == other.getLatestVersionOnly(); } /** - * The only CF this filter needs is given column family. So, it's the only essential - * column in whole scan. If filterIfMissing == false, all families are essential, - * because of possibility of skipping the rows without any data in filtered CF. + * The only CF this filter needs is given column family. So, it's the only essential column in + * whole scan. 
If filterIfMissing == false, all families are essential, because of possibility of + * skipping the rows without any data in filtered CF. */ @Override public boolean isFamilyEssential(byte[] name) { @@ -371,10 +348,9 @@ public boolean isFamilyEssential(byte[] name) { @Override public String toString() { - return String.format("%s (%s, %s, %s, %s)", - this.getClass().getSimpleName(), Bytes.toStringBinary(this.columnFamily), - Bytes.toStringBinary(this.columnQualifier), this.op.name(), - Bytes.toStringBinary(this.comparator.getValue())); + return String.format("%s (%s, %s, %s, %s)", this.getClass().getSimpleName(), + Bytes.toStringBinary(this.columnFamily), Bytes.toStringBinary(this.columnQualifier), + this.op.name(), Bytes.toStringBinary(this.comparator.getValue())); } @Override @@ -384,7 +360,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(Bytes.hashCode(getFamily()), Bytes.hashCode(getQualifier()), - this.op, getComparator(), getFilterIfMissing(), getLatestVersionOnly()); + return Objects.hash(Bytes.hashCode(getFamily()), Bytes.hashCode(getQualifier()), this.op, + getComparator(), getFilterIfMissing(), getLatestVersionOnly()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java index 03e1f2cabe1b..3aa0ef22c151 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,38 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * A wrapper filter that filters an entire row if any of the Cell checks do - * not pass. + * A wrapper filter that filters an entire row if any of the Cell checks do not pass. *

      - * For example, if all columns in a row represent weights of different things,
      - * with the values being the actual weights, and we want to filter out the
      - * entire row if any of its weights are zero. In this case, we want to prevent
      - * rows from being emitted if a single key is filtered. Combine this filter
      - * with a {@link ValueFilter}:
      + * For example, if all columns in a row represent weights of different things, with the values being
      + * the actual weights, and we want to filter out the entire row if any of its weights are zero. In
      + * this case, we want to prevent rows from being emitted if a single key is filtered. Combine this
      + * filter with a {@link ValueFilter}:
        * <p>
        *
        * <pre>
        * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
        *   new BinaryComparator(Bytes.toBytes(0))));
      - *
      - * Any row which contained a column whose value was 0 will be filtered out
      - * (since ValueFilter will not pass that Cell).
      - * Without this filter, the other non-zero valued columns in the row would still
      - * be emitted.
      + * Any row which contained a column whose value was 0 will be filtered out (since
      + * ValueFilter will not pass that Cell). Without this filter, the other non-zero valued columns in
      + * the row would still be emitted.
        * </pre>
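[Editorial aside, not part of the patch: a compilable sketch of the SkipFilter wrapper behaviour described in the class javadoc above. The class and method names are hypothetical; the filter and comparator constructors used here are the ones visible in this diff, with the current CompareOperator API in place of the older CompareOp named in the comment.]

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.SkipFilter;
    import org.apache.hadoop.hbase.filter.ValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipFilterExample {
      // Emit a row only if none of its cells carries the value 0; without the SkipFilter
      // wrapper, the non-zero cells of such a row would still be returned.
      static Scan zeroFreeRowsScan() {
        Scan scan = new Scan();
        scan.setFilter(new SkipFilter(
          new ValueFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(0)))));
        return scan;
      }
    }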

      */ @InterfaceAudience.Public @@ -95,7 +90,7 @@ public Cell transformCell(Cell v) throws IOException { public boolean filterRow() { return filterRow; } - + @Override public boolean hasFilterRow() { return true; @@ -106,20 +101,16 @@ public boolean hasFilterRow() { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.SkipFilter.Builder builder = - FilterProtos.SkipFilter.newBuilder(); + FilterProtos.SkipFilter.Builder builder = FilterProtos.SkipFilter.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link SkipFilter} instance - * @return An instance of {@link SkipFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link SkipFilter} made from bytes n * @see #toByteArray */ - public static SkipFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.SkipFilter proto; try { proto = FilterProtos.SkipFilter.parseFrom(pbBytes); @@ -135,15 +126,15 @@ public static SkipFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof SkipFilter)) return false; - SkipFilter other = (SkipFilter)o; + SkipFilter other = (SkipFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index 1bfc7229f535..b8e33c438feb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,27 +18,27 @@ package org.apache.hadoop.hbase.filter; import java.util.Locale; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; /** - * This comparator is for use with SingleColumnValueFilter, for filtering based on - * the value of a given column. Use it to test if a given substring appears - * in a cell value in the column. The comparison is case insensitive. + * This comparator is for use with SingleColumnValueFilter, for filtering based on the value of a + * given column. Use it to test if a given substring appears in a cell value in the column. The + * comparison is case insensitive. *

        * Only EQUAL or NOT_EQUAL tests are valid with this comparator.
        * <p>
        * For example:
        * <p>
      + *
        * <pre>
        * SingleColumnValueFilter scvf =
      - *   new SingleColumnValueFilter("col", CompareOp.EQUAL,
      - *     new SubstringComparator("substr"));
      + *   new SingleColumnValueFilter("col", CompareOp.EQUAL, new SubstringComparator("substr"));
        * </pre>
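[Editorial aside, not part of the patch: a compilable variant of the javadoc example above, assuming a hypothetical column family "f" and qualifier "col". It also enables filterIfMissing so that rows lacking the column are skipped rather than passed through, which is the default behaviour documented earlier in this file.]

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.filter.SubstringComparator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SubstringMatchExample {
      // Case-insensitive "contains" test on the value of f:col; only EQUAL or NOT_EQUAL
      // are valid operators with SubstringComparator.
      static Scan rowsWhoseColumnContains(String substr) {
        SingleColumnValueFilter scvf = new SingleColumnValueFilter(
          Bytes.toBytes("f"), Bytes.toBytes("col"), CompareOperator.EQUAL,
          new SubstringComparator(substr));
        scvf.setFilterIfMissing(true); // skip rows that do not have f:col at all
        Scan scan = new Scan();
        scan.setFilter(scvf);
        return scan;
      }
    }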
      */ @InterfaceAudience.Public @@ -64,15 +63,14 @@ public byte[] getValue() { @Override public int compareTo(byte[] value, int offset, int length) { - return Bytes.toString(value, offset, length).toLowerCase(Locale.ROOT).contains(substr) ? 0 - : 1; + return Bytes.toString(value, offset, length).toLowerCase(Locale.ROOT).contains(substr) ? 0 : 1; } /** * @return The comparator serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { ComparatorProtos.SubstringComparator.Builder builder = ComparatorProtos.SubstringComparator.newBuilder(); builder.setSubstr(this.substr); @@ -81,12 +79,11 @@ public int compareTo(byte[] value, int offset, int length) { /** * @param pbBytes A pb serialized {@link SubstringComparator} instance - * @return An instance of {@link SubstringComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link SubstringComparator} made from bytes n * @see + * #toByteArray */ - public static SubstringComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static SubstringComparator parseFrom(final byte[] pbBytes) + throws DeserializationException { ComparatorProtos.SubstringComparator proto; try { proto = ComparatorProtos.SubstringComparator.parseFrom(pbBytes); @@ -97,18 +94,16 @@ public static SubstringComparator parseFrom(final byte [] pbBytes) } /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. + * n * @return true if and only if the fields of the comparator that are serialized are equal to + * the corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof SubstringComparator)) return false; - SubstringComparator comparator = (SubstringComparator)other; - return super.areSerializedFieldsEqual(comparator) - && this.substr.equals(comparator.substr); + SubstringComparator comparator = (SubstringComparator) other; + return super.areSerializedFieldsEqual(comparator) && this.substr.equals(comparator.substr); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java index 5e7fb5c490fb..dfd2f5c537c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,25 +22,25 @@ import java.util.List; import java.util.Objects; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; + /** - * Filter that returns only cells whose timestamp (version) is - * in the specified list of timestamps (versions). + * Filter that returns only cells whose timestamp (version) is in the specified list of timestamps + * (versions). *

      - * Note: Use of this filter overrides any time range/time stamp - * options specified using {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)}, + * Note: Use of this filter overrides any time range/time stamp options specified using + * {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)}, * {@link org.apache.hadoop.hbase.client.Scan#setTimeRange(long, long)}, - * {@link org.apache.hadoop.hbase.client.Get#setTimestamp(long)}, - * or {@link org.apache.hadoop.hbase.client.Scan#setTimestamp(long)}. + * {@link org.apache.hadoop.hbase.client.Get#setTimestamp(long)}, or + * {@link org.apache.hadoop.hbase.client.Scan#setTimestamp(long)}. */ @InterfaceAudience.Public public class TimestampsFilter extends FilterBase { @@ -54,23 +54,19 @@ public class TimestampsFilter extends FilterBase { long minTimestamp = Long.MAX_VALUE; /** - * Constructor for filter that retains only the specified timestamps in the list. - * @param timestamps + * Constructor for filter that retains only the specified timestamps in the list. n */ public TimestampsFilter(List timestamps) { this(timestamps, false); } /** - * Constructor for filter that retains only those - * cells whose timestamp (version) is in the specified - * list of timestamps. - * + * Constructor for filter that retains only those cells whose timestamp (version) is in the + * specified list of timestamps. * @param timestamps list of timestamps that are wanted. - * @param canHint should the filter provide a seek hint? This can skip - * past delete tombstones, so it should only be used when that - * is not an issue ( no deletes, or don't care if data - * becomes visible) + * @param canHint should the filter provide a seek hint? This can skip past delete tombstones, + * so it should only be used when that is not an issue ( no deletes, or don't + * care if data becomes visible) */ public TimestampsFilter(List timestamps, boolean canHint) { for (Long timestamp : timestamps) { @@ -98,7 +94,7 @@ private void init() { /** * Gets the minimum timestamp requested by filter. - * @return minimum timestamp requested by filter. + * @return minimum timestamp requested by filter. */ public long getMin() { return minTimestamp; @@ -122,13 +118,10 @@ public ReturnCode filterCell(final Cell c) { return canHint ? ReturnCode.SEEK_NEXT_USING_HINT : ReturnCode.SKIP; } - /** - * Pick the next cell that the scanner should seek to. Since this can skip any number of cells - * any of which can be a delete this can resurect old data. - * - * The method will only be used if canHint was set to true while creating the filter. - * + * Pick the next cell that the scanner should seek to. Since this can skip any number of cells any + * of which can be a delete this can resurect old data. The method will only be used if canHint + * was set to true while creating the filter. * @throws IOException This will never happen. 
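[Editorial aside, not part of the patch: a small sketch of the behaviour described in the TimestampsFilter javadoc above. The row key and timestamp values are made up. As the javadoc notes, the filter overrides any time range set on the Get or Scan, and the two-argument constructor's canHint flag (seek hints) should only be enabled when skipping past delete tombstones, and thus possibly resurrecting old data, is acceptable.]

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.filter.TimestampsFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimestampsFilterExample {
      // Return only the cells written at exactly these two versions (timestamps).
      static Get exactVersions() {
        List<Long> versions = Arrays.asList(1690000000000L, 1690000100000L);
        Get get = new Get(Bytes.toBytes("row1"));
        get.setFilter(new TimestampsFilter(versions));        // no seek hints
        // get.setFilter(new TimestampsFilter(versions, true)); // with seek hints (canHint)
        return get;
      }
    }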
*/ @Override @@ -157,9 +150,9 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { return PrivateCellUtil.createFirstOnRowColTS(currentCell, nextTimestamp); } - public static Filter createFilterFromArguments(ArrayList filterArguments) { + public static Filter createFilterFromArguments(ArrayList filterArguments) { ArrayList timestamps = new ArrayList<>(filterArguments.size()); - for (int i = 0; i filterArgument */ @Override public byte[] toByteArray() { - FilterProtos.TimestampsFilter.Builder builder = - FilterProtos.TimestampsFilter.newBuilder(); + FilterProtos.TimestampsFilter.Builder builder = FilterProtos.TimestampsFilter.newBuilder(); builder.addAllTimestamps(this.timestamps); builder.setCanHint(canHint); return builder.build().toByteArray(); @@ -180,12 +172,10 @@ public byte[] toByteArray() { /** * @param pbBytes A pb serialized {@link TimestampsFilter} instance - * * @return An instance of {@link TimestampsFilter} made from bytes * @see #toByteArray */ - public static TimestampsFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { + public static TimestampsFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.TimestampsFilter proto; try { proto = FilterProtos.TimestampsFilter.parseFrom(pbBytes); @@ -193,20 +183,20 @@ public static TimestampsFilter parseFrom(final byte[] pbBytes) throw new DeserializationException(e); } return new TimestampsFilter(proto.getTimestampsList(), - proto.hasCanHint() && proto.getCanHint()); + proto.hasCanHint() && proto.getCanHint()); } /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof TimestampsFilter)) return false; - TimestampsFilter other = (TimestampsFilter)o; + TimestampsFilter other = (TimestampsFilter) o; return this.getTimestamps().equals(other.getTimestamps()); } @@ -230,8 +220,8 @@ protected String toString(int maxTimestamps) { } } - return String.format("%s (%d/%d): [%s] canHint: [%b]", this.getClass().getSimpleName(), - count, this.timestamps.size(), tsList.toString(), canHint); + return String.format("%s (%d/%d): [%s] canHint: [%b]", this.getClass().getSimpleName(), count, + this.timestamps.size(), tsList.toString(), canHint); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java index 5b5ec6f619da..810f71efbd67 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,37 +19,37 @@ import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * This filter is used to filter based on column value. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * cell value. + * This filter is used to filter based on column value. It takes an operator (equal, greater, not + * equal, etc) and a byte [] comparator for the cell value. *

      - * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
      - * to add more control.
      + * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} to add more
      + * control.
        * <p>
        * Multiple filters can be combined using {@link FilterList}.
        * <p>
      - * To test the value of a single qualifier when scanning multiple qualifiers, - * use {@link SingleColumnValueFilter}. + * To test the value of a single qualifier when scanning multiple qualifiers, use + * {@link SingleColumnValueFilter}. */ @InterfaceAudience.Public public class ValueFilter extends CompareFilter { /** * Constructor. - * @param valueCompareOp the compare op for value matching + * @param valueCompareOp the compare op for value matching * @param valueComparator the comparator for value matching */ public ValueFilter(final CompareOperator valueCompareOp, - final ByteArrayComparable valueComparator) { + final ByteArrayComparable valueComparator) { super(valueCompareOp, valueComparator); } @@ -62,11 +61,11 @@ public ReturnCode filterCell(final Cell c) { return ReturnCode.INCLUDE; } - public static Filter createFilterFromArguments(ArrayList filterArguments) { - @SuppressWarnings("rawtypes") // for arguments + public static Filter createFilterFromArguments(ArrayList filterArguments) { + @SuppressWarnings("rawtypes") // for arguments ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOperator compareOp = (CompareOperator)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + CompareOperator compareOp = (CompareOperator) arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1); return new ValueFilter(compareOp, comparator); } @@ -74,21 +73,17 @@ public static Filter createFilterFromArguments(ArrayList filterArgument * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { - FilterProtos.ValueFilter.Builder builder = - FilterProtos.ValueFilter.newBuilder(); + public byte[] toByteArray() { + FilterProtos.ValueFilter.Builder builder = FilterProtos.ValueFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); } /** * @param pbBytes A pb serialized {@link ValueFilter} instance - * @return An instance of {@link ValueFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray + * @return An instance of {@link ValueFilter} made from bytes n * @see #toByteArray */ - public static ValueFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static ValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ValueFilter proto; try { proto = FilterProtos.ValueFilter.parseFrom(pbBytes); @@ -105,12 +100,12 @@ public static ValueFilter parseFrom(final byte [] pbBytes) } catch (IOException ioe) { throw new DeserializationException(ioe); } - return new ValueFilter(valueCompareOp,valueComparator); + return new ValueFilter(valueCompareOp, valueComparator); } /** - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
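[Editorial aside, not part of the patch: the ValueFilter description above ("filter based on column value", wrappable and combinable via FilterList) in a minimal compilable form. The threshold and class name are hypothetical; note that BinaryComparator compares the raw serialized bytes.]

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.ValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ValueFilterExample {
      // Keep only cells (not whole rows) whose value is >= 10; wrap in SkipFilter or
      // WhileMatchFilter, or combine with other filters via FilterList, for more control.
      static Scan cellsAtLeastTen() {
        Scan scan = new Scan();
        scan.setFilter(new ValueFilter(CompareOperator.GREATER_OR_EQUAL,
          new BinaryComparator(Bytes.toBytes(10L))));
        return scan;
      }
    }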
*/ @Override boolean areSerializedFieldsEqual(Filter o) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index 5bfdaa35574b..94cdd9794b3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.filter; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon - * as the wrapped filters {@link Filter#filterRowKey(Cell)}, + * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon as the wrapped + * filters {@link Filter#filterRowKey(Cell)}, * {@link Filter#filterCell(org.apache.hadoop.hbase.Cell)}, * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or - * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods - * returns true. + * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods returns true. */ @InterfaceAudience.Public public class WhileMatchFilter extends FilterBase { @@ -90,7 +88,7 @@ public boolean filterRow() throws IOException { changeFAR(filterRow); return filterRow; } - + @Override public boolean hasFilterRow() { return true; @@ -101,8 +99,7 @@ public boolean hasFilterRow() { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.WhileMatchFilter.Builder builder = - FilterProtos.WhileMatchFilter.newBuilder(); + FilterProtos.WhileMatchFilter.Builder builder = FilterProtos.WhileMatchFilter.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } @@ -113,8 +110,7 @@ public byte[] toByteArray() throws IOException { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static WhileMatchFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static WhileMatchFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.WhileMatchFilter proto; try { proto = FilterProtos.WhileMatchFilter.parseFrom(pbBytes); @@ -130,15 +126,15 @@ public static WhileMatchFilter parseFrom(final byte [] pbBytes) /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. 
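[Editorial aside, not part of the patch: a sketch of the early-termination behaviour described in the WhileMatchFilter javadoc above. The stop key "row-0500" is hypothetical. A plain RowFilter would keep scanning and merely exclude non-matching rows; wrapping it in WhileMatchFilter makes filterAllRemaining() return true at the first row that fails the check, ending the scan there.]

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.RowFilter;
    import org.apache.hadoop.hbase.filter.WhileMatchFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WhileMatchExample {
      // Scan rows while the row key is <= "row-0500"; the wrapper stops the scan
      // at the first row key beyond that point instead of skipping it.
      static Scan scanUpToRow0500() {
        Scan scan = new Scan();
        scan.setFilter(new WhileMatchFilter(new RowFilter(CompareOperator.LESS_OR_EQUAL,
          new BinaryComparator(Bytes.toBytes("row-0500")))));
        return scan;
      }
    }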
*/ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof WhileMatchFilter)) return false; - WhileMatchFilter other = (WhileMatchFilter)o; + WhileMatchFilter other = (WhileMatchFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 5acfe3ac2616..7c0149ccb8a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.ipc.IPCUtil.toIOE; @@ -139,28 +138,29 @@ public abstract class AbstractRpcClient implements RpcC private int maxConcurrentCallsPerServer; private static final LoadingCache concurrentCounterCache = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS). - build(new CacheLoader() { - @Override public AtomicInteger load(Address key) throws Exception { - return new AtomicInteger(0); - } - }); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS) + .build(new CacheLoader() { + @Override + public AtomicInteger load(Address key) throws Exception { + return new AtomicInteger(0); + } + }); /** * Construct an IPC client for the cluster clusterId - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id * @param localAddr client socket bind address. - * @param metrics the connection metrics + * @param metrics the connection metrics */ public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { + MetricsConnection metrics) { this.userProvider = UserProvider.instantiate(conf); this.localAddr = localAddr; this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); this.clusterId = clusterId != null ? 
clusterId : HConstants.CLUSTER_ID_DEFAULT; - this.failureSleep = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.failureSleep = + conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true); this.cellBlockBuilder = new CellBlockBuilder(conf); @@ -176,8 +176,8 @@ public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress loc this.readTO = conf.getInt(SOCKET_TIMEOUT_READ, DEFAULT_SOCKET_TIMEOUT_READ); this.writeTO = conf.getInt(SOCKET_TIMEOUT_WRITE, DEFAULT_SOCKET_TIMEOUT_WRITE); this.metrics = metrics; - this.maxConcurrentCallsPerServer = conf.getInt( - HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD, + this.maxConcurrentCallsPerServer = + conf.getInt(HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD, HConstants.DEFAULT_HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD); this.connections = new PoolMap<>(getPoolType(conf), getPoolSize(conf)); @@ -192,11 +192,11 @@ public void run() { if (LOG.isDebugEnabled()) { LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor + ", tcpKeepAlive=" - + this.tcpKeepAlive + ", tcpNoDelay=" + this.tcpNoDelay + ", connectTO=" + this.connectTO - + ", readTO=" + this.readTO + ", writeTO=" + this.writeTO + ", minIdleTimeBeforeClose=" - + this.minIdleTimeBeforeClose + ", maxRetries=" + this.maxRetries + ", fallbackAllowed=" - + this.fallbackAllowed + ", bind address=" - + (this.localAddr != null ? this.localAddr : "null")); + + this.tcpKeepAlive + ", tcpNoDelay=" + this.tcpNoDelay + ", connectTO=" + this.connectTO + + ", readTO=" + this.readTO + ", writeTO=" + this.writeTO + ", minIdleTimeBeforeClose=" + + this.minIdleTimeBeforeClose + ", maxRetries=" + this.maxRetries + ", fallbackAllowed=" + + this.fallbackAllowed + ", bind address=" + + (this.localAddr != null ? this.localAddr : "null")); } } @@ -298,7 +298,8 @@ private static int getPoolSize(Configuration config) { int poolSize = config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); if (poolSize <= 0) { - LOG.warn("{} must be positive. Using default value: 1", HConstants.HBASE_CLIENT_IPC_POOL_SIZE); + LOG.warn("{} must be positive. Using default value: 1", + HConstants.HBASE_CLIENT_IPC_POOL_SIZE); return 1; } else { return poolSize; @@ -318,13 +319,13 @@ private int nextCallId() { * Make a blocking call. Throws exceptions if there are network problems or if the remote code * threw an exception. * @param ticket Be careful which ticket you pass. A new user will mean a new Connection. - * {@link UserProvider#getCurrent()} makes a new instance of User each time so will be a - * new Connection each time. + * {@link UserProvider#getCurrent()} makes a new instance of User each time so will + * be a new Connection each time. * @return A pair with the Message response and the Cell data (if any). 
*/ private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc, - Message param, Message returnType, final User ticket, final Address isa) - throws ServiceException { + Message param, Message returnType, final User ticket, final Address isa) + throws ServiceException { BlockingRpcCallback done = new BlockingRpcCallback<>(); callMethod(md, hrc, param, returnType, ticket, isa, done); Message val; @@ -348,10 +349,10 @@ private T getConnection(ConnectionId remoteId) throws IOException { if (failedServers.isFailedServer(remoteId.getAddress())) { if (LOG.isDebugEnabled()) { LOG.debug("Not trying to connect to " + remoteId.getAddress() - + " this server is in the failed servers list"); + + " this server is in the failed servers list"); } throw new FailedServerException( - "This server is in the failed servers list: " + remoteId.getAddress()); + "This server is in the failed servers list: " + remoteId.getAddress()); } T conn; synchronized (connections) { @@ -396,10 +397,7 @@ private void onCallFinished(Call call, HBaseRpcController hrc, Address addr, private Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController hrc, final Message param, Message returnType, final User ticket, final Address addr, final RpcCallback callback) { - Span span = new IpcClientSpanBuilder() - .setMethodDescriptor(md) - .setRemoteAddress(addr) - .build(); + Span span = new IpcClientSpanBuilder().setMethodDescriptor(md).setRemoteAddress(addr).build(); try (Scope scope = span.makeCurrent()) { final MetricsConnection.CallStats cs = MetricsConnection.newCallStats(); cs.setStartTime(EnvironmentEdgeManager.currentTime()); @@ -465,10 +463,12 @@ public void cancelConnections(ServerName sn) { synchronized (connections) { for (T connection : connections.values()) { ConnectionId remoteId = connection.remoteId(); - if (remoteId.getAddress().getPort() == sn.getPort() - && remoteId.getAddress().getHostName().equals(sn.getHostname())) { + if ( + remoteId.getAddress().getPort() == sn.getPort() + && remoteId.getAddress().getHostName().equals(sn.getHostname()) + ) { LOG.info("The server on " + sn.toString() + " is dead - stopping the connection " - + connection.remoteId); + + connection.remoteId); connections.remove(remoteId, connection); connection.shutdown(); connection.cleanupConnection(); @@ -476,14 +476,15 @@ public void cancelConnections(ServerName sn) { } } } + /** * Configure an hbase rpccontroller - * @param controller to configure + * @param controller to configure * @param channelOperationTimeout timeout for operation * @return configured controller */ - static HBaseRpcController configureHBaseRpcController( - RpcController controller, int channelOperationTimeout) { + static HBaseRpcController configureHBaseRpcController(RpcController controller, + int channelOperationTimeout) { HBaseRpcController hrc; if (controller != null && controller instanceof HBaseRpcController) { hrc = (HBaseRpcController) controller; @@ -525,7 +526,7 @@ public void close() { @Override public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, final User ticket, - int rpcTimeout) { + int rpcTimeout) { return new BlockingRpcChannelImplementation(this, createAddr(sn), ticket, rpcTimeout); } @@ -544,8 +545,8 @@ private static class AbstractRpcChannel { protected final int rpcTimeout; - protected AbstractRpcChannel(AbstractRpcClient rpcClient, Address addr, - User ticket, int rpcTimeout) { + protected AbstractRpcChannel(AbstractRpcClient rpcClient, Address addr, User ticket, + int rpcTimeout) 
{ this.addr = addr; this.rpcClient = rpcClient; this.ticket = ticket; @@ -578,10 +579,10 @@ protected HBaseRpcController configureRpcController(RpcController controller) { * Blocking rpc channel that goes via hbase rpc. */ public static class BlockingRpcChannelImplementation extends AbstractRpcChannel - implements BlockingRpcChannel { + implements BlockingRpcChannel { - protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, - Address addr, User ticket, int rpcTimeout) { + protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, + User ticket, int rpcTimeout) { super(rpcClient, addr, ticket, rpcTimeout); } @@ -596,11 +597,10 @@ public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController /** * Async rpc channel that goes via hbase rpc. */ - public static class RpcChannelImplementation extends AbstractRpcChannel implements - RpcChannel { + public static class RpcChannelImplementation extends AbstractRpcChannel implements RpcChannel { - protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, - User ticket, int rpcTimeout) { + protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, User ticket, + int rpcTimeout) { super(rpcClient, addr, ticket, rpcTimeout); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java index 145754b6f27d..4f4ac9101379 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java index da636a371613..1b203727f267 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; - import java.io.IOException; import java.io.InterruptedIOException; - import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + /** - * Simple {@link RpcCallback} implementation providing a - * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which - * will block util the instance's {@link BlockingRpcCallback#run(Object)} method has been called. - * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method. 
+ * Simple {@link RpcCallback} implementation providing a {@link java.util.concurrent.Future}-like + * {@link BlockingRpcCallback#get()} method, which will block util the instance's + * {@link BlockingRpcCallback#run(Object)} method has been called. {@code R} is the RPC response + * type that will be passed to the {@link #run(Object)} method. */ @InterfaceAudience.Private public class BlockingRpcCallback implements RpcCallback { @@ -52,8 +50,8 @@ public void run(R parameter) { /** * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was - * passed. When used asynchronously, this method will block until the {@link #run(Object)} - * method has been called. + * passed. When used asynchronously, this method will block until the {@link #run(Object)} method + * has been called. * @return the response object or {@code null} if no response was passed */ public synchronized R get() throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java index dd8f96bb2b9b..7fffdad935fc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,13 +47,13 @@ public class BlockingRpcClient extends AbstractRpcClient /** * Construct an IPC client for the cluster {@code clusterId} with the default SocketFactory This * method is called with reflection by the RpcClientFactory to create an instance - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id * @param localAddr client socket bind address. - * @param metrics the connection metrics + * @param metrics the connection metrics */ public BlockingRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { + MetricsConnection metrics) { super(conf, clusterId, localAddr, metrics); this.socketFactory = NetUtils.getDefaultSocketFactory(conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index eb8e1d92b216..c8adc6a8cc3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -151,7 +151,7 @@ public CallSender(String name, Configuration conf) { public void sendCall(final Call call) throws IOException { if (callsToWrite.size() >= maxQueueSize) { throw new IOException("Can't add " + call.toShortString() - + " to the write queue. callsToWrite.size()=" + callsToWrite.size()); + + " to the write queue. callsToWrite.size()=" + callsToWrite.size()); } callsToWrite.offer(call); BlockingRpcConnection.this.notifyAll(); @@ -163,8 +163,8 @@ public void remove(Call call) { // it means as well that we don't know how many calls we cancelled. 
calls.remove(call.id); call.setException(new CallCancelledException(call.toShortString() + ", waitTime=" - + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" - + call.timeout)); + + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" + + call.timeout)); } /** @@ -206,8 +206,8 @@ public void run() { * Cleans the call not yet sent when we finish. */ public void cleanup(IOException e) { - IOException ie = new ConnectionClosingException( - "Connection to " + remoteId.getAddress() + " is closing."); + IOException ie = + new ConnectionClosingException("Connection to " + remoteId.getAddress() + " is closing."); for (Call call : callsToWrite) { call.setException(ie); } @@ -217,8 +217,8 @@ public void cleanup(IOException e) { BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, - rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, - rpcClient.metrics); + rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, + rpcClient.metrics); this.rpcClient = rpcClient; this.connectionHeaderPreamble = getConnectionHeaderPreamble(); ConnectionHeader header = getConnectionHeader(); @@ -231,8 +231,8 @@ public void cleanup(IOException e) { UserGroupInformation ticket = remoteId.ticket.getUGI(); this.threadName = "BRPC Connection (" + this.rpcClient.socketFactory.hashCode() + ") to " - + remoteId.getAddress().toString() - + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName())); + + remoteId.getAddress().toString() + + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName())); if (this.rpcClient.conf.getBoolean(BlockingRpcClient.SPECIFIC_WRITE_THREAD, false)) { callSender = new CallSender(threadName, this.rpcClient.conf); @@ -263,14 +263,14 @@ protected void setupConnection() throws IOException { * The max number of retries is 45, which amounts to 20s*45 = 15 minutes retries. */ if (LOG.isDebugEnabled()) { - LOG.debug("Received exception in connection setup.\n" + - StringUtils.stringifyException(toe)); + LOG.debug( + "Received exception in connection setup.\n" + StringUtils.stringifyException(toe)); } handleConnectionFailure(timeoutFailures++, this.rpcClient.maxRetries, toe); } catch (IOException ie) { if (LOG.isDebugEnabled()) { - LOG.debug("Received exception in connection setup.\n" + - StringUtils.stringifyException(ie)); + LOG.debug( + "Received exception in connection setup.\n" + StringUtils.stringifyException(ie)); } handleConnectionFailure(ioFailures++, this.rpcClient.maxRetries, ie); } @@ -284,11 +284,11 @@ protected void setupConnection() throws IOException { * the sleep is synchronized; the locks will be retained. * @param curRetries current number of retries * @param maxRetries max number of retries allowed - * @param ioe failure reason + * @param ioe failure reason * @throws IOException if max number of retries is reached */ private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe) - throws IOException { + throws IOException { closeSocket(); // throw the exception if the maximum number of retries is reached @@ -304,9 +304,8 @@ private void handleConnectionFailure(int curRetries, int maxRetries, IOException } if (LOG.isInfoEnabled()) { - LOG.info("Retrying connect to server: " + remoteId.getAddress() + - " after sleeping " + this.rpcClient.failureSleep + "ms. 
Already tried " + curRetries + - " time(s)."); + LOG.info("Retrying connect to server: " + remoteId.getAddress() + " after sleeping " + + this.rpcClient.failureSleep + "ms. Already tried " + curRetries + " time(s)."); } } @@ -359,15 +358,15 @@ private void disposeSasl() { } private boolean setupSaslConnection(final InputStream in2, final OutputStream out2) - throws IOException { + throws IOException { if (this.metrics != null) { this.metrics.incrNsLookups(); } saslRpcClient = new HBaseSaslRpcClient(this.rpcClient.conf, provider, token, - socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, - this.rpcClient.conf.get("hbase.rpc.protection", - QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), - this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); + socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, + this.rpcClient.conf.get("hbase.rpc.protection", + QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), + this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); return saslRpcClient.saslConnect(in2, out2); } @@ -378,15 +377,14 @@ private boolean setupSaslConnection(final InputStream in2, final OutputStream ou * connection again. The other problem is to do with ticket expiry. To handle that, a relogin is * attempted. *

      - * The retry logic is governed by the {@link SaslClientAuthenticationProvider#canRetry()}
      - * method. Some providers have the ability to obtain new credentials and then re-attempt to
      - * authenticate with HBase services. Other providers will continue to fail if they failed the
      - * first time -- for those, we want to fail-fast.
      + * The retry logic is governed by the {@link SaslClientAuthenticationProvider#canRetry()} method.
      + * Some providers have the ability to obtain new credentials and then re-attempt to authenticate
      + * with HBase services. Other providers will continue to fail if they failed the first time -- for
      + * those, we want to fail-fast.
        * </p>
      */ private void handleSaslConnectionFailure(final int currRetries, final int maxRetries, - final Exception ex, final UserGroupInformation user) - throws IOException, InterruptedException { + final Exception ex, final UserGroupInformation user) throws IOException, InterruptedException { closeSocket(); user.doAs(new PrivilegedExceptionAction() { @Override @@ -400,7 +398,7 @@ public Object run() throws IOException, InterruptedException { } if (ex instanceof SaslException) { String msg = "SASL authentication failed." - + " The most likely cause is missing or invalid credentials."; + + " The most likely cause is missing or invalid credentials."; throw new RuntimeException(msg, ex); } throw new IOException(ex); @@ -424,9 +422,9 @@ public Object run() throws IOException, InterruptedException { Thread.sleep(ThreadLocalRandom.current().nextInt(reloginMaxBackoff) + 1); return null; } else { - String msg = "Failed to initiate connection for " - + UserGroupInformation.getLoginUser().getUserName() + " to " - + securityInfo.getServerPrincipal(); + String msg = + "Failed to initiate connection for " + UserGroupInformation.getLoginUser().getUserName() + + " to " + securityInfo.getServerPrincipal(); throw new IOException(msg, ex); } } @@ -442,10 +440,10 @@ private void setupIOstreams() throws IOException { if (this.rpcClient.failedServers.isFailedServer(remoteId.getAddress())) { if (LOG.isDebugEnabled()) { LOG.debug("Not trying to connect to " + remoteId.getAddress() - + " this server is in the failed servers list"); + + " this server is in the failed servers list"); } throw new FailedServerException( - "This server is in the failed servers list: " + remoteId.getAddress()); + "This server is in the failed servers list: " + remoteId.getAddress()); } try { @@ -539,10 +537,10 @@ private void writeConnectionHeader() throws IOException { boolean isCryptoAesEnable = false; // check if Crypto AES is enabled if (saslRpcClient != null) { - boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. 
- getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); - isCryptoAesEnable = saslEncryptionEnabled && conf.getBoolean( - CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT); + boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() + .equalsIgnoreCase(saslRpcClient.getSaslQOP()); + isCryptoAesEnable = saslEncryptionEnabled + && conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT); } // if Crypto AES is enabled, set transformation and negotiate with server @@ -566,7 +564,7 @@ private void processResponseForConnectionHeader() throws IOException { } RPCProtos.ConnectionHeaderResponse connectionHeaderResponse = - RPCProtos.ConnectionHeaderResponse.parseFrom(buff); + RPCProtos.ConnectionHeaderResponse.parseFrom(buff); // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher if (connectionHeaderResponse.hasCryptoCipherMeta()) { @@ -574,16 +572,17 @@ private void processResponseForConnectionHeader() throws IOException { } waitingConnectionHeaderResponse = false; } catch (SocketTimeoutException ste) { - LOG.error(HBaseMarkers.FATAL, "Can't get the connection header response for rpc timeout, " + LOG.error(HBaseMarkers.FATAL, + "Can't get the connection header response for rpc timeout, " + "please check if server has the correct configuration to support the additional " - + "function.", ste); + + "function.", + ste); // timeout when waiting the connection header response, ignore the additional function throw new IOException("Timeout while waiting connection header response", ste); } } - private void negotiateCryptoAes(RPCProtos.CryptoCipherMeta cryptoCipherMeta) - throws IOException { + private void negotiateCryptoAes(RPCProtos.CryptoCipherMeta cryptoCipherMeta) throws IOException { // initialize the Crypto AES with CryptoCipherMeta saslRpcClient.initCryptoCipher(cryptoCipherMeta, this.rpcClient.conf); // reset the inputStream/outputStream for Crypto AES encryption @@ -600,7 +599,7 @@ private void writeRequest(Call call) throws IOException { ByteBuf cellBlock = null; try { cellBlock = this.rpcClient.cellBlockBuilder.buildCellBlock(this.codec, this.compressor, - call.cells, PooledByteBufAllocator.DEFAULT); + call.cells, PooledByteBufAllocator.DEFAULT); CellBlockMeta cellBlockMeta; if (cellBlock != null) { cellBlockMeta = CellBlockMeta.newBuilder().setLength(cellBlock.readableBytes()).build(); @@ -624,7 +623,7 @@ private void writeRequest(Call call) throws IOException { try { call.callStats.setRequestSizeBytes(write(this.out, requestHeader, call.param, cellBlock)); } catch (Throwable t) { - if(LOG.isTraceEnabled()) { + if (LOG.isTraceEnabled()) { LOG.trace("Error while writing {}", call.toShortString()); } IOException e = IPCUtil.toIOE(t); @@ -667,7 +666,7 @@ private void readResponse() { if (call != null) { call.callStats.setResponseSizeBytes(totalSize); call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } return; } @@ -677,7 +676,7 @@ private void readResponse() { call.setException(re); call.callStats.setResponseSizeBytes(totalSize); call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); if (isFatalConnectionException(exceptionResponse)) { synchronized (this) { closeConn(re); @@ -701,7 +700,7 @@ private void readResponse() { call.setResponse(value, 
cellBlockScanner); call.callStats.setResponseSizeBytes(totalSize); call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } } catch (IOException e) { if (expectedCall) { @@ -772,7 +771,7 @@ public void cleanupConnection() { @Override public synchronized void sendRequest(final Call call, HBaseRpcController pcrc) - throws IOException { + throws IOException { pcrc.notifyOnCancel(new RpcCallback() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java index 137e60b74609..3dc48ce3e00e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; - import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; + /** * We will expose the connection to upper layer before initialized, so we need to buffer the calls * passed in and write them out once the connection is established. @@ -35,7 +34,8 @@ class BufferCallBeforeInitHandler extends ChannelDuplexHandler { private enum BufferCallAction { - FLUSH, FAIL + FLUSH, + FAIL } public static final class BufferCallEvent { @@ -45,7 +45,7 @@ public static final class BufferCallEvent { public final IOException error; private BufferCallEvent(BufferCallBeforeInitHandler.BufferCallAction action, - IOException error) { + IOException error) { this.action = action; this.error = error; } @@ -59,8 +59,8 @@ public static BufferCallBeforeInitHandler.BufferCallEvent fail(IOException error } } - private static final BufferCallEvent SUCCESS_EVENT = new BufferCallEvent(BufferCallAction.FLUSH, - null); + private static final BufferCallEvent SUCCESS_EVENT = + new BufferCallEvent(BufferCallAction.FLUSH, null); private final Map id2Call = new HashMap<>(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java index 8d23d9243399..3c0e24e57145 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ class Call { // The return type. Used to create shell into which we deserialize the response if any. 
Message responseDefaultType; @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", - justification = "Direct access is only allowed after done") + justification = "Direct access is only allowed after done") IOException error; // exception, null if value private boolean done; // true when call is done final Descriptors.MethodDescriptor md; @@ -61,9 +61,9 @@ class Call { final Span span; Timeout timeoutTask; - Call(int id, final Descriptors.MethodDescriptor md, Message param, - final CellScanner cells, final Message responseDefaultType, int timeout, int priority, - RpcCallback callback, MetricsConnection.CallStats callStats) { + Call(int id, final Descriptors.MethodDescriptor md, Message param, final CellScanner cells, + final Message responseDefaultType, int timeout, int priority, RpcCallback callback, + MetricsConnection.CallStats callStats) { this.param = param; this.md = md; this.cells = cells; @@ -81,20 +81,15 @@ class Call { * Builds a simplified {@link #toString()} that includes just the id and method name. */ public String toShortString() { - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("id", id) - .append("methodName", md.getName()) - .toString(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("id", id) + .append("methodName", md.getName()).toString(); } @Override public String toString() { // Call[id=32153218,methodName=Get] - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .appendSuper(toShortString()) - .append("param", Optional.ofNullable(param) - .map(ProtobufUtil::getShortTextFormat) - .orElse("")) + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(toShortString()) + .append("param", Optional.ofNullable(param).map(ProtobufUtil::getShortTextFormat).orElse("")) .toString(); } @@ -137,7 +132,7 @@ public void setException(IOException error) { /** * Set the return value when there is no error. Notify the caller the call is done. * @param response return value of the call. - * @param cells Can be null + * @param cells Can be null */ public void setResponse(Message response, final CellScanner cells) { synchronized (this) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java index 695c031b142f..d710f9d65535 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallCancelledException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java index 1ee04b5801f1..6bf1fcdff191 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallEvent.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,8 @@ class CallEvent { public enum Type { - TIMEOUT, CANCELLED + TIMEOUT, + CANCELLED } final Type type; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java index d4105a09fa38..6dfef9c5715c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallTimeoutException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,9 +33,8 @@ public CallTimeoutException(final String msg) { /** * CallTimeoutException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ public CallTimeoutException(final String message, final Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java index 2cfde8930f6c..feb76d7245a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CallerDisconnectedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,17 +18,16 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Exception indicating that the remote host making this IPC lost its - * IPC connection. This will never be returned back to a client, - * but is only used for logging on the server side, etc. + * Exception indicating that the remote host making this IPC lost its IPC connection. This will + * never be returned back to a client, but is only used for logging on the server side, etc. */ @InterfaceAudience.Public public class CallerDisconnectedException extends IOException { private static final long serialVersionUID = 1L; + public CallerDisconnectedException(String msg) { super(msg); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java index 111f7684224d..9e9c0688ecee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,27 +17,19 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; - import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; - import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.io.ByteBuffAllocator; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffInputStream; import org.apache.hadoop.hbase.io.ByteBufferInputStream; import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; @@ -50,6 +42,13 @@ import org.apache.hadoop.io.compress.CompressionInputStream; import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; /** * Helper class for building cell block. @@ -71,13 +70,13 @@ class CellBlockBuilder { public CellBlockBuilder(Configuration conf) { this.conf = conf; - this.cellBlockDecompressionMultiplier = conf - .getInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 3); + this.cellBlockDecompressionMultiplier = + conf.getInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 3); // Guess that 16k is a good size for rpc buffer. Could go bigger. See the TODO below in // #buildCellBlock. - this.cellBlockBuildingInitialBufferSize = ClassSize - .align(conf.getInt("hbase.ipc.cellblock.building.initial.buffersize", 16 * 1024)); + this.cellBlockBuildingInitialBufferSize = + ClassSize.align(conf.getInt("hbase.ipc.cellblock.building.initial.buffersize", 16 * 1024)); } private interface OutputStreamSupplier { @@ -105,17 +104,12 @@ public int size() { /** * Puts CellScanner Cells into a cell block using passed in codec and/or - * compressor. - * @param codec - * @param compressor - * @param cellScanner - * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using - * passed in codec and/or compressor; the returned buffer has - * been flipped and is ready for reading. Use limit to find total size. - * @throws IOException + * compressor. nnn * @return Null or byte buffer filled with a cellblock filled with + * passed-in Cells encoded using passed in codec and/or compressor; the + * returned buffer has been flipped and is ready for reading. Use limit to find total size. 
n */ public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, - final CellScanner cellScanner) throws IOException { + final CellScanner cellScanner) throws IOException { ByteBufferOutputStreamSupplier supplier = new ByteBufferOutputStreamSupplier(); if (buildCellBlock(codec, compressor, cellScanner, supplier)) { ByteBuffer bb = supplier.baos.getByteBuffer(); @@ -150,7 +144,7 @@ public int size() { } public ByteBuf buildCellBlock(Codec codec, CompressionCodec compressor, CellScanner cellScanner, - ByteBufAllocator alloc) throws IOException { + ByteBufAllocator alloc) throws IOException { ByteBufOutputStreamSupplier supplier = new ByteBufOutputStreamSupplier(alloc); if (buildCellBlock(codec, compressor, cellScanner, supplier)) { return supplier.buf; @@ -160,7 +154,7 @@ public ByteBuf buildCellBlock(Codec codec, CompressionCodec compressor, CellScan } private boolean buildCellBlock(final Codec codec, final CompressionCodec compressor, - final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException { + final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException { if (cellScanner == null) { return false; } @@ -171,13 +165,13 @@ private boolean buildCellBlock(final Codec codec, final CompressionCodec compres encodeCellsTo(supplier.get(bufferSize), cellScanner, codec, compressor); if (LOG.isTraceEnabled() && bufferSize < supplier.size()) { LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + supplier.size() - + "; up hbase.ipc.cellblock.building.initial.buffersize?"); + + "; up hbase.ipc.cellblock.building.initial.buffersize?"); } return true; } private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec, - CompressionCodec compressor) throws IOException { + CompressionCodec compressor) throws IOException { Compressor poolCompressor = null; try { if (compressor != null) { @@ -205,10 +199,10 @@ private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec /** * Puts CellScanner Cells into a cell block using passed in codec and/or * compressor. - * @param codec to use for encoding - * @param compressor to use for encoding + * @param codec to use for encoding + * @param compressor to use for encoding * @param cellScanner to encode - * @param allocator to allocate the {@link ByteBuff}. + * @param allocator to allocate the {@link ByteBuff}. * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using * passed in codec and/or compressor; the returned buffer has * been flipped and is ready for reading. Use limit to find total size. 
If @@ -217,7 +211,7 @@ private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec * @throws IOException if encoding the cells fail */ public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionCodec compressor, - CellScanner cellScanner, ByteBuffAllocator allocator) throws IOException { + CellScanner cellScanner, ByteBuffAllocator allocator) throws IOException { if (cellScanner == null) { return null; } @@ -234,13 +228,13 @@ public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionC } /** - * @param codec to use for cellblock + * @param codec to use for cellblock * @param cellBlock to encode * @return CellScanner to work against the content of cellBlock * @throws IOException if encoding fails */ public CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor, - final byte[] cellBlock) throws IOException { + final byte[] cellBlock) throws IOException { // Use this method from Client side to create the CellScanner if (compressor != null) { ByteBuffer cellBlockBuf = decompress(compressor, cellBlock); @@ -254,15 +248,15 @@ public CellScanner createCellScanner(final Codec codec, final CompressionCodec c } /** - * @param codec to use for cellblock + * @param codec to use for cellblock * @param cellBlock ByteBuffer containing the cells written by the Codec. The buffer should be - * position()'ed at the start of the cell block and limit()'ed at the end. + * position()'ed at the start of the cell block and limit()'ed at the end. * @return CellScanner to work against the content of cellBlock. All cells created * out of the CellScanner will share the same ByteBuffer being passed. * @throws IOException if cell encoding fails */ public CellScanner createCellScannerReusingBuffers(final Codec codec, - final CompressionCodec compressor, ByteBuff cellBlock) throws IOException { + final CompressionCodec compressor, ByteBuff cellBlock) throws IOException { // Use this method from HRS to create the CellScanner // If compressed, decompress it first before passing it on else we will leak compression // resources if the stream is not closed properly after we let it out. @@ -273,21 +267,21 @@ public CellScanner createCellScannerReusingBuffers(final Codec codec, } private ByteBuffer decompress(CompressionCodec compressor, byte[] compressedCellBlock) - throws IOException { + throws IOException { ByteBuffer cellBlock = decompress(compressor, new ByteArrayInputStream(compressedCellBlock), - compressedCellBlock.length * this.cellBlockDecompressionMultiplier); + compressedCellBlock.length * this.cellBlockDecompressionMultiplier); return cellBlock; } private ByteBuff decompress(CompressionCodec compressor, ByteBuff compressedCellBlock) - throws IOException { + throws IOException { ByteBuffer cellBlock = decompress(compressor, new ByteBuffInputStream(compressedCellBlock), - compressedCellBlock.remaining() * this.cellBlockDecompressionMultiplier); + compressedCellBlock.remaining() * this.cellBlockDecompressionMultiplier); return new SingleByteBuff(cellBlock); } private ByteBuffer decompress(CompressionCodec compressor, InputStream cellBlockStream, - int osInitialSize) throws IOException { + int osInitialSize) throws IOException { // GZIPCodec fails w/ NPE if no configuration. 
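Aside, not part of the patch: a hypothetical round trip through the CellBlockBuilder methods touched in this file, assuming KeyValueCodec and no compressor. CellBlockBuilder is package-private, so the sketch pretends to live in its package; the helper class name is invented.

package org.apache.hadoop.hbase.ipc; // CellBlockBuilder is package-private

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBlockRoundTripSketch {
  public static void main(String[] args) throws Exception {
    CellBlockBuilder builder = new CellBlockBuilder(HBaseConfiguration.create());
    KeyValueCodec codec = new KeyValueCodec();
    CellScanner cells = CellUtil.createCellScanner(new Cell[] { new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value")) });
    // Encode: null compressor means no compression; the returned buffer is already flipped.
    ByteBuffer block = builder.buildCellBlock(codec, null, cells);
    byte[] bytes = new byte[block.remaining()];
    block.get(bytes);
    // Decode: the client-side path that turns a received cell block back into a CellScanner.
    CellScanner decoded = builder.createCellScanner(codec, null, bytes);
    while (decoded.advance()) {
      System.out.println(decoded.current());
    }
  }
}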
if (compressor instanceof Configurable) { ((Configurable) compressor).setConf(this.conf); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java index c08272474def..a9e9faf73fdb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellScannerButNoCodecException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index cac9ff27382e..6cb9cddd9feb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -18,14 +18,13 @@ package org.apache.hadoop.hbase.ipc; import java.util.Objects; - import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; /** - * This class holds the address and the user ticket, etc. The client connections - * to servers are uniquely identified by <remoteAddress, ticket, serviceName> + * This class holds the address and the user ticket, etc. The client connections to servers are + * uniquely identified by <remoteAddress, ticket, serviceName> */ @InterfaceAudience.Private class ConnectionId { @@ -61,21 +60,20 @@ public String toString() { public boolean equals(Object obj) { if (obj instanceof ConnectionId) { ConnectionId id = (ConnectionId) obj; - return address.equals(id.address) && - ((ticket != null && ticket.equals(id.ticket)) || - (ticket == id.ticket)) && Objects.equals(this.serviceName, id.serviceName); + return address.equals(id.address) + && ((ticket != null && ticket.equals(id.ticket)) || (ticket == id.ticket)) + && Objects.equals(this.serviceName, id.serviceName); } return false; } - @Override // simply use the default Object#hashcode() ? + @Override // simply use the default Object#hashcode() ? public int hashCode() { - return hashCode(ticket,serviceName,address); + return hashCode(ticket, serviceName, address); } public static int hashCode(User ticket, String serviceName, Address address) { - return (address.hashCode() + - PRIME * (PRIME * serviceName.hashCode() ^ - (ticket == null ? 0 : ticket.hashCode()))); + return (address.hashCode() + + PRIME * (PRIME * serviceName.hashCode() ^ (ticket == null ? 0 : ticket.hashCode()))); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java index c91d3f6e6214..c88e8fc5037c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java index 05c91ebdbf5e..279c1fbc0836 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java @@ -14,9 +14,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; @@ -55,14 +53,14 @@ public final class CoprocessorRpcUtils { private static final Logger LOG = LoggerFactory.getLogger(CoprocessorRpcUtils.class); /** - * We assume that all HBase protobuf services share a common package name - * (defined in the .proto files). + * We assume that all HBase protobuf services share a common package name (defined in the .proto + * files). */ private static final String hbaseServicePackage; static { Descriptors.ServiceDescriptor clientService = ClientProtos.ClientService.getDescriptor(); - hbaseServicePackage = clientService.getFullName() - .substring(0, clientService.getFullName().lastIndexOf(clientService.getName())); + hbaseServicePackage = clientService.getFullName().substring(0, + clientService.getFullName().lastIndexOf(clientService.getName())); } private CoprocessorRpcUtils() { @@ -70,10 +68,10 @@ private CoprocessorRpcUtils() { } /** - * Returns the name to use for coprocessor service calls. For core HBase services - * (in the hbase.pb protobuf package), this returns the unqualified name in order to provide - * backward compatibility across the package name change. For all other services, - * the fully-qualified service name is used. + * Returns the name to use for coprocessor service calls. For core HBase services (in the hbase.pb + * protobuf package), this returns the unqualified name in order to provide backward compatibility + * across the package name change. For all other services, the fully-qualified service name is + * used. */ public static String getServiceName(Descriptors.ServiceDescriptor service) { if (service.getFullName().startsWith(hbaseServicePackage)) { @@ -82,10 +80,10 @@ public static String getServiceName(Descriptors.ServiceDescriptor service) { return service.getFullName(); } - public static CoprocessorServiceRequest getCoprocessorServiceRequest( - final Descriptors.MethodDescriptor method, final Message request) { + public static CoprocessorServiceRequest + getCoprocessorServiceRequest(final Descriptors.MethodDescriptor method, final Message request) { return getCoprocessorServiceRequest(method, request, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY); + HConstants.EMPTY_BYTE_ARRAY); } public static CoprocessorServiceRequest getCoprocessorServiceRequest( @@ -157,10 +155,10 @@ public static ClientProtos.CoprocessorServiceResponse getResponse(final Message } /** - * Simple {@link RpcCallback} implementation providing a - * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which - * will block util the instance's {@link BlockingRpcCallback#run(Object)} method has been called. - * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method. 
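Aside, not part of the patch: a small hypothetical sketch of the future-like usage pattern the BlockingRpcCallback javadoc above describes; a plain thread stands in for the asynchronous coprocessor service stub that would normally invoke run(response).

import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;

public class BlockingCallbackSketch {
  public static void main(String[] args) throws Exception {
    BlockingRpcCallback<String> done = new BlockingRpcCallback<>();
    // In real code an async service stub receives "done" and calls run(...) on completion.
    new Thread(() -> done.run("response")).start();
    String response = done.get(); // blocks until run(...) has been called
    System.out.println(response);
  }
}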
+ * Simple {@link RpcCallback} implementation providing a {@link java.util.concurrent.Future}-like + * {@link BlockingRpcCallback#get()} method, which will block util the instance's + * {@link BlockingRpcCallback#run(Object)} method has been called. {@code R} is the RPC response + * type that will be passed to the {@link #run(Object)} method. */ @InterfaceAudience.Private // Copy of BlockingRpcCallback but deriving from RpcCallback non-shaded. @@ -184,7 +182,7 @@ public void run(R parameter) { /** * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was - * passed. When used asynchronously, this method will block until the {@link #run(Object)} + * passed. When used asynchronously, this method will block until the {@link #run(Object)} * method has been called. * @return the response object or {@code null} if no response was passed */ @@ -203,17 +201,17 @@ public synchronized R get() throws IOException { } /** - * Stores an exception encountered during RPC invocation so it can be passed back - * through to the client. + * Stores an exception encountered during RPC invocation so it can be passed back through to the + * client. * @param controller the controller instance provided by the client when calling the service - * @param ioe the exception encountered + * @param ioe the exception encountered */ public static void setControllerException(RpcController controller, IOException ioe) { if (controller == null) { return; } if (controller instanceof org.apache.hadoop.hbase.ipc.ServerRpcController) { - ((ServerRpcController)controller).setFailedOn(ioe); + ((ServerRpcController) controller).setFailedOn(ioe); } else { controller.setFailed(StringUtils.stringifyException(ioe)); } @@ -223,7 +221,7 @@ public static void setControllerException(RpcController controller, IOException * Retreivies exception stored during RPC invocation. * @param controller the controller instance provided by the client when calling the service * @return exception if any, or null; Will return DoNotRetryIOException for string represented - * failure causes in controller. + * failure causes in controller. */ @Nullable public static IOException getControllerException(RpcController controller) throws IOException { @@ -231,7 +229,7 @@ public static IOException getControllerException(RpcController controller) throw return null; } if (controller instanceof ServerRpcController) { - return ((ServerRpcController)controller).getFailedOn(); + return ((ServerRpcController) controller).getFailedOn(); } return new DoNotRetryIOException(controller.errorText()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java index d68d955a984e..9bee88d599f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; - import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + /** * Simple delegating controller for use with the {@link RpcControllerFactory} to help override * standard behavior of a {@link HBaseRpcController}. Used testing. @@ -130,7 +129,7 @@ public void setDone(CellScanner cellScanner) { @Override public void notifyOnCancel(RpcCallback callback, CancellationCallback action) - throws IOException { + throws IOException { delegate.notifyOnCancel(callback, action); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java index 46f57d518040..e14ed782d41d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServerException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Indicates that we're trying to connect to a already known as dead server. We will want to - * retry: we're getting this because the region location was wrong, or because - * the server just died, in which case the retry loop will help us to wait for the - * regions to recover. + * Indicates that we're trying to connect to a already known as dead server. We will want to retry: + * we're getting this because the region location was wrong, or because the server just died, in + * which case the retry loop will help us to wait for the regions to recover. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java index 1a8bc0129ea6..0a8da3c20151 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * A class to manage a list of servers that failed recently. 
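Aside, not part of the patch: a self-contained, hypothetical sketch of the expiry bookkeeping implied by the FailedServers hunks around here; the real class keys on Address, uses EnvironmentEdgeManager for time, and reads the window from RpcClient.FAILED_SERVER_EXPIRY_KEY (default 2000 ms).

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: a server counts as "failed" only until its entry expires, so a
// dead-server hint stops influencing connection attempts after the configured window.
final class FailedServersSketch {
  private final Map<String, Long> failedServers = new HashMap<>();
  private final long recheckServersTimeout = 2000L; // RpcClient.FAILED_SERVER_EXPIRY_DEFAULT

  synchronized void addToFailedServers(String address) {
    failedServers.put(address, System.currentTimeMillis() + recheckServersTimeout);
  }

  synchronized boolean isFailedServer(String address) {
    Long expiry = failedServers.get(address);
    if (expiry == null) {
      return false;
    }
    if (expiry < System.currentTimeMillis()) {
      failedServers.remove(address); // stale entry, clean it up
      return false;
    }
    return true;
  }
}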
@@ -38,8 +37,8 @@ public class FailedServers { private static final Logger LOG = LoggerFactory.getLogger(FailedServers.class); public FailedServers(Configuration conf) { - this.recheckServersTimeout = conf.getInt( - RpcClient.FAILED_SERVER_EXPIRY_KEY, RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); + this.recheckServersTimeout = + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); } /** @@ -50,15 +49,13 @@ public synchronized void addToFailedServers(Address address, Throwable throwable this.failedServers.put(address, expiry); this.latestExpiry = expiry; if (LOG.isDebugEnabled()) { - LOG.debug( - "Added failed server with address " + address + " to list caused by " - + throwable.toString()); + LOG.debug("Added failed server with address " + address + " to list caused by " + + throwable.toString()); } } /** * Check if the server should be considered as bad. Clean the old entries of the list. - * * @return true if the server is in the failed servers list */ public synchronized boolean isFailedServer(final Address address) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java index f2e7db039ad8..21c4f3ada2ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FallbackDisallowedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,6 +31,6 @@ public class FallbackDisallowedException extends HBaseIOException { public FallbackDisallowedException() { super("Server asks us to fall back to SIMPLE auth, " - + "but this client is configured to only allow secure connections."); + + "but this client is configured to only allow secure connections."); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java index 6e674268894d..ed0e220a92cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FatalConnectionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when server finds fatal issue w/ connection setup: e.g. bad rpc version - * or unsupported auth method. - * Closes connection after throwing this exception with message on why the failure. + * Thrown when server finds fatal issue w/ connection setup: e.g. bad rpc version or unsupported + * auth method. Closes connection after throwing this exception with message on why the failure. 
*/ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java index 4f953e93aa7e..b33771e5b582 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java @@ -17,20 +17,19 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; - import java.io.IOException; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + /** * Optionally carries Cells across the proxy/service interface down into ipc. On its way out it * optionally carries a set of result Cell data. We stick the Cells here when we want to avoid @@ -39,8 +38,8 @@ * RegionInfo we're making the call against if relevant (useful adding info to exceptions and logs). * Used by client and server ipc'ing. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, - HBaseInterfaceAudience.REPLICATION}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.REPLICATION }) @InterfaceStability.Evolving public interface HBaseRpcController extends RpcController, CellScannable { @@ -52,7 +51,7 @@ public interface HBaseRpcController extends RpcController, CellScannable { /** * @param priority Priority for this request; should fall roughly in the range - * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} + * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} */ void setPriority(int priority); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java index 7dde67c38e1c..19083c392f9b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java @@ -17,20 +17,19 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; - import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + /** * Get instances via {@link RpcControllerFactory} on client-side. 
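Aside, not part of the patch: a hypothetical client-side sketch of how a controller obtained through RpcControllerFactory (as the javadoc above suggests) carries per-call priority and timeout; RpcControllerFactory.instantiate(conf).newController() is assumed from the factory's usual API, not shown in this diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

public class ControllerSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    HBaseRpcController controller = RpcControllerFactory.instantiate(conf).newController();
    controller.setPriority(HConstants.HIGH_QOS); // roughly NORMAL_QOS .. HIGH_QOS per setPriority
    controller.setCallTimeout(60_000);           // milliseconds; read back via getCallTimeout()
    System.out.println(controller.getCallTimeout());
  }
}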
* @see RpcControllerFactory @@ -156,7 +155,7 @@ public void reset() { @Override public int getCallTimeout() { - return callTimeout != null? callTimeout: 0; + return callTimeout != null ? callTimeout : 0; } @Override @@ -254,7 +253,7 @@ public void startCancel() { @Override public synchronized void notifyOnCancel(RpcCallback callback, CancellationCallback action) - throws IOException { + throws IOException { if (cancelled) { action.run(true); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 2b71493e76c9..57f8da98eff6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,12 +40,14 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocal; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; @@ -59,15 +61,15 @@ class IPCUtil { /** * Write out header, param, and cell block if there is one. - * @param dos Stream to write into - * @param header to write - * @param param to write + * @param dos Stream to write into + * @param header to write + * @param param to write * @param cellBlock to write * @return Total number of bytes written. * @throws IOException if write action fails */ public static int write(final OutputStream dos, final Message header, final Message param, - final ByteBuf cellBlock) throws IOException { + final ByteBuf cellBlock) throws IOException { // Must calculate total size and write that first so other side can read it all in in one // swoop. This is dictated by how the server is currently written. Server needs to change // if we are to be able to write without the length prefixing. @@ -79,7 +81,7 @@ public static int write(final OutputStream dos, final Message header, final Mess } private static int write(final OutputStream dos, final Message header, final Message param, - final ByteBuf cellBlock, final int totalSize) throws IOException { + final ByteBuf cellBlock, final int totalSize) throws IOException { // I confirmed toBytes does same as DataOutputStream#writeInt. dos.write(Bytes.toBytes(totalSize)); // This allocates a buffer that is the size of the message internally. 
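Aside, not part of the patch: a hypothetical sketch of the length-prefixed framing that the IPCUtil.write comments above describe, with parameter names mirroring the signature shown in this file; the delimited-size arithmetic is an assumption of this sketch, not a copy of IPCUtil's internal helper.

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;

final class FramingSketch {
  // The 4-byte total length goes out first so the server can size a single read, then the
  // delimited header and param protobufs, then the raw cell block bytes with no extra prefix.
  static int write(OutputStream dos, Message header, Message param, ByteBuf cellBlock)
      throws IOException {
    int headerSize = header.getSerializedSize();
    int paramSize = param.getSerializedSize();
    int totalSize = headerSize + CodedOutputStream.computeUInt32SizeNoTag(headerSize) + paramSize
      + CodedOutputStream.computeUInt32SizeNoTag(paramSize)
      + (cellBlock != null ? cellBlock.readableBytes() : 0);
    dos.write(Bytes.toBytes(totalSize)); // length prefix
    header.writeDelimitedTo(dos);        // varint length + header bytes
    param.writeDelimitedTo(dos);         // varint length + param bytes
    if (cellBlock != null) {
      cellBlock.readBytes(dos, cellBlock.readableBytes()); // cell block bytes
    }
    return totalSize + Bytes.SIZEOF_INT; // bytes written to the wire
  }
}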
@@ -140,10 +142,10 @@ static RemoteException createRemoteException(final ExceptionResponse e) { boolean doNotRetry = e.getDoNotRetry(); boolean serverOverloaded = e.hasServerOverloaded() && e.getServerOverloaded(); return e.hasHostname() ? - // If a hostname then add it to the RemoteWithExtrasException + // If a hostname then add it to the RemoteWithExtrasException new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), e.getHostname(), - e.getPort(), doNotRetry, serverOverloaded) : - new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), doNotRetry, + e.getPort(), doNotRetry, serverOverloaded) + : new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), doNotRetry, serverOverloaded); } @@ -163,8 +165,8 @@ static IOException toIOE(Throwable t) { } private static String getCallTarget(Address addr, RegionInfo regionInfo) { - return "address=" + addr + - (regionInfo != null? ", region=" + regionInfo.getRegionNameAsString(): ""); + return "address=" + addr + + (regionInfo != null ? ", region=" + regionInfo.getRegionNameAsString() : ""); } /** @@ -179,7 +181,7 @@ private static String getCallTarget(Address addr, RegionInfo regionInfo) { * deciding whether to retry. If it is not possible to create a new exception with the same type, * for example, the {@code error} is not an {@link IOException}, an {@link IOException} will be * created. - * @param addr target address + * @param addr target address * @param error the relevant exception * @return an exception to throw * @see ClientExceptionsUtil#isConnectionException(Throwable) @@ -187,14 +189,16 @@ private static String getCallTarget(Address addr, RegionInfo regionInfo) { static IOException wrapException(Address addr, RegionInfo regionInfo, Throwable error) { if (error instanceof ConnectException) { // connection refused; include the host:port in the error - return (IOException) new ConnectException("Call to " + getCallTarget(addr, regionInfo) + - " failed on connection exception: " + error).initCause(error); + return (IOException) new ConnectException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on connection exception: " + error) + .initCause(error); } else if (error instanceof SocketTimeoutException) { return (IOException) new SocketTimeoutException( "Call to " + getCallTarget(addr, regionInfo) + " failed because " + error).initCause(error); } else if (error instanceof ConnectionClosingException) { - return new ConnectionClosingException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new ConnectionClosingException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof ServerTooBusyException) { // we already have address in the exception message return (IOException) error; @@ -203,51 +207,57 @@ static IOException wrapException(Address addr, RegionInfo regionInfo, Throwable try { return (IOException) error.getClass().asSubclass(DoNotRetryIOException.class) .getConstructor(String.class) - .newInstance("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error).initCause(error); + .newInstance( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error) + .initCause(error); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException - | InvocationTargetException | NoSuchMethodException | SecurityException e) { + | InvocationTargetException | NoSuchMethodException | 
SecurityException e) { // just ignore, will just new a DoNotRetryIOException instead below } - return new DoNotRetryIOException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new DoNotRetryIOException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof ConnectionClosedException) { - return new ConnectionClosedException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new ConnectionClosedException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof CallTimeoutException) { - return new CallTimeoutException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new CallTimeoutException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else if (error instanceof ClosedChannelException) { // ClosedChannelException does not have a constructor which takes a String but it is a // connection exception so we keep its original type return (IOException) error; } else if (error instanceof TimeoutException) { // TimeoutException is not an IOException, let's convert it to TimeoutIOException. - return new TimeoutIOException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new TimeoutIOException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } else { // try our best to keep the original exception type if (error instanceof IOException) { try { return (IOException) error.getClass().asSubclass(IOException.class) .getConstructor(String.class) - .newInstance("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error) + .newInstance( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error) .initCause(error); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException - | InvocationTargetException | NoSuchMethodException | SecurityException e) { + | InvocationTargetException | NoSuchMethodException | SecurityException e) { // just ignore, will just new an IOException instead below } } - return new HBaseIOException("Call to " + getCallTarget(addr, regionInfo) + - " failed on local exception: " + error, error); + return new HBaseIOException( + "Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error, + error); } } static void setCancelled(Call call) { call.setException(new CallCancelledException(call.toShortString() + ", waitTime=" - + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" - + call.timeout)); + + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + ", rpcTimeout=" + + call.timeout)); } private static final FastThreadLocal DEPTH = new FastThreadLocal() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java index 4c85e3d51abe..7b698958ede2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java @@ -46,17 +46,16 @@ public class NettyRpcClient extends AbstractRpcClient { private final boolean shutdownGroupWhenClose; public NettyRpcClient(Configuration configuration, 
String clusterId, SocketAddress localAddress, - MetricsConnection metrics) { + MetricsConnection metrics) { super(configuration, clusterId, localAddress, metrics); Pair> groupAndChannelClass = NettyRpcClientConfigHelper.getEventLoopConfig(conf); if (groupAndChannelClass == null) { // Use our own EventLoopGroup. - int threadCount = conf.getInt( - NettyRpcClientConfigHelper.HBASE_NETTY_EVENTLOOP_RPCCLIENT_THREADCOUNT_KEY, 0); + int threadCount = + conf.getInt(NettyRpcClientConfigHelper.HBASE_NETTY_EVENTLOOP_RPCCLIENT_THREADCOUNT_KEY, 0); this.group = new NioEventLoopGroup(threadCount, - new DefaultThreadFactory("RPCClient(own)-NioEventLoopGroup", true, - Thread.NORM_PRIORITY)); + new DefaultThreadFactory("RPCClient(own)-NioEventLoopGroup", true, Thread.NORM_PRIORITY)); this.channelClass = NioSocketChannel.class; this.shutdownGroupWhenClose = true; } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java index a8c99378720b..ef805ad5178a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.java @@ -45,27 +45,28 @@ public final class NettyRpcClientConfigHelper { public static final String EVENT_LOOP_CONFIG = "hbase.rpc.client.event-loop.config"; /** - * Name of property to change netty rpc client eventloop thread count. Default is 0. - * Tests may set this down from unlimited. + * Name of property to change netty rpc client eventloop thread count. Default is 0. Tests may set + * this down from unlimited. */ public static final String HBASE_NETTY_EVENTLOOP_RPCCLIENT_THREADCOUNT_KEY = "hbase.netty.eventloop.rpcclient.thread.count"; private static final String CONFIG_NAME = "global-event-loop"; - private static final Map>> - EVENT_LOOP_CONFIG_MAP = new HashMap<>(); + private static final Map>> EVENT_LOOP_CONFIG_MAP = new HashMap<>(); /** * Shutdown constructor. */ - private NettyRpcClientConfigHelper() {} + private NettyRpcClientConfigHelper() { + } /** * Set the EventLoopGroup and channel class for {@code AsyncRpcClient}. */ public static void setEventLoopConfig(Configuration conf, EventLoopGroup group, - Class channelClass) { + Class channelClass) { Preconditions.checkNotNull(group, "group is null"); Preconditions.checkNotNull(channelClass, "channel class is null"); conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index d0a13ca33d6c..14e8cbc13d3b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index c67d96f0a756..fe32189f81bd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { private final Map id2Call = new HashMap<>(); public NettyRpcDuplexHandler(NettyRpcConnection conn, CellBlockBuilder cellBlockBuilder, - Codec codec, CompressionCodec compressor) { + Codec codec, CompressionCodec compressor) { this.conn = conn; this.cellBlockBuilder = cellBlockBuilder; this.codec = codec; @@ -77,7 +77,7 @@ public NettyRpcDuplexHandler(NettyRpcConnection conn, CellBlockBuilder cellBlock } private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise promise) - throws IOException { + throws IOException { id2Call.put(call.id, call); ByteBuf cellBlock = cellBlockBuilder.buildCellBlock(codec, compressor, call.cells, ctx.alloc()); CellBlockMeta cellBlockMeta; @@ -90,8 +90,8 @@ private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise p } RequestHeader requestHeader = IPCUtil.buildRequestHeader(call, cellBlockMeta); int sizeWithoutCellBlock = IPCUtil.getTotalSizeWhenWrittenDelimited(requestHeader, call.param); - int totalSize = cellBlock != null ? sizeWithoutCellBlock + cellBlock.writerIndex() - : sizeWithoutCellBlock; + int totalSize = + cellBlock != null ? sizeWithoutCellBlock + cellBlock.writerIndex() : sizeWithoutCellBlock; ByteBuf buf = ctx.alloc().buffer(sizeWithoutCellBlock + 4); buf.writeInt(totalSize); try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf)) { @@ -133,7 +133,7 @@ private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOExcep int id = responseHeader.getCallId(); if (LOG.isTraceEnabled()) { LOG.trace("got response header " + TextFormat.shortDebugString(responseHeader) - + ", totalSize: " + totalSize + " bytes"); + + ", totalSize: " + totalSize + " bytes"); } RemoteException remoteExc; if (responseHeader.hasException()) { @@ -158,7 +158,7 @@ private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOExcep int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader); int whatIsLeftToRead = totalSize - readSoFar; LOG.debug("Unknown callId: " + id + ", skipping over this response of " + whatIsLeftToRead - + " bytes"); + + " bytes"); } return; } @@ -235,7 +235,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc if (id2Call.isEmpty()) { if (LOG.isTraceEnabled()) { LOG.trace("shutdown connection to " + conn.remoteId().address - + " because idle for a long time"); + + " because idle for a long time"); } // It may happen that there are still some pending calls in the event loop queue and // they will get a closed channel exception. 
But this is not a big deal as it rarely diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java index 108b9068a2d9..62d0bb1d4550 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.lang.reflect.Constructor; import java.security.AccessController; import java.security.PrivilegedAction; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -31,10 +30,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A {@link RemoteException} with some extra information. If source exception - * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException}, - * {@link #isDoNotRetry()} will return true. - *

      A {@link RemoteException} hosts exceptions we got from the server. + * A {@link RemoteException} with some extra information. If source exception was a + * {@link org.apache.hadoop.hbase.DoNotRetryIOException}, {@link #isDoNotRetry()} will return true. + *
      + * A {@link RemoteException} hosts exceptions we got from the server. */ @SuppressWarnings("serial") @InterfaceAudience.Public @@ -53,9 +52,8 @@ private final static class ClassLoaderHolder { static { ClassLoader parent = RemoteWithExtrasException.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); } } @@ -74,7 +72,7 @@ public RemoteWithExtrasException(String className, String msg, final String host } public RemoteWithExtrasException(String className, String msg, final String hostname, - final int port, final boolean doNotRetry, final boolean serverOverloaded) { + final int port, final boolean doNotRetry, final boolean serverOverloaded) { super(className, msg); this.hostname = hostname; this.port = port; @@ -95,14 +93,14 @@ public IOException unwrapRemoteException() { realClass = Class.forName(getClassName(), false, super.getClass().getClassLoader()); } catch (ClassNotFoundException e) { return new DoNotRetryIOException( - "Unable to load exception received from server:" + e.getMessage(), this); + "Unable to load exception received from server:" + e.getMessage(), this); } } try { return instantiateException(realClass.asSubclass(IOException.class)); } catch (Exception e) { return new DoNotRetryIOException( - "Unable to instantiate exception received from server:" + e.getMessage(), this); + "Unable to instantiate exception received from server:" + e.getMessage(), this); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 5bb08152d30e..6ecff49e52b1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,15 +34,15 @@ public interface RpcClient extends Closeable { int FAILED_SERVER_EXPIRY_DEFAULT = 2000; String IDLE_TIME = "hbase.ipc.client.connection.minIdleTimeBeforeClose"; String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = - "hbase.ipc.client.fallback-to-simple-auth-allowed"; + "hbase.ipc.client.fallback-to-simple-auth-allowed"; boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false; String SPECIFIC_WRITE_THREAD = "hbase.ipc.client.specificThreadForWriting"; String DEFAULT_CODEC_CLASS = "hbase.client.default.rpc.codec"; String SOCKET_TIMEOUT_CONNECT = "hbase.ipc.client.socket.timeout.connect"; /** - * How long we wait when we wait for an answer. It's not the operation time, it's the time - * we wait when we start to receive an answer, when the remote write starts to send the data. + * How long we wait when we wait for an answer. It's not the operation time, it's the time we wait + * when we start to receive an answer, when the remote write starts to send the data. */ String SOCKET_TIMEOUT_READ = "hbase.ipc.client.socket.timeout.read"; String SOCKET_TIMEOUT_WRITE = "hbase.ipc.client.socket.timeout.write"; @@ -55,43 +55,36 @@ public interface RpcClient extends Closeable { int PING_CALL_ID = -1; /** - * Creates a "channel" that can be used by a blocking protobuf service. 
Useful setting up - * protobuf blocking stubs. - * - * @param sn server name describing location of server - * @param user which is to use the connection + * Creates a "channel" that can be used by a blocking protobuf service. Useful setting up protobuf + * blocking stubs. + * @param sn server name describing location of server + * @param user which is to use the connection * @param rpcTimeout default rpc operation timeout - * * @return A blocking rpc channel that goes via this rpc client instance. */ BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout); /** - * Creates a "channel" that can be used by a protobuf service. Useful setting up - * protobuf stubs. - * - * @param sn server name describing location of server - * @param user which is to use the connection + * Creates a "channel" that can be used by a protobuf service. Useful setting up protobuf stubs. + * @param sn server name describing location of server + * @param user which is to use the connection * @param rpcTimeout default rpc operation timeout - * * @return A rpc channel that goes via this rpc client instance. */ RpcChannel createRpcChannel(final ServerName sn, final User user, int rpcTimeout); /** - * Interrupt the connections to the given server. This should be called if the server - * is known as actually dead. This will not prevent current operation to be retried, and, - * depending on their own behavior, they may retry on the same server. This can be a feature, - * for example at startup. In any case, they're likely to get connection refused (if the - * process died) or no route to host: i.e. their next retries should be faster and with a - * safe exception. + * Interrupt the connections to the given server. This should be called if the server is known as + * actually dead. This will not prevent current operation to be retried, and, depending on their + * own behavior, they may retry on the same server. This can be a feature, for example at startup. + * In any case, they're likely to get connection refused (if the process died) or no route to + * host: i.e. their next retries should be faster and with a safe exception. * @param sn server location to cancel connections of */ void cancelConnections(ServerName sn); /** - * Stop all threads related to this client. No further calls may be made - * using this client. + * Stop all threads related to this client. No further calls may be made using this client. */ @Override void close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java index 434795248c6f..9b69b5234050 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,9 +33,10 @@ public final class RpcClientFactory { public static final String CUSTOM_RPC_CLIENT_IMPL_CONF_KEY = "hbase.rpc.client.impl"; - private static final ImmutableMap DEPRECATED_NAME_MAPPING = ImmutableMap.of( - "org.apache.hadoop.hbase.ipc.RpcClientImpl", BlockingRpcClient.class.getName(), - "org.apache.hadoop.hbase.ipc.AsyncRpcClient", NettyRpcClient.class.getName()); + private static final ImmutableMap DEPRECATED_NAME_MAPPING = ImmutableMap.of("org.apache.hadoop.hbase.ipc.RpcClientImpl", + BlockingRpcClient.class.getName(), "org.apache.hadoop.hbase.ipc.AsyncRpcClient", + NettyRpcClient.class.getName()); /** * Private Constructor @@ -51,13 +52,13 @@ public static RpcClient createClient(Configuration conf, String clusterId) { /** * Creates a new RpcClient by the class defined in the configuration or falls back to * RpcClientImpl - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id - * @param metrics the connection metrics + * @param metrics the connection metrics * @return newly created RpcClient */ public static RpcClient createClient(Configuration conf, String clusterId, - MetricsConnection metrics) { + MetricsConnection metrics) { return createClient(conf, clusterId, null, metrics); } @@ -73,17 +74,17 @@ private static String getRpcClientClass(Configuration conf) { /** * Creates a new RpcClient by the class defined in the configuration or falls back to * RpcClientImpl - * @param conf configuration + * @param conf configuration * @param clusterId the cluster id * @param localAddr client socket bind address. - * @param metrics the connection metrics + * @param metrics the connection metrics * @return newly created RpcClient */ public static RpcClient createClient(Configuration conf, String clusterId, - SocketAddress localAddr, MetricsConnection metrics) { + SocketAddress localAddr, MetricsConnection metrics) { String rpcClientClass = getRpcClientClass(conf); return ReflectionUtils.instantiateWithCustomCtor(rpcClientClass, new Class[] { - Configuration.class, String.class, SocketAddress.class, MetricsConnection.class }, + Configuration.class, String.class, SocketAddress.class, MetricsConnection.class }, new Object[] { conf, clusterId, localAddr, metrics }); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index bfaf91c52857..912fa4fb0654 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -21,7 +21,6 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; @@ -43,6 +42,7 @@ import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hbase.thirdparty.io.netty.util.TimerTask; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @@ -85,8 +85,8 @@ abstract class RpcConnection { protected SaslClientAuthenticationProvider provider; protected RpcConnection(Configuration conf, 
HashedWheelTimer timeoutTimer, ConnectionId remoteId, - String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, - MetricsConnection metrics) throws IOException { + String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, + MetricsConnection metrics) throws IOException { this.timeoutTimer = timeoutTimer; this.codec = codec; this.compressor = compressor; @@ -98,14 +98,14 @@ protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, Conne // Choose the correct Token and AuthenticationProvider for this client to use SaslClientAuthenticationProviders providers = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); Pair> pair; if (useSasl && securityInfo != null) { pair = providers.selectProvider(clusterId, ticket); if (pair == null) { if (LOG.isTraceEnabled()) { LOG.trace("Found no valid authentication method from providers={} with tokens={}", - providers.toString(), ticket.getTokens()); + providers.toString(), ticket.getTokens()); } throw new RuntimeException("Found no valid authentication method from options"); } @@ -120,7 +120,7 @@ protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, Conne this.token = pair.getSecond(); LOG.debug("Using {} authentication for service={}, sasl={}", - provider.getSaslAuthMethod().getName(), remoteId.serviceName, useSasl); + provider.getSaslAuthMethod().getName(), remoteId.serviceName, useSasl); reloginMaxBackoff = conf.getInt("hbase.security.relogin.maxbackoff", 5000); this.remoteId = remoteId; } @@ -132,8 +132,8 @@ protected final void scheduleTimeoutTask(final Call call) { @Override public void run(Timeout timeout) throws Exception { call.setTimeout(new CallTimeoutException(call.toShortString() + ", waitTime=" - + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + "ms, rpcTimeout=" - + call.timeout + "ms")); + + (EnvironmentEdgeManager.currentTime() - call.getStartTime()) + "ms, rpcTimeout=" + + call.timeout + "ms")); callTimeout(call); } }, call.timeout, TimeUnit.MILLISECONDS); @@ -159,7 +159,7 @@ protected final byte[] getConnectionHeaderPreamble() { protected final ConnectionHeader getConnectionHeader() { final ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); builder.setServiceName(remoteId.getServiceName()); - final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); + final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); if (userInfoPB != null) { builder.setUserInfo(userInfoPB); } @@ -174,7 +174,7 @@ protected final ConnectionHeader getConnectionHeader() { // if Crypto AES enable, setup Cipher transformation if (isCryptoAESEnable) { builder.setRpcCryptoCipherTransformation( - conf.get("hbase.rpc.crypto.encryption.aes.cipher.transform", "AES/CTR/NoPadding")); + conf.get("hbase.rpc.crypto.encryption.aes.cipher.transform", "AES/CTR/NoPadding")); } return builder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java index 3f160d4dc599..a256769de703 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java @@ -32,7 +32,7 @@ /** * Factory to create a {@link HBaseRpcController} */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) 
+@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class RpcControllerFactory { private static final Logger LOG = LoggerFactory.getLogger(RpcControllerFactory.class); @@ -67,14 +67,13 @@ public HBaseRpcController newController(final List cellIterables) } public HBaseRpcController newController(RegionInfo regionInfo, - final List cellIterables) { + final List cellIterables) { return new HBaseRpcControllerImpl(regionInfo, cellIterables); } public static RpcControllerFactory instantiate(Configuration configuration) { String rpcControllerFactoryClazz = - configuration.get(CUSTOM_CONTROLLER_CONF_KEY, - RpcControllerFactory.class.getName()); + configuration.get(CUSTOM_CONTROLLER_CONF_KEY, RpcControllerFactory.class.getName()); try { return ReflectionUtils.instantiateWithCustomCtor(rpcControllerFactoryClazz, new Class[] { Configuration.class }, new Object[] { configuration }); @@ -82,8 +81,8 @@ public static RpcControllerFactory instantiate(Configuration configuration) { // HBASE-14960: In case the RPCController is in a non-HBase jar (Phoenix), but the application // is a pure HBase application, we want to fallback to the default one. String msg = "Cannot load configured \"" + CUSTOM_CONTROLLER_CONF_KEY + "\" (" - + rpcControllerFactoryClazz + ") from hbase-site.xml, falling back to use " - + "default RpcControllerFactory"; + + rpcControllerFactoryClazz + ") from hbase-site.xml, falling back to use " + + "default RpcControllerFactory"; if (LOG.isDebugEnabled()) { LOG.warn(msg, ex); // if DEBUG enabled, we want the exception, but still log in WARN level } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java index 19b46817258a..94d2b0eafb59 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; @SuppressWarnings("serial") diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java index 745009bc0802..96a9baf5d50b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import java.io.IOException; @@ -51,8 +50,7 @@ public class ServerRpcController implements RpcController { /** * The exception thrown within - * {@link com.google.protobuf.Service#callMethod(com.google.protobuf.Descriptors.MethodDescriptor, RpcController, - * com.google.protobuf.Message, RpcCallback)} + * {@link com.google.protobuf.Service#callMethod(com.google.protobuf.Descriptors.MethodDescriptor, RpcController, com.google.protobuf.Message, RpcCallback)} * if any. */ // TODO: it would be good widen this to just Throwable, but IOException is what we allow now diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java index eae9886ca55c..6c22ca94e428 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.net.Address; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java index bd1e101c2e95..520dbcb2d9c2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java index 8e09716c188c..7ed351968c18 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java index b7c28e058429..b782741971d6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java index 7cb78f9b98db..047761c92ee9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java index 83a869b8142d..5474087a857b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index 8ae0888a9dec..2b0f2f4509e4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** - * State of a Region while undergoing transitions. - * This class is immutable. + * State of a Region while undergoing transitions. This class is immutable. 
*/ @InterfaceAudience.Private public class RegionState { @@ -37,23 +36,23 @@ public class RegionState { @InterfaceAudience.Private @InterfaceStability.Evolving public enum State { - OFFLINE, // region is in an offline state - OPENING, // server has begun to open but not yet done - OPEN, // server opened region and updated meta - CLOSING, // server has begun to close but not yet done - CLOSED, // server closed region and updated meta - SPLITTING, // server started split of a region - SPLIT, // server completed split of a region - FAILED_OPEN, // failed to open, and won't retry any more - FAILED_CLOSE, // failed to close, and won't retry any more - MERGING, // server started merge a region - MERGED, // server completed merge a region - SPLITTING_NEW, // new region to be created when RS splits a parent - // region but hasn't be created yet, or master doesn't - // know it's already created - MERGING_NEW, // new region to be created when RS merges two - // daughter regions but hasn't be created yet, or - // master doesn't know it's already created + OFFLINE, // region is in an offline state + OPENING, // server has begun to open but not yet done + OPEN, // server opened region and updated meta + CLOSING, // server has begun to close but not yet done + CLOSED, // server closed region and updated meta + SPLITTING, // server started split of a region + SPLIT, // server completed split of a region + FAILED_OPEN, // failed to open, and won't retry any more + FAILED_CLOSE, // failed to close, and won't retry any more + MERGING, // server started merge a region + MERGED, // server completed merge a region + SPLITTING_NEW, // new region to be created when RS splits a parent + // region but hasn't be created yet, or master doesn't + // know it's already created + MERGING_NEW, // new region to be created when RS merges two + // daughter regions but hasn't be created yet, or + // master doesn't know it's already created ABNORMALLY_CLOSED; // the region is CLOSED because of a RS crashes. 
Usually it is the same // with CLOSED, but for some operations such as merge/split, we can not // apply it to a region in this state, as it may lead to data loss as we @@ -124,7 +123,6 @@ public ClusterStatusProtos.RegionState.State convert() { /** * Convert a protobuf HBaseProtos.RegionState.State to a RegionState.State - * * @return the RegionState.State */ public static State convert(ClusterStatusProtos.RegionState.State protoState) { @@ -196,13 +194,12 @@ public RegionState(RegionInfo region, State state, ServerName serverName) { this(region, state, EnvironmentEdgeManager.currentTime(), serverName); } - public RegionState(RegionInfo region, - State state, long stamp, ServerName serverName) { + public RegionState(RegionInfo region, State state, long stamp, ServerName serverName) { this(region, state, stamp, serverName, 0); } public RegionState(RegionInfo region, State state, long stamp, ServerName serverName, - long ritDuration) { + long ritDuration) { this.hri = region; this.state = state; this.stamp = stamp; @@ -351,8 +348,7 @@ public boolean isOpenedOnServer(final ServerName sn) { * Check if a region state can transition to offline */ public boolean isReadyToOffline() { - return isMerged() || isSplit() || isOffline() - || isSplittingNew() || isMergingNew(); + return isMerged() || isSplit() || isOffline() || isSplittingNew() || isMergingNew(); } /** @@ -363,16 +359,16 @@ public boolean isReadyToOnline() { } /** - * Check if a region state is one of offline states that - * can't transition to pending_close/closing (unassign/offline) + * Check if a region state is one of offline states that can't transition to pending_close/closing + * (unassign/offline) */ public boolean isUnassignable() { return isUnassignable(state); } /** - * Check if a region state is one of offline states that - * can't transition to pending_close/closing (unassign/offline) + * Check if a region state is one of offline states that can't transition to pending_close/closing + * (unassign/offline) */ public static boolean isUnassignable(State state) { return state == State.MERGED || state == State.SPLIT || state == State.OFFLINE @@ -381,10 +377,8 @@ public static boolean isUnassignable(State state) { @Override public String toString() { - return "{" + hri.getShortNameToLog() - + " state=" + state - + ", ts=" + stamp - + ", server=" + serverName + "}"; + return "{" + hri.getShortNameToLog() + " state=" + state + ", ts=" + stamp + ", server=" + + serverName + "}"; } /** @@ -392,19 +386,17 @@ public String toString() { */ public String toDescriptiveString() { long relTime = EnvironmentEdgeManager.currentTime() - stamp; - return hri.getRegionNameAsString() - + " state=" + state - + ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)" - + ", server=" + serverName; + return hri.getRegionNameAsString() + " state=" + state + ", ts=" + new Date(stamp) + " (" + + (relTime / 1000) + "s ago)" + ", server=" + serverName; } /** * Convert a RegionState to an HBaseProtos.RegionState - * * @return the converted HBaseProtos.RegionState */ public ClusterStatusProtos.RegionState convert() { - ClusterStatusProtos.RegionState.Builder regionState = ClusterStatusProtos.RegionState.newBuilder(); + ClusterStatusProtos.RegionState.Builder regionState = + ClusterStatusProtos.RegionState.newBuilder(); regionState.setRegionInfo(ProtobufUtil.toRegionInfo(hri)); regionState.setState(state.convert()); regionState.setStamp(getStamp()); @@ -413,7 +405,6 @@ public ClusterStatusProtos.RegionState convert() { /** * Convert a protobuf 
HBaseProtos.RegionState to a RegionState - * * @return the RegionState */ public static RegionState convert(ClusterStatusProtos.RegionState proto) { @@ -430,7 +421,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { return false; } - RegionState tmp = (RegionState)obj; + RegionState tmp = (RegionState) obj; return RegionInfo.COMPARATOR.compare(tmp.hri, hri) == 0 && tmp.state == state && ((serverName != null && serverName.equals(tmp.serverName)) @@ -442,7 +433,7 @@ public boolean equals(Object obj) { */ @Override public int hashCode() { - return (serverName != null ? serverName.hashCode() * 11 : 0) - + hri.hashCode() + 5 * state.ordinal(); + return (serverName != null ? serverName.hashCode() * 11 : 0) + hri.hashCode() + + 5 * state.ordinal(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java index 5268dafb8a6b..7c6f780e069d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,32 +29,28 @@ private ProtobufMagic() { } /** - * Magic we put ahead of a serialized protobuf message. - * For example, all znode content is protobuf messages with the below magic - * for preamble. + * Magic we put ahead of a serialized protobuf message. For example, all znode content is protobuf + * messages with the below magic for preamble. */ - public static final byte [] PB_MAGIC = new byte [] {'P', 'B', 'U', 'F'}; + public static final byte[] PB_MAGIC = new byte[] { 'P', 'B', 'U', 'F' }; /** * @param bytes Bytes to check. * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { if (bytes == null) return false; return isPBMagicPrefix(bytes, 0, bytes.length); } /* - * Copied from Bytes.java to here - * hbase-common now depends on hbase-protocol - * Referencing Bytes.java directly would create circular dependency + * Copied from Bytes.java to here hbase-common now depends on hbase-protocol Referencing + * Bytes.java directly would create circular dependency */ - private static int compareTo(byte[] buffer1, int offset1, int length1, - byte[] buffer2, int offset2, int length2) { + private static int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, + int offset2, int length2) { // Short circuit equal case - if (buffer1 == buffer2 && - offset1 == offset2 && - length1 == length2) { + if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { return 0; } // Bring WritableComparator code local @@ -71,12 +67,12 @@ private static int compareTo(byte[] buffer1, int offset1, int length1, } /** - * @param bytes Bytes to check. + * @param bytes Bytes to check. * @param offset offset to start at - * @param len length to use + * @param len length to use * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. 
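Not part of the patch: a small sketch of the PB_MAGIC preamble convention described in the ProtobufMagic javadoc above. The helper names are assumptions for the example; only PB_MAGIC and isPBMagicPrefix come from the class itself.

import java.util.Arrays;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;

// Illustrative helpers, not HBase API: frame and unframe protobuf bytes with the 'PBUF' preamble.
public class PbMagicSketch {
  static byte[] prependMagic(byte[] serialized) {
    byte[] framed = new byte[ProtobufMagic.PB_MAGIC.length + serialized.length];
    System.arraycopy(ProtobufMagic.PB_MAGIC, 0, framed, 0, ProtobufMagic.PB_MAGIC.length);
    System.arraycopy(serialized, 0, framed, ProtobufMagic.PB_MAGIC.length, serialized.length);
    return framed;
  }

  static byte[] stripMagic(byte[] content) {
    if (!ProtobufMagic.isPBMagicPrefix(content)) {
      return content; // not framed with the magic; leave it to the caller
    }
    return Arrays.copyOfRange(content, ProtobufMagic.PB_MAGIC.length, content.length);
  }
}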
*/ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { if (bytes == null || len < PB_MAGIC.length) return false; return compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, offset, PB_MAGIC.length) == 0; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java index fd74719e722e..d5f5c8575b18 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.protobuf; import java.util.ArrayList; @@ -24,8 +23,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.gson.JsonArray; import org.apache.hbase.thirdparty.com.google.gson.JsonElement; import org.apache.hbase.thirdparty.com.google.gson.JsonObject; @@ -36,6 +35,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.MessageOrBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.util.JsonFormat; import org.apache.hbase.thirdparty.com.google.protobuf.util.JsonFormat.TypeRegistry; + import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @@ -45,8 +45,7 @@ *

 * <ul>
 * <li>JSON string: {@link #toJsonElement(MessageOrBuilder)}</li>
 * <li>JSON object (gson): {@link #toJsonElement(MessageOrBuilder)}</li>
- * <li>Java objects (Boolean, Number, String, List, Map):
- * {@link #toJavaObject(JsonElement)}</li>
+ * <li>Java objects (Boolean, Number, String, List, Map): {@link #toJavaObject(JsonElement)}</li>
 * </ul>
      */ @InterfaceAudience.Private @@ -57,33 +56,29 @@ public class ProtobufMessageConverter { static { TypeRegistry.Builder builder = TypeRegistry.newBuilder(); - builder - .add(BytesValue.getDescriptor()) - .add(LockServiceProtos.getDescriptor().getMessageTypes()) + builder.add(BytesValue.getDescriptor()).add(LockServiceProtos.getDescriptor().getMessageTypes()) .add(MasterProcedureProtos.getDescriptor().getMessageTypes()) .add(ProcedureProtos.getDescriptor().getMessageTypes()); TypeRegistry typeRegistry = builder.build(); - jsonPrinter = JsonFormat.printer() - .usingTypeRegistry(typeRegistry) - .omittingInsignificantWhitespace(); + jsonPrinter = + JsonFormat.printer().usingTypeRegistry(typeRegistry).omittingInsignificantWhitespace(); } private ProtobufMessageConverter() { } public static String toJsonString(MessageOrBuilder messageOrBuilder) - throws InvalidProtocolBufferException { + throws InvalidProtocolBufferException { return jsonPrinter.print(messageOrBuilder); } private static void removeTypeFromJson(JsonElement json) { if (json.isJsonArray()) { - for (JsonElement child: json.getAsJsonArray()) { + for (JsonElement child : json.getAsJsonArray()) { removeTypeFromJson(child); } } else if (json.isJsonObject()) { - Iterator> iterator = - json.getAsJsonObject().entrySet().iterator(); + Iterator> iterator = json.getAsJsonObject().entrySet().iterator(); while (iterator.hasNext()) { Entry entry = iterator.next(); @@ -97,12 +92,12 @@ private static void removeTypeFromJson(JsonElement json) { } public static JsonElement toJsonElement(MessageOrBuilder messageOrBuilder) - throws InvalidProtocolBufferException { + throws InvalidProtocolBufferException { return toJsonElement(messageOrBuilder, true); } - public static JsonElement toJsonElement(MessageOrBuilder messageOrBuilder, - boolean removeType) throws InvalidProtocolBufferException { + public static JsonElement toJsonElement(MessageOrBuilder messageOrBuilder, boolean removeType) + throws InvalidProtocolBufferException { String jsonString = toJsonString(messageOrBuilder); JsonParser parser = new JsonParser(); JsonElement element = parser.parse(jsonString); @@ -140,7 +135,7 @@ private static Object toJavaObject(JsonElement element) { JsonObject object = element.getAsJsonObject(); Map map = new LinkedHashMap<>(); - for (Entry entry: object.entrySet()) { + for (Entry entry : object.entrySet()) { Object javaObject = toJavaObject(entry.getValue()); map.put(entry.getKey(), javaObject); } @@ -152,7 +147,7 @@ private static Object toJavaObject(JsonElement element) { } public static Object toJavaObject(MessageOrBuilder messageOrBuilder) - throws InvalidProtocolBufferException { + throws InvalidProtocolBufferException { JsonElement element = toJsonElement(messageOrBuilder); return toJavaObject(element); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java index 5961ec5cde81..60bf23a944d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.quotas; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java index d5190ad9f7cf..9ddd408f845c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.util.HashSet; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index a48ce71d607a..728959e0a0ca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.quotas; import java.io.Closeable; @@ -24,19 +23,18 @@ import java.util.LinkedList; import java.util.Objects; import java.util.Queue; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Scanner to iterate over the quota settings. @@ -48,8 +46,8 @@ public class QuotaRetriever implements Closeable, Iterable { private final Queue cache = new LinkedList<>(); private ResultScanner scanner; /** - * Connection to use. - * Could pass one in and have this class use it but this class wants to be standalone. + * Connection to use. Could pass one in and have this class use it but this class wants to be + * standalone. */ private Connection connection; private Table table; @@ -104,8 +102,10 @@ public QuotaSettings next() throws IOException { if (cache.isEmpty()) { Result result = scanner.next(); // Skip exceedThrottleQuota row key because this is not a QuotaSettings - if (result != null - && Bytes.equals(result.getRow(), QuotaTableUtil.getExceedThrottleQuotaRowKey())) { + if ( + result != null + && Bytes.equals(result.getRow(), QuotaTableUtil.getExceedThrottleQuotaRowKey()) + ) { result = scanner.next(); } if (result == null) { @@ -166,13 +166,13 @@ public static QuotaRetriever open(final Configuration conf) throws IOException { /** * Open a QuotaRetriever with the specified filter. 
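Not part of the patch: a minimal usage sketch for the QuotaRetriever shown above, assuming an hbase-client dependency and a builder-style QuotaFilter; the user filter value is an example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;

public class QuotaScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    QuotaFilter filter = new QuotaFilter().setUserFilter("bob"); // example filter
    // QuotaRetriever opens its own Connection, so close it (try-with-resources) when done.
    try (QuotaRetriever retriever = QuotaRetriever.open(conf, filter)) {
      for (QuotaSettings settings : retriever) {
        System.out.println(settings);
      }
    }
  }
}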
- * @param conf Configuration object to use. + * @param conf Configuration object to use. * @param filter the QuotaFilter * @return the QuotaRetriever * @throws IOException if a remote or network exception occurs */ public static QuotaRetriever open(final Configuration conf, final QuotaFilter filter) - throws IOException { + throws IOException { Scan scan = QuotaTableUtil.makeScan(filter); QuotaRetriever scanner = new QuotaRetriever(); scanner.init(conf, scan); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java index 6f7317c85557..bc211e022d0c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,22 +20,21 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Describe the Scope of the quota rules. - * The quota can be enforced at the cluster level or at machine level. + * Describe the Scope of the quota rules. The quota can be enforced at the cluster level or at + * machine level. */ @InterfaceAudience.Public public enum QuotaScope { /** - * The specified throttling rules will be applied at the cluster level. - * A limit of 100req/min means 100req/min in total. - * If you execute 50req on a machine and then 50req on another machine + * The specified throttling rules will be applied at the cluster level. A limit of 100req/min + * means 100req/min in total. If you execute 50req on a machine and then 50req on another machine * then you have to wait your quota to fill up. */ CLUSTER, /** - * The specified throttling rules will be applied on the machine level. - * A limit of 100req/min means that each machine can execute 100req/min. + * The specified throttling rules will be applied on the machine level. A limit of 100req/min + * means that each machine can execute 100req/min. */ MACHINE, } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java index b51a20d6e0ad..05218f903f68 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,12 +20,12 @@ import java.io.IOException; import java.util.Objects; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.QuotaGlobalsSettingsBypass; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; @@ -37,7 +37,7 @@ public abstract class QuotaSettings { private final String regionServer; protected QuotaSettings(final String userName, final TableName tableName, final String namespace, - final String regionServer) { + final String regionServer) { this.userName = userName; this.namespace = namespace; this.tableName = tableName; @@ -63,10 +63,9 @@ public String getRegionServer() { } /** - * Converts the protocol buffer request into a QuotaSetting POJO. Arbitrarily - * enforces that the request only contain one "limit", despite the message - * allowing multiple. The public API does not allow such use of the message. - * + * Converts the protocol buffer request into a QuotaSetting POJO. Arbitrarily enforces that the + * request only contain one "limit", despite the message allowing multiple. The public API does + * not allow such use of the message. * @param request The protocol buffer request. * @return A {@link QuotaSettings} POJO. */ @@ -92,35 +91,33 @@ public static QuotaSettings buildFromProto(SetQuotaRequest request) { // Make sure we don't have either of the two below limits also included if (request.hasSpaceLimit() || request.hasThrottle()) { throw new IllegalStateException( - "SetQuotaRequest has multiple limits: " + TextFormat.shortDebugString(request)); + "SetQuotaRequest has multiple limits: " + TextFormat.shortDebugString(request)); } - return new QuotaGlobalsSettingsBypass( - username, tableName, namespace, regionServer, request.getBypassGlobals()); + return new QuotaGlobalsSettingsBypass(username, tableName, namespace, regionServer, + request.getBypassGlobals()); } else if (request.hasSpaceLimit()) { // Make sure we don't have the below limit as well if (request.hasThrottle()) { throw new IllegalStateException( - "SetQuotaRequests has multiple limits: " + TextFormat.shortDebugString(request)); + "SetQuotaRequests has multiple limits: " + TextFormat.shortDebugString(request)); } // Sanity check on the pb received. if (!request.getSpaceLimit().hasQuota()) { - throw new IllegalArgumentException( - "SpaceLimitRequest is missing the expected SpaceQuota."); + throw new IllegalArgumentException("SpaceLimitRequest is missing the expected SpaceQuota."); } - return QuotaSettingsFactory.fromSpace( - tableName, namespace, request.getSpaceLimit().getQuota()); + return QuotaSettingsFactory.fromSpace(tableName, namespace, + request.getSpaceLimit().getQuota()); } else if (request.hasThrottle()) { return new ThrottleSettings(username, tableName, namespace, regionServer, - request.getThrottle()); + request.getThrottle()); } else { throw new IllegalStateException("Unhandled SetRequestRequest state"); } } /** - * Convert a QuotaSettings to a protocol buffer SetQuotaRequest. - * This is used internally by the Admin client to serialize the quota settings - * and send them to the master. + * Convert a QuotaSettings to a protocol buffer SetQuotaRequest. 
This is used internally by the + * Admin client to serialize the quota settings and send them to the master. */ @InterfaceAudience.Private public static SetQuotaRequest buildSetQuotaRequestProto(final QuotaSettings settings) { @@ -142,9 +139,8 @@ public static SetQuotaRequest buildSetQuotaRequestProto(final QuotaSettings sett } /** - * Called by toSetQuotaRequestProto() - * the subclass should implement this method to set the specific SetQuotaRequest - * properties. + * Called by toSetQuotaRequestProto() the subclass should implement this method to set the + * specific SetQuotaRequest properties. */ @InterfaceAudience.Private protected abstract void setupSetQuotaRequest(SetQuotaRequest.Builder builder); @@ -174,40 +170,46 @@ protected String ownerToString() { protected static String sizeToString(final long size) { if (size >= (1L << 50)) { - return String.format("%.2fP", (double)size / (1L << 50)); + return String.format("%.2fP", (double) size / (1L << 50)); } if (size >= (1L << 40)) { - return String.format("%.2fT", (double)size / (1L << 40)); + return String.format("%.2fT", (double) size / (1L << 40)); } if (size >= (1L << 30)) { - return String.format("%.2fG", (double)size / (1L << 30)); + return String.format("%.2fG", (double) size / (1L << 30)); } if (size >= (1L << 20)) { - return String.format("%.2fM", (double)size / (1L << 20)); + return String.format("%.2fM", (double) size / (1L << 20)); } if (size >= (1L << 10)) { - return String.format("%.2fK", (double)size / (1L << 10)); + return String.format("%.2fK", (double) size / (1L << 10)); } - return String.format("%.2fB", (double)size); + return String.format("%.2fB", (double) size); } protected static String timeToString(final TimeUnit timeUnit) { switch (timeUnit) { - case NANOSECONDS: return "nsec"; - case MICROSECONDS: return "usec"; - case MILLISECONDS: return "msec"; - case SECONDS: return "sec"; - case MINUTES: return "min"; - case HOURS: return "hour"; - case DAYS: return "day"; + case NANOSECONDS: + return "nsec"; + case MICROSECONDS: + return "usec"; + case MILLISECONDS: + return "msec"; + case SECONDS: + return "sec"; + case MINUTES: + return "min"; + case HOURS: + return "hour"; + case DAYS: + return "day"; } throw new RuntimeException("Invalid TimeUnit " + timeUnit); } /** - * Merges the provided settings with {@code this} and returns a new settings - * object to the caller if the merged settings differ from the original. - * + * Merges the provided settings with {@code this} and returns a new settings object to the caller + * if the merged settings differ from the original. * @param newSettings The new settings to merge in. * @return The merged {@link QuotaSettings} object or null if the quota should be deleted. */ @@ -216,7 +218,6 @@ protected static String timeToString(final TimeUnit timeUnit) { /** * Validates that settings being merged into {@code this} is targeting the same "subject", e.g. * user, table, namespace. - * * @param mergee The quota settings to be merged into {@code this}. * @throws IllegalArgumentException if the subjects are not equal. 
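Not part of the patch: a worked restatement of the size-formatting thresholds used by sizeToString above (the HBase helper itself is protected, so this is a standalone copy for illustration); the class and method names are made up for the example.

public class SizeFormatSketch {
  // Mirrors the thresholds shown above: powers of two from KiB up to PiB, two decimals.
  static String humanReadable(long size) {
    if (size >= (1L << 50)) return String.format("%.2fP", (double) size / (1L << 50));
    if (size >= (1L << 40)) return String.format("%.2fT", (double) size / (1L << 40));
    if (size >= (1L << 30)) return String.format("%.2fG", (double) size / (1L << 30));
    if (size >= (1L << 20)) return String.format("%.2fM", (double) size / (1L << 20));
    if (size >= (1L << 10)) return String.format("%.2fK", (double) size / (1L << 10));
    return String.format("%.2fB", (double) size);
  }

  public static void main(String[] args) {
    System.out.println(humanReadable(3L * (1L << 30))); // prints 3.00G
    System.out.println(humanReadable(1536));            // prints 1.50K
  }
}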
*/ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java index 3e5bc16860fd..1f3ebc7c07dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -37,7 +36,7 @@ static class QuotaGlobalsSettingsBypass extends QuotaSettings { private final boolean bypassGlobals; QuotaGlobalsSettingsBypass(final String userName, final TableName tableName, - final String namespace, final String regionServer, final boolean bypassGlobals) { + final String namespace, final String regionServer, final boolean bypassGlobals) { super(userName, tableName, namespace, regionServer); this.bypassGlobals = bypassGlobals; } @@ -76,20 +75,21 @@ protected QuotaGlobalsSettingsBypass merge(QuotaSettings newSettings) throws IOE } } - /* ========================================================================== - * QuotaSettings from the Quotas object + /* + * ========================================================================== QuotaSettings from + * the Quotas object */ static List fromUserQuotas(final String userName, final Quotas quotas) { return fromQuotas(userName, null, null, null, quotas); } static List fromUserQuotas(final String userName, final TableName tableName, - final Quotas quotas) { + final Quotas quotas) { return fromQuotas(userName, tableName, null, null, quotas); } static List fromUserQuotas(final String userName, final String namespace, - final Quotas quotas) { + final Quotas quotas) { return fromQuotas(userName, null, namespace, null, quotas); } @@ -102,20 +102,20 @@ static List fromNamespaceQuotas(final String namespace, final Quo } static List fromRegionServerQuotas(final String regionServer, - final Quotas quotas) { + final Quotas quotas) { return fromQuotas(null, null, null, regionServer, quotas); } private static List fromQuotas(final String userName, final TableName tableName, - final String namespace, final String regionServer, final Quotas quotas) { + final String namespace, final String regionServer, final Quotas quotas) { List settings = new ArrayList<>(); if (quotas.hasThrottle()) { settings - .addAll(fromThrottle(userName, tableName, namespace, regionServer, quotas.getThrottle())); + .addAll(fromThrottle(userName, tableName, namespace, regionServer, quotas.getThrottle())); } if (quotas.getBypassGlobals() == true) { settings - .add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, regionServer, true)); + .add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, regionServer, true)); } if (quotas.hasSpace()) { settings.add(fromSpace(tableName, namespace, quotas.getSpace())); @@ -124,13 +124,13 @@ private static List fromQuotas(final String userName, final Table } public static List fromTableThrottles(final TableName tableName, - final QuotaProtos.Throttle throttle) { + final QuotaProtos.Throttle throttle) { return fromThrottle(null, tableName, null, null, throttle); } protected static List fromThrottle(final 
String userName, - final TableName tableName, final String namespace, final String regionServer, - final QuotaProtos.Throttle throttle) { + final TableName tableName, final String namespace, final String regionServer, + final QuotaProtos.Throttle throttle) { List settings = new ArrayList<>(); if (throttle.hasReqNum()) { settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace, regionServer, @@ -177,7 +177,7 @@ static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota pro } if ((table == null && namespace == null) || (table != null && namespace != null)) { throw new IllegalArgumentException( - "Can only construct SpaceLimitSettings for a table or namespace."); + "Can only construct SpaceLimitSettings for a table or namespace."); } if (table != null) { if (protoQuota.getRemove()) { @@ -193,101 +193,97 @@ static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota pro } } - /* ========================================================================== - * RPC Throttle + /* + * ========================================================================== RPC Throttle */ /** * Throttle the specified user. - * * @param userName the user to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final ThrottleType type, - final long limit, final TimeUnit timeUnit) { + final long limit, final TimeUnit timeUnit) { return throttleUser(userName, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified user. * @param userName the user to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final ThrottleType type, - final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(userName, null, null, null, type, limit, timeUnit, scope); } /** * Throttle the specified user on the specified table. - * - * @param userName the user to throttle + * @param userName the user to throttle * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final TableName tableName, - final ThrottleType type, final long limit, final TimeUnit timeUnit) { + final ThrottleType type, final long limit, final TimeUnit timeUnit) { return throttleUser(userName, tableName, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified user on the specified table. 
- * @param userName the user to throttle + * @param userName the user to throttle * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final TableName tableName, - final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(userName, tableName, null, null, type, limit, timeUnit, scope); } /** * Throttle the specified user on the specified namespace. - * - * @param userName the user to throttle + * @param userName the user to throttle * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final String namespace, - final ThrottleType type, final long limit, final TimeUnit timeUnit) { + final ThrottleType type, final long limit, final TimeUnit timeUnit) { return throttleUser(userName, namespace, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified user on the specified namespace. - * @param userName the user to throttle + * @param userName the user to throttle * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleUser(final String userName, final String namespace, - final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(userName, null, namespace, null, type, limit, timeUnit, scope); } /** * Remove the throttling for the specified user. - * * @param userName the user * @return the quota settings */ @@ -297,20 +293,18 @@ public static QuotaSettings unthrottleUser(final String userName) { /** * Remove the throttling for the specified user. - * * @param userName the user - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleUserByThrottleType(final String userName, - final ThrottleType type) { + final ThrottleType type) { return throttle(userName, null, null, null, type, 0, null, QuotaScope.MACHINE); } /** * Remove the throttling for the specified user on the specified table. 
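Not part of the patch: a minimal sketch of applying and then removing one of the user throttles defined above through Admin.setQuota, assuming a running cluster; the user, table name, and limit are example values.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class UserThrottleSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("default:usertable"); // example table
      // Cap user "bob" at 100 requests per second against this table (MACHINE scope by default).
      admin.setQuota(QuotaSettingsFactory.throttleUser("bob", table,
        ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS));
      // Later, drop that throttle again.
      admin.setQuota(QuotaSettingsFactory.unthrottleUser("bob", table));
    }
  }
}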
- * - * @param userName the user + * @param userName the user * @param tableName the table * @return the quota settings */ @@ -320,21 +314,19 @@ public static QuotaSettings unthrottleUser(final String userName, final TableNam /** * Remove the throttling for the specified user on the specified table. - * - * @param userName the user + * @param userName the user * @param tableName the table - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleUserByThrottleType(final String userName, - final TableName tableName, final ThrottleType type) { + final TableName tableName, final ThrottleType type) { return throttle(userName, tableName, null, null, type, 0, null, QuotaScope.MACHINE); } /** * Remove the throttling for the specified user on the specified namespace. - * - * @param userName the user + * @param userName the user * @param namespace the namespace * @return the quota settings */ @@ -344,48 +336,45 @@ public static QuotaSettings unthrottleUser(final String userName, final String n /** * Remove the throttling for the specified user on the specified namespace. - * - * @param userName the user + * @param userName the user * @param namespace the namespace - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleUserByThrottleType(final String userName, - final String namespace, final ThrottleType type) { + final String namespace, final ThrottleType type) { return throttle(userName, null, namespace, null, type, 0, null, QuotaScope.MACHINE); } /** * Throttle the specified table. - * * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type, - final long limit, final TimeUnit timeUnit) { + final long limit, final TimeUnit timeUnit) { return throttleTable(tableName, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified table. * @param tableName the table to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type, - final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(null, tableName, null, null, type, limit, timeUnit, scope); } /** * Remove the throttling for the specified table. - * * @param tableName the table * @return the quota settings */ @@ -395,47 +384,44 @@ public static QuotaSettings unthrottleTable(final TableName tableName) { /** * Remove the throttling for the specified table. 
- * * @param tableName the table - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleTableByThrottleType(final TableName tableName, - final ThrottleType type) { + final ThrottleType type) { return throttle(null, tableName, null, null, type, 0, null, QuotaScope.MACHINE); } /** * Throttle the specified namespace. - * * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleNamespace(final String namespace, final ThrottleType type, - final long limit, final TimeUnit timeUnit) { + final long limit, final TimeUnit timeUnit) { return throttleNamespace(namespace, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Throttle the specified namespace. * @param namespace the namespace to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit - * @param scope the scope of throttling + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @param scope the scope of throttling * @return the quota settings */ public static QuotaSettings throttleNamespace(final String namespace, final ThrottleType type, - final long limit, final TimeUnit timeUnit, QuotaScope scope) { + final long limit, final TimeUnit timeUnit, QuotaScope scope) { return throttle(null, null, namespace, null, type, limit, timeUnit, scope); } /** * Remove the throttling for the specified namespace. - * * @param namespace the namespace * @return the quota settings */ @@ -445,33 +431,30 @@ public static QuotaSettings unthrottleNamespace(final String namespace) { /** * Remove the throttling for the specified namespace by throttle type. - * * @param namespace the namespace - * @param type the type of throttling + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleNamespaceByThrottleType(final String namespace, - final ThrottleType type) { + final ThrottleType type) { return throttle(null, null, namespace, null, type, 0, null, QuotaScope.MACHINE); } /** * Throttle the specified region server. - * * @param regionServer the region server to throttle - * @param type the type of throttling - * @param limit the allowed number of request/data per timeUnit - * @param timeUnit the limit time unit + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit * @return the quota settings */ public static QuotaSettings throttleRegionServer(final String regionServer, - final ThrottleType type, final long limit, final TimeUnit timeUnit) { + final ThrottleType type, final long limit, final TimeUnit timeUnit) { return throttle(null, null, null, regionServer, type, limit, timeUnit, QuotaScope.MACHINE); } /** * Remove the throttling for the specified region server. 
- * * @param regionServer the region Server * @return the quota settings */ @@ -481,20 +464,19 @@ public static QuotaSettings unthrottleRegionServer(final String regionServer) { /** * Remove the throttling for the specified region server by throttle type. - * - * @param regionServer the region Server - * @param type the type of throttling + * @param regionServer the region Server + * @param type the type of throttling * @return the quota settings */ public static QuotaSettings unthrottleRegionServerByThrottleType(final String regionServer, - final ThrottleType type) { + final ThrottleType type) { return throttle(null, null, null, regionServer, type, 0, null, QuotaScope.MACHINE); } /* Throttle helper */ private static QuotaSettings throttle(final String userName, final TableName tableName, - final String namespace, final String regionServer, final ThrottleType type, final long limit, - final TimeUnit timeUnit, QuotaScope scope) { + final String namespace, final String regionServer, final ThrottleType type, final long limit, + final TimeUnit timeUnit, QuotaScope scope) { QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder(); if (type != null) { builder.setType(ProtobufUtil.toProtoThrottleType(type)); @@ -505,44 +487,42 @@ private static QuotaSettings throttle(final String userName, final TableName tab return new ThrottleSettings(userName, tableName, namespace, regionServer, builder.build()); } - /* ========================================================================== - * Global Settings + /* + * ========================================================================== Global Settings */ /** * Set the "bypass global settings" for the specified user - * - * @param userName the user to throttle + * @param userName the user to throttle * @param bypassGlobals true if the global settings should be bypassed * @return the quota settings */ public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) { - return new QuotaGlobalsSettingsBypass(userName, null, null, null, bypassGlobals); + return new QuotaGlobalsSettingsBypass(userName, null, null, null, bypassGlobals); } - /* ========================================================================== - * FileSystem Space Settings + /* + * ========================================================================== FileSystem Space + * Settings */ /** * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table * to the given size in bytes. When the space usage is exceeded by the table, the provided * {@link SpaceViolationPolicy} is enacted on the table. - * - * @param tableName The name of the table on which the quota should be applied. - * @param sizeLimit The limit of a table's size in bytes. + * @param tableName The name of the table on which the quota should be applied. + * @param sizeLimit The limit of a table's size in bytes. * @param violationPolicy The action to take when the quota is exceeded. * @return An {@link QuotaSettings} object. */ - public static QuotaSettings limitTableSpace( - final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) { + public static QuotaSettings limitTableSpace(final TableName tableName, long sizeLimit, + final SpaceViolationPolicy violationPolicy) { return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy); } /** * Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given * table. 
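[Editor's illustration] The FileSystem-space factory methods (limitTableSpace above, and the removeTableSpaceLimit call documented next) are applied the same way. A hedged fragment, assuming an Admin handle like the one opened in the earlier sketch and made-up table name, size, and policy:

    // Cap table "ns:t1" at 10 GB on disk and reject further writes once the limit is crossed.
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        TableName.valueOf("ns:t1"), 10L * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_WRITES));
    // Drop the space quota again.
    admin.setQuota(QuotaSettingsFactory.removeTableSpaceLimit(TableName.valueOf("ns:t1")));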
- * * @param tableName The name of the table to remove the quota for. * @return A {@link QuotaSettings} object. */ @@ -554,21 +534,19 @@ public static QuotaSettings removeTableSpaceLimit(TableName tableName) { * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given * namespace to the given size in bytes. When the space usage is exceeded by all tables in the * namespace, the provided {@link SpaceViolationPolicy} is enacted on all tables in the namespace. - * - * @param namespace The namespace on which the quota should be applied. - * @param sizeLimit The limit of the namespace's size in bytes. + * @param namespace The namespace on which the quota should be applied. + * @param sizeLimit The limit of the namespace's size in bytes. * @param violationPolicy The action to take when the the quota is exceeded. * @return An {@link QuotaSettings} object. */ - public static QuotaSettings limitNamespaceSpace( - final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) { + public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, + final SpaceViolationPolicy violationPolicy) { return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy); } /** * Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given * namespace. - * * @param namespace The namespace to remove the quota on. * @return A {@link QuotaSettings} object. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 94b87c4683e4..1afb15c0ac61 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.quotas; import java.io.ByteArrayInputStream; @@ -30,7 +29,6 @@ import java.util.Objects; import java.util.Set; import java.util.regex.Pattern; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; @@ -72,18 +70,59 @@ /** * Helper class to interact with the quota table. *

 *   ROW-KEY          FAM/QUAL              DATA                       DESC
 *   n.<namespace>    q:s                   <global-quotas>
 *   n.<namespace>    u:p                   <namespace-quota policy>
 *   n.<namespace>    u:s                   <SpaceQuotaSnapshot>       The size of all snapshots against tables in the namespace
 *   t.<table>        q:s                   <global-quotas>
 *   t.<table>        u:p                   <table-quota policy>
 *   t.<table>        u:ss.<snapshot name>  <SpaceQuotaSnapshot>       The size of a snapshot against a table
 *   u.<user>         q:s                   <global-quotas>
 *   u.<user>         q:s.<table>           <table-quotas>
 *   u.<user>         q:s.<ns>              <namespace-quotas>
    */ @InterfaceAudience.Private @@ -93,7 +132,7 @@ public class QuotaTableUtil { /** System table for quotas */ public static final TableName QUOTA_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota"); protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q"); protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u"); @@ -102,13 +141,13 @@ public class QuotaTableUtil { protected static final byte[] QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p"); protected static final byte[] QUOTA_SNAPSHOT_SIZE_QUALIFIER = Bytes.toBytes("ss"); protected static final String QUOTA_POLICY_COLUMN = - Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY); + Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY); protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u."); protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t."); protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n."); protected static final byte[] QUOTA_REGION_SERVER_ROW_KEY_PREFIX = Bytes.toBytes("r."); private static final byte[] QUOTA_EXCEED_THROTTLE_QUOTA_ROW_KEY = - Bytes.toBytes("exceedThrottleQuota"); + Bytes.toBytes("exceedThrottleQuota"); /* * TODO: Setting specified region server quota isn't supported currently and the row key "r.all" @@ -116,47 +155,48 @@ public class QuotaTableUtil { */ public static final String QUOTA_REGION_SERVER_ROW_KEY = "all"; - /* ========================================================================= - * Quota "settings" helpers + /* + * ========================================================================= Quota "settings" + * helpers */ public static Quotas getTableQuota(final Connection connection, final TableName table) - throws IOException { + throws IOException { return getQuotas(connection, getTableRowKey(table)); } public static Quotas getNamespaceQuota(final Connection connection, final String namespace) - throws IOException { + throws IOException { return getQuotas(connection, getNamespaceRowKey(namespace)); } public static Quotas getUserQuota(final Connection connection, final String user) - throws IOException { + throws IOException { return getQuotas(connection, getUserRowKey(user)); } public static Quotas getUserQuota(final Connection connection, final String user, - final TableName table) throws IOException { + final TableName table) throws IOException { return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table)); } public static Quotas getUserQuota(final Connection connection, final String user, - final String namespace) throws IOException { + final String namespace) throws IOException { return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); } private static Quotas getQuotas(final Connection connection, final byte[] rowKey) - throws IOException { + throws IOException { return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS); } public static Quotas getRegionServerQuota(final Connection connection, final String regionServer) - throws IOException { + throws IOException { return getQuotas(connection, getRegionServerRowKey(regionServer)); } private static Quotas getQuotas(final Connection connection, final byte[] rowKey, - final byte[] qualifier) throws IOException { + final byte[] qualifier) throws IOException { Get get = new Get(rowKey); 
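[Editor's illustration] Tying the getters just above back to the row-key layout: the call below (purely illustrative, since QuotaTableUtil is audience-private; the connection, user, and table are placeholders) reads the cell stored at row u.<user> under qualifier q:s.<table>:

    // Quotas is the shaded QuotaProtos.Quotas message returned by the getter.
    Quotas quotas = QuotaTableUtil.getUserQuota(connection, "bob", TableName.valueOf("ns:t1"));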
get.addColumn(QUOTA_FAMILY_INFO, qualifier); Result result = doGet(connection, get); @@ -185,13 +225,13 @@ public static Get makeGetForRegionServerQuotas(final String regionServer) { } public static Get makeGetForUserQuotas(final String user, final Iterable tables, - final Iterable namespaces) { + final Iterable namespaces) { Get get = new Get(getUserRowKey(user)); get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - for (final TableName table: tables) { + for (final TableName table : tables) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table)); } - for (final String ns: namespaces) { + for (final String ns : namespaces) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns)); } return get; @@ -218,38 +258,36 @@ public static Filter makeFilter(final QuotaFilter filter) { if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) { FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); userFilters.addFilter(nsFilters); hasFilter = true; } if (StringUtils.isNotEmpty(filter.getTableFilter())) { FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); userFilters.addFilter(tableFilters); hasFilter = true; } if (!hasFilter) { userFilters.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); } filterList.addFilter(userFilters); } else if (StringUtils.isNotEmpty(filter.getTableFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0))); + new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0))); } else if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0))); + new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0))); } else if (StringUtils.isNotEmpty(filter.getRegionServerFilter())) { - filterList.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator( - getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0))); + filterList.addFilter(new RowFilter(CompareOperator.EQUAL, + new RegexStringComparator(getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0))); } 
return filterList; } @@ -263,14 +301,14 @@ public static Scan makeQuotaSnapshotScan() { /** * Fetches all {@link SpaceQuotaSnapshot} objects from the {@code hbase:quota} table. - * * @param conn The HBase connection * @return A map of table names and their computed snapshot. */ - public static Map getSnapshots(Connection conn) throws IOException { - Map snapshots = new HashMap<>(); + public static Map getSnapshots(Connection conn) + throws IOException { + Map snapshots = new HashMap<>(); try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { + ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { for (Result r : rs) { extractQuotaSnapshot(r, snapshots); } @@ -311,15 +349,14 @@ public static Get makeQuotaSnapshotGetForTable(TableName tn) { /** * Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided - * {@link Result} and adds them to the given {@link Map}. If the result does not contain - * the expected information or the serialized policy in the value is invalid, this method - * will throw an {@link IllegalArgumentException}. - * - * @param result A row from the quota table. + * {@link Result} and adds them to the given {@link Map}. If the result does not contain the + * expected information or the serialized policy in the value is invalid, this method will throw + * an {@link IllegalArgumentException}. + * @param result A row from the quota table. * @param snapshots A map of snapshots to add the result of this method into. */ - public static void extractQuotaSnapshot( - Result result, Map snapshots) { + public static void extractQuotaSnapshot(Result result, + Map snapshots) { byte[] row = Objects.requireNonNull(result).getRow(); if (row == null || row.length == 0) { throw new IllegalArgumentException("Provided result had a null row"); @@ -328,49 +365,47 @@ public static void extractQuotaSnapshot( Cell c = result.getColumnLatestCell(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); if (c == null) { throw new IllegalArgumentException("Result did not contain the expected column " - + QUOTA_POLICY_COLUMN + ", " + result.toString()); + + QUOTA_POLICY_COLUMN + ", " + result.toString()); } - ByteString buffer = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString buffer = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); try { QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer); snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot)); } catch (InvalidProtocolBufferException e) { throw new IllegalArgumentException( - "Result did not contain a valid SpaceQuota protocol buffer message", e); + "Result did not contain a valid SpaceQuota protocol buffer message", e); } } public static interface UserQuotasVisitor { - void visitUserQuotas(final String userName, final Quotas quotas) - throws IOException; + void visitUserQuotas(final String userName, final Quotas quotas) throws IOException; + void visitUserQuotas(final String userName, final TableName table, final Quotas quotas) throws IOException; + void visitUserQuotas(final String userName, final String namespace, final Quotas quotas) throws IOException; } public static interface TableQuotasVisitor { - void visitTableQuotas(final TableName tableName, final Quotas quotas) - throws IOException; + void visitTableQuotas(final TableName tableName, final Quotas quotas) throws IOException; 
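[Editor's illustration] As a sketch of what the space-quota snapshot scan above yields (a fragment assuming an open Connection named conn; getUsage() and getLimit() are the SpaceQuotaSnapshot accessors, reported in bytes):

    Map<TableName, SpaceQuotaSnapshot> snapshots = QuotaTableUtil.getSnapshots(conn);
    for (Map.Entry<TableName, SpaceQuotaSnapshot> e : snapshots.entrySet()) {
      // One entry per table that has a space-quota snapshot recorded in hbase:quota.
      System.out.println(e.getKey() + " usage=" + e.getValue().getUsage()
          + " limit=" + e.getValue().getLimit());
    }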
} public static interface NamespaceQuotasVisitor { - void visitNamespaceQuotas(final String namespace, final Quotas quotas) - throws IOException; + void visitNamespaceQuotas(final String namespace, final Quotas quotas) throws IOException; } private static interface RegionServerQuotasVisitor { - void visitRegionServerQuotas(final String regionServer, final Quotas quotas) - throws IOException; + void visitRegionServerQuotas(final String regionServer, final Quotas quotas) throws IOException; } public static interface QuotasVisitor extends UserQuotasVisitor, TableQuotasVisitor, - NamespaceQuotasVisitor, RegionServerQuotasVisitor { + NamespaceQuotasVisitor, RegionServerQuotasVisitor { } public static void parseResult(final Result result, final QuotasVisitor visitor) - throws IOException { + throws IOException { byte[] row = result.getRow(); if (isNamespaceRowKey(row)) { parseNamespaceResult(result, visitor); @@ -391,7 +426,7 @@ public static void parseResult(final Result result, final QuotasVisitor visitor) } public static void parseResultToCollection(final Result result, - Collection quotaSettings) throws IOException { + Collection quotaSettings) throws IOException { QuotaTableUtil.parseResult(result, new QuotaTableUtil.QuotasVisitor() { @Override @@ -426,14 +461,14 @@ public void visitRegionServerQuotas(String regionServer, Quotas quotas) { }); } - public static void parseNamespaceResult(final Result result, - final NamespaceQuotasVisitor visitor) throws IOException { + public static void parseNamespaceResult(final Result result, final NamespaceQuotasVisitor visitor) + throws IOException { String namespace = getNamespaceFromRowKey(result.getRow()); parseNamespaceResult(namespace, result, visitor); } protected static void parseNamespaceResult(final String namespace, final Result result, - final NamespaceQuotasVisitor visitor) throws IOException { + final NamespaceQuotasVisitor visitor) throws IOException { byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); if (data != null) { Quotas quotas = quotasFromData(data); @@ -442,13 +477,13 @@ protected static void parseNamespaceResult(final String namespace, final Result } private static void parseRegionServerResult(final Result result, - final RegionServerQuotasVisitor visitor) throws IOException { + final RegionServerQuotasVisitor visitor) throws IOException { String rs = getRegionServerFromRowKey(result.getRow()); parseRegionServerResult(rs, result, visitor); } private static void parseRegionServerResult(final String regionServer, final Result result, - final RegionServerQuotasVisitor visitor) throws IOException { + final RegionServerQuotasVisitor visitor) throws IOException { byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); if (data != null) { Quotas quotas = quotasFromData(data); @@ -457,13 +492,13 @@ private static void parseRegionServerResult(final String regionServer, final Res } public static void parseTableResult(final Result result, final TableQuotasVisitor visitor) - throws IOException { + throws IOException { TableName table = getTableFromRowKey(result.getRow()); parseTableResult(table, result, visitor); } protected static void parseTableResult(final TableName table, final Result result, - final TableQuotasVisitor visitor) throws IOException { + final TableQuotasVisitor visitor) throws IOException { byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); if (data != null) { Quotas quotas = quotasFromData(data); @@ -472,17 +507,17 @@ protected static void 
parseTableResult(final TableName table, final Result resul } public static void parseUserResult(final Result result, final UserQuotasVisitor visitor) - throws IOException { + throws IOException { String userName = getUserFromRowKey(result.getRow()); parseUserResult(userName, result, visitor); } protected static void parseUserResult(final String userName, final Result result, - final UserQuotasVisitor visitor) throws IOException { + final UserQuotasVisitor visitor) throws IOException { Map familyMap = result.getFamilyMap(QUOTA_FAMILY_INFO); if (familyMap == null || familyMap.isEmpty()) return; - for (Map.Entry entry: familyMap.entrySet()) { + for (Map.Entry entry : familyMap.entrySet()) { Quotas quotas = quotasFromData(entry.getValue()); if (Bytes.startsWith(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX)) { String name = Bytes.toString(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX.length); @@ -505,9 +540,8 @@ protected static void parseUserResult(final String userName, final Result result */ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot snapshot) { Put p = new Put(getTableRowKey(tableName)); - p.addColumn( - QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, - SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); + p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, + SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); return p; } @@ -516,23 +550,22 @@ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot sna */ static Get makeGetForSnapshotSize(TableName tn, String snapshot) { Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString()))); - g.addColumn( - QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + g.addColumn(QUOTA_FAMILY_USAGE, + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); return g; } /** - * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to - * the given {@code table}. + * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to the + * given {@code table}. */ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long size) { // We just need a pb message with some `long usage`, so we can just reuse the // SpaceQuotaSnapshot message instead of creating a new one. 
Put p = new Put(getTableRowKey(tableName)); p.addColumn(QUOTA_FAMILY_USAGE, getSnapshotSizeQualifier(snapshot), - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } @@ -542,25 +575,25 @@ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long s static Put createPutForNamespaceSnapshotSize(String namespace, long size) { Put p = new Put(getNamespaceRowKey(namespace)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } /** - * Returns a list of {@code Delete} to remove given table snapshot - * entries to remove from quota table + * Returns a list of {@code Delete} to remove given table snapshot entries to remove from quota + * table * @param snapshotEntriesToRemove the entries to remove */ static List createDeletesForExistingTableSnapshotSizes( - Multimap snapshotEntriesToRemove) { + Multimap snapshotEntriesToRemove) { List deletes = new ArrayList<>(); for (Map.Entry> entry : snapshotEntriesToRemove.asMap() - .entrySet()) { + .entrySet()) { for (String snapshot : entry.getValue()) { Delete d = new Delete(getTableRowKey(entry.getKey())); d.addColumns(QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); deletes.add(d); } } @@ -572,17 +605,17 @@ static List createDeletesForExistingTableSnapshotSizes( * @param connection connection to re-use */ static List createDeletesForExistingTableSnapshotSizes(Connection connection) - throws IOException { + throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, createScanForSpaceSnapshotSizes()); } /** - * Returns a list of {@code Delete} to remove given namespace snapshot - * entries to removefrom quota table + * Returns a list of {@code Delete} to remove given namespace snapshot entries to removefrom quota + * table * @param snapshotEntriesToRemove the entries to remove */ - static List createDeletesForExistingNamespaceSnapshotSizes( - Set snapshotEntriesToRemove) { + static List + createDeletesForExistingNamespaceSnapshotSizes(Set snapshotEntriesToRemove) { List deletes = new ArrayList<>(); for (String snapshot : snapshotEntriesToRemove) { Delete d = new Delete(getNamespaceRowKey(snapshot)); @@ -597,28 +630,28 @@ static List createDeletesForExistingNamespaceSnapshotSizes( * @param connection connection to re-use */ static List createDeletesForExistingNamespaceSnapshotSizes(Connection connection) - throws IOException { + throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, - createScanForNamespaceSnapshotSizes()); + createScanForNamespaceSnapshotSizes()); } /** * Returns a list of {@code Delete} to remove all entries returned by the passed scanner. 
* @param connection connection to re-use - * @param scan the scanner to use to generate the list of deletes + * @param scan the scanner to use to generate the list of deletes */ static List createDeletesForExistingSnapshotsFromScan(Connection connection, Scan scan) - throws IOException { + throws IOException { List deletes = new ArrayList<>(); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(scan)) { + ResultScanner rs = quotaTable.getScanner(scan)) { for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { Cell c = cs.current(); byte[] family = Bytes.copy(c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength()); byte[] qual = - Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength()); + Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength()); Delete d = new Delete(r.getRow()); d.addColumns(family, qual); deletes.add(d); @@ -631,26 +664,26 @@ static List createDeletesForExistingSnapshotsFromScan(Connection connect /** * Remove table usage snapshots (u:p columns) for the namespace passed * @param connection connection to re-use - * @param namespace the namespace to fetch the list of table usage snapshots + * @param namespace the namespace to fetch the list of table usage snapshots */ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace) throws IOException { Scan s = new Scan(); - //Get rows for all tables in namespace + // Get rows for all tables in namespace s.setStartStopRowForPrefixScan( Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM))); - //Scan for table usage column (u:p) in quota table - s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); - //Scan for table quota column (q:s) if table has a space quota defined - s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS); + // Scan for table usage column (u:p) in quota table + s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); + // Scan for table quota column (q:s) if table has a space quota defined + s.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(s)) { + ResultScanner rs = quotaTable.getScanner(s)) { for (Result r : rs) { byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - //if table does not have a table space quota defined, delete table usage column (u:p) + // if table does not have a table space quota defined, delete table usage column (u:p) if (data == null) { Delete delete = new Delete(r.getRow()); - delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); + delete.addColumns(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); quotaTable.delete(delete); } } @@ -660,8 +693,7 @@ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String /** * Fetches the computed size of all snapshots against tables in a namespace for space quotas. */ - static long getNamespaceSnapshotSize( - Connection conn, String namespace) throws IOException { + static long getNamespaceSnapshotSize(Connection conn, String namespace) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace)); if (r.isEmpty()) { @@ -687,8 +719,8 @@ static Get createGetNamespaceSnapshotSize(String namespace) { * Parses the snapshot size from the given Cell's value. 
*/ static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { - ByteString bs = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString bs = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getQuotaUsage(); } @@ -717,7 +749,7 @@ static Scan createScanForNamespaceSnapshotSizes(String namespace) { // Just the usage family and only the snapshot size qualifiers return s.addFamily(QUOTA_FAMILY_USAGE) - .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); } static Scan createScanForSpaceSnapshotSizes() { @@ -737,22 +769,21 @@ static Scan createScanForSpaceSnapshotSizes(TableName table) { } // Just the usage family and only the snapshot size qualifiers - return s.addFamily(QUOTA_FAMILY_USAGE).setFilter( - new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + return s.addFamily(QUOTA_FAMILY_USAGE) + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); } /** * Fetches any persisted HBase snapshot sizes stored in the quota table. The sizes here are - * computed relative to the table which the snapshot was created from. A snapshot's size will - * not include the size of files which the table still refers. These sizes, in bytes, are what - * is used internally to compute quota violation for tables and namespaces. - * + * computed relative to the table which the snapshot was created from. A snapshot's size will not + * include the size of files which the table still refers. These sizes, in bytes, are what is used + * internally to compute quota violation for tables and namespaces. * @return A map of snapshot name to size in bytes per space quota computations */ - public static Map getObservedSnapshotSizes(Connection conn) throws IOException { + public static Map getObservedSnapshotSizes(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { - final Map snapshotSizes = new HashMap<>(); + ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { + final Map snapshotSizes = new HashMap<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { @@ -772,7 +803,7 @@ public static Map getObservedSnapshotSizes(Connection conn) throws */ public static Multimap getTableSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { + ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { Multimap snapshots = HashMultimap.create(); for (Result r : rs) { CellScanner cs = r.cellScanner(); @@ -793,7 +824,7 @@ public static Multimap getTableSnapshots(Connection conn) thr */ public static Set getNamespaceSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(createScanForNamespaceSnapshotSizes())) { + ResultScanner rs = quotaTable.getScanner(createScanForNamespaceSnapshotSizes())) { Set snapshots = new HashSet<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); @@ -810,11 +841,11 @@ public static Set getNamespaceSnapshots(Connection conn) throws IOExcept * Returns the current space quota snapshot of the given {@code tableName} from * {@code 
QuotaTableUtil.QUOTA_TABLE_NAME} or null if the no quota information is available for * that tableName. - * @param conn connection to re-use + * @param conn connection to re-use * @param tableName name of the table whose current snapshot is to be retreived */ public static SpaceQuotaSnapshot getCurrentSnapshotFromQuotaTable(Connection conn, - TableName tableName) throws IOException { + TableName tableName) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Map snapshots = new HashMap<>(1); Result result = quotaTable.get(makeQuotaSnapshotGetForTable(tableName)); @@ -828,15 +859,16 @@ public static SpaceQuotaSnapshot getCurrentSnapshotFromQuotaTable(Connection con } } - /* ========================================================================= - * Quotas protobuf helpers + /* + * ========================================================================= Quotas protobuf + * helpers */ protected static Quotas quotasFromData(final byte[] data) throws IOException { return quotasFromData(data, 0, data.length); } - protected static Quotas quotasFromData( - final byte[] data, int offset, int length) throws IOException { + protected static Quotas quotasFromData(final byte[] data, int offset, int length) + throws IOException { int magicLen = ProtobufMagic.lengthOfPBMagic(); if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) { throw new IOException("Missing pb magic prefix"); @@ -863,25 +895,25 @@ public static boolean isEmptyQuota(final Quotas quotas) { return !hasSettings; } - /* ========================================================================= - * HTable helpers + /* + * ========================================================================= HTable helpers */ - protected static Result doGet(final Connection connection, final Get get) - throws IOException { + protected static Result doGet(final Connection connection, final Get get) throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); } } protected static Result[] doGet(final Connection connection, final List gets) - throws IOException { + throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(gets); } } - /* ========================================================================= - * Quota table row key helpers + /* + * ========================================================================= Quota table row key + * helpers */ protected static byte[] getUserRowKey(final String user) { return Bytes.add(QUOTA_USER_ROW_KEY_PREFIX, Bytes.toBytes(user)); @@ -905,7 +937,7 @@ protected static byte[] getSettingsQualifierForUserTable(final TableName tableNa protected static byte[] getSettingsQualifierForUserNamespace(final String namespace) { return Bytes.add(QUOTA_QUALIFIER_SETTINGS_PREFIX, - Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); + Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); } protected static String getUserRowKeyRegex(final String user) { @@ -933,13 +965,13 @@ private static String getRowKeyRegEx(final byte[] prefix, final String regex) { } protected static String getSettingsQualifierRegexForUserTable(final String table) { - return '^' + Pattern.quote(Bytes.toString(QUOTA_QUALIFIER_SETTINGS_PREFIX)) + - table + "(?> - unmodifiableTableCFsMap(Map> tableCFsMap) { + unmodifiableTableCFsMap(Map> tableCFsMap) { Map> newTableCFsMap = new HashMap<>(); tableCFsMap.forEach((table, cfs) -> newTableCFsMap.put(table, cfs != null ? 
Collections.unmodifiableList(cfs) : null)); @@ -212,8 +212,7 @@ public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) { } @Override - public ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap) { this.tableCFsMap = tableCFsMap; return this; } @@ -232,7 +231,7 @@ public ReplicationPeerConfigBuilder setReplicateAllUserTables(boolean replicateA @Override public ReplicationPeerConfigBuilder - setExcludeTableCFsMap(Map> excludeTableCFsMap) { + setExcludeTableCFsMap(Map> excludeTableCFsMap) { this.excludeTableCFsMap = excludeTableCFsMap; return this; } @@ -309,11 +308,11 @@ public boolean needToReplicate(TableName table) { /** * Decide whether the passed family of the table need replicate to the peer cluster according to * this peer config. - * @param table name of the table + * @param table name of the table * @param family family name - * @return true if (the family of) the table need replicate to the peer cluster. - * If passed family is null, return true if any CFs of the table need replicate; - * If passed family is not null, return true if the passed family need replicate. + * @return true if (the family of) the table need replicate to the peer cluster. If passed family + * is null, return true if any CFs of the table need replicate; If passed family is not + * null, return true if the passed family need replicate. */ public boolean needToReplicate(TableName table, byte[] family) { String namespace = table.getNamespaceAsString(); @@ -330,8 +329,8 @@ public boolean needToReplicate(TableName table, byte[] family) { // If cfs is null or empty then we can make sure that we do not need to replicate this table, // otherwise, we may still need to replicate the table but filter out some families. return cfs != null && !cfs.isEmpty() - // If exclude-table-cfs contains passed family then we make sure that we do not need to - // replicate this family. + // If exclude-table-cfs contains passed family then we make sure that we do not need to + // replicate this family. && (family == null || !cfs.contains(Bytes.toString(family))); } else { // Not replicate all user tables, so filter by namespaces and table-cfs config @@ -348,7 +347,7 @@ public boolean needToReplicate(TableName table, byte[] family) { return tableCFsMap != null && tableCFsMap.containsKey(table) && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table)) // If table-cfs must contain passed family then we need to replicate this family. - || tableCFsMap.get(table).contains(Bytes.toString(family))); + || tableCFsMap.get(table).contains(Bytes.toString(family))); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index c6a97fad9e81..95256d128b46 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
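[Editor's illustration] To make the table-CFs / exclude-table-CFs semantics above concrete, a peer that replicates only two column families of a single table could be built roughly as follows (a sketch: the cluster key, peer id, table, and family names are placeholders; ReplicationPeerConfig.newBuilder() and Admin.addReplicationPeer are the usual 2.x entry points):

    Map<TableName, List<String>> tableCFs = new HashMap<>();
    tableCFs.put(TableName.valueOf("ns:t1"), Arrays.asList("cf1", "cf2"));
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("zk1,zk2,zk3:2181:/hbase")   // peer cluster's ZooKeeper quorum
        .setReplicateAllUserTables(false)           // replicate only what tableCFs lists
        .setTableCFsMap(tableCFs)
        .build();
    admin.addReplicationPeer("peer_1", peerConfig);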
*/ - package org.apache.hadoop.hbase.replication; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -45,7 +43,7 @@ public interface ReplicationPeerConfigBuilder { /** * Sets a "raw" configuration property for this replication peer. For experts only. - * @param key Configuration property key + * @param key Configuration property key * @param value Configuration property value * @return {@code this} */ @@ -60,7 +58,6 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder removeConfiguration(String key); - /** * Adds all of the provided "raw" configuration entries to {@code this}. * @param configuration A collection of raw configuration entries @@ -90,17 +87,15 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData } /** - * Sets an explicit map of tables and column families in those tables that should be replicated - * to the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables - * to a peer. - * + * Sets an explicit map of tables and column families in those tables that should be replicated to + * the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables to a + * peer. * @param tableCFsMap A map from tableName to column family names. An empty collection can be - * passed to indicate replicating all column families. + * passed to indicate replicating all column families. * @return {@code this} * @see #setReplicateAllUserTables(boolean) */ - ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap); + ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap); /** * Sets a unique collection of HBase namespaces that should be replicated to this peer. @@ -125,12 +120,11 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData ReplicationPeerConfigBuilder setReplicateAllUserTables(boolean replicateAllUserTables); /** - * Sets the mapping of table name to column families which should not be replicated. This - * method sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this - * method is only relevant when all user tables are being replicated. - * - * @param tableCFsMap A mapping of table names to column families which should not be - * replicated. An empty list of column families implies all families for the table. + * Sets the mapping of table name to column families which should not be replicated. This method + * sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this method is + * only relevant when all user tables are being replicated. + * @param tableCFsMap A mapping of table names to column families which should not be replicated. + * An empty list of column families implies all families for the table. * @return {@code this}. */ ReplicationPeerConfigBuilder setExcludeTableCFsMap(Map> tableCFsMap); @@ -140,7 +134,6 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData * configured to be replicated. This method sets state which is mutually exclusive to * {@link #setNamespaces(Set)}. Invoking this method is only relevant when all user tables are * being replicated. - * * @param namespaces A set of namespaces whose tables should not be replicated. 
* @return {@code this} */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java index b0c27bb704a0..f839b25af666 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class ReplicationPeerDescription { private final SyncReplicationState syncReplicationState; public ReplicationPeerDescription(String id, boolean enabled, ReplicationPeerConfig config, - SyncReplicationState syncReplicationState) { + SyncReplicationState syncReplicationState) { this.id = id; this.enabled = enabled; this.config = config; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java index de9576caebdb..39bbb20433b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,10 @@ */ @InterfaceAudience.Public public enum SyncReplicationState { - NONE(0), ACTIVE(1), DOWNGRADE_ACTIVE(2), STANDBY(3); + NONE(0), + ACTIVE(1), + DOWNGRADE_ACTIVE(2), + STANDBY(3); private final byte value; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java index e8316d1cce79..92ca03945aae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -47,44 +45,41 @@ public abstract class AbstractHBaseSaslRpcClient { /** * Create a HBaseSaslRpcClient for an authentication method - * @param conf the configuration object - * @param provider the authentication provider - * @param token token to use if needed by the authentication method - * @param serverAddr the address of the hbase service - * @param securityInfo the security details for the remote hbase service - * @param fallbackAllowed does the client allow fallback to simple authentication - * @throws IOException + * @param conf the configuration object + * @param provider the authentication provider + * @param token token to use if needed by the authentication method + * @param serverAddr the address of the hbase service + * @param securityInfo the security details for the remote hbase service + * @param fallbackAllowed does the client allow fallback to simple authentication n */ protected AbstractHBaseSaslRpcClient(Configuration conf, - SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) - throws IOException { + SaslClientAuthenticationProvider provider, Token token, + InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) throws IOException { this(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, "authentication"); } /** * Create a HBaseSaslRpcClient for an authentication method - * @param conf configuration object - * @param provider the authentication provider - * @param token token to use if needed by the authentication method - * @param serverAddr the address of the hbase service - * @param securityInfo the security details for the remote hbase service + * @param conf configuration object + * @param provider the authentication provider + * @param token token to use if needed by the authentication method + * @param serverAddr the address of the hbase service + * @param securityInfo the security details for the remote hbase service * @param fallbackAllowed does the client allow fallback to simple authentication - * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") - * @throws IOException + * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n */ protected AbstractHBaseSaslRpcClient(Configuration conf, - SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, - String rpcProtection) throws IOException { + SaslClientAuthenticationProvider provider, Token token, + InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, + String rpcProtection) throws IOException { this.fallbackAllowed = fallbackAllowed; saslProps = SaslUtil.initSaslProperties(rpcProtection); - saslClient = provider.createClient( - conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); + saslClient = + provider.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); if (saslClient == null) { - throw new IOException("Authentication provider " + provider.getClass() - 
+ " returned a null SaslClient"); + throw new IOException( + "Authentication provider " + provider.getClass() + " returned a null SaslClient"); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java index 259a0a4d651d..873132899d98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; - /** * Exception thrown by access-related methods. */ @@ -33,7 +32,7 @@ public AccessDeniedException() { } public AccessDeniedException(Class clazz, String s) { - super( "AccessDenied [" + clazz.getName() + "]: " + s); + super("AccessDenied [" + clazz.getName() + "]: " + s); } public AccessDeniedException(String s) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java index 65fc6172236d..62c41ab1aed8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; /** Authentication method */ @InterfaceAudience.Private @@ -39,7 +36,7 @@ public enum AuthMethod { public final UserGroupInformation.AuthenticationMethod authenticationMethod; AuthMethod(byte code, String mechanismName, - UserGroupInformation.AuthenticationMethod authMethod) { + UserGroupInformation.AuthenticationMethod authMethod) { this.code = code; this.mechanismName = mechanismName; this.authenticationMethod = authMethod; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java index 97be44fff10d..31ed191f91a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; /** * Unwrap messages with Crypto AES. Should be placed after a diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java index ceb3f35c0c75..c4c914a04d8c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -27,9 +29,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.CoalescingBufferQueue; import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; - /** * wrap messages with Crypto AES. @@ -52,7 +51,7 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof ByteBuf) { queue.add((ByteBuf) msg, promise); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index 74ad96e2cbda..5a816877ba84 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -36,7 +36,9 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -55,34 +57,31 @@ private EncryptionUtil() { } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. - * @param conf configuration - * @param key the raw key bytes + * Protect a key by encrypting it with the secret key of the given subject. 
The configuration must + * be set up correctly for key alias resolution. + * @param conf configuration + * @param key the raw key bytes * @param algorithm the algorithm to use with this key material - * @return the encrypted key bytes - * @throws IOException + * @return the encrypted key bytes n */ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm) - throws IOException { + throws IOException { return wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), new SecretKeySpec(key, algorithm)); } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. - * @param conf configuration + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. + * @param conf configuration * @param subject subject key alias - * @param key the key + * @param key the key * @return the encrypted key bytes */ - public static byte[] wrapKey(Configuration conf, String subject, Key key) - throws IOException { + public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { // Wrap the key with the configured encryption algorithm. - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -98,11 +97,11 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) byte[] keyBytes = key.getEncoded(); builder.setLength(keyBytes.length); builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf)); - builder.setHash( - UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); + builder + .setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); - Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, - conf, cipher, iv); + Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, + iv); builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray())); // Build and return the protobuf message out.reset(); @@ -111,21 +110,18 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) } /** - * Unwrap a key by decrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. - * @param conf configuration + * Unwrap a key by decrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. 
+ * @param conf configuration * @param subject subject key alias - * @param value the encrypted key bytes - * @return the raw key bytes - * @throws IOException - * @throws KeyException + * @param value the encrypted key bytes + * @return the raw key bytes nn */ public static Key unwrapKey(Configuration conf, String subject, byte[] value) - throws IOException, KeyException { - EncryptionProtos.WrappedKey wrappedKey = EncryptionProtos.WrappedKey.PARSER - .parseDelimitedFrom(new ByteArrayInputStream(value)); - String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, - HConstants.CIPHER_AES); + throws IOException, KeyException { + EncryptionProtos.WrappedKey wrappedKey = + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -134,25 +130,27 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value) } private static Key getUnwrapKey(Configuration conf, String subject, - EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { + EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf); String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim(); - if(!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { + if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { String msg = String.format("Unexpected encryption key hash algorithm: %s (expecting: %s)", wrappedHashAlgorithm, configuredHashAlgorithm); - if(Encryption.failOnHashAlgorithmMismatch(conf)) { + if (Encryption.failOnHashAlgorithmMismatch(conf)) { throw new KeyException(msg); } LOG.debug(msg); } ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null; - Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), - wrappedKey.getLength(), subject, conf, cipher, iv); + Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), + subject, conf, cipher, iv); byte[] keyBytes = out.toByteArray(); if (wrappedKey.hasHash()) { - if (!Bytes.equals(wrappedKey.getHash().toByteArray(), - Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { + if ( + !Bytes.equals(wrappedKey.getHash().toByteArray(), + Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes)) + ) { throw new KeyException("Key was not successfully unwrapped"); } } @@ -162,17 +160,17 @@ private static Key getUnwrapKey(Configuration conf, String subject, /** * Unwrap a wal key by decrypting it with the secret key of the given subject. The configuration * must be set up correctly for key alias resolution. 
- * @param conf configuration + * @param conf configuration * @param subject subject key alias - * @param value the encrypted key bytes + * @param value the encrypted key bytes * @return the raw key bytes - * @throws IOException if key is not found for the subject, or if some I/O error occurs + * @throws IOException if key is not found for the subject, or if some I/O error occurs * @throws KeyException if fail to unwrap the key */ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) - throws IOException, KeyException { + throws IOException, KeyException { EncryptionProtos.WrappedKey wrappedKey = - EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { @@ -183,11 +181,10 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) /** * Helper to create an encyption context. - * - * @param conf The current configuration. + * @param conf The current configuration. * @param family The current column descriptor. * @return The created encryption context. - * @throws IOException if an encryption key for the column cannot be unwrapped + * @throws IOException if an encryption key for the column cannot be unwrapped * @throws IllegalStateException in case of encryption related configuration errors */ public static Encryption.Context createEncryptionContext(Configuration conf, @@ -195,7 +192,7 @@ public static Encryption.Context createEncryptionContext(Configuration conf, Encryption.Context cryptoContext = Encryption.Context.NONE; String cipherName = family.getEncryptionType(); if (cipherName != null) { - if(!Encryption.isEncryptionEnabled(conf)) { + if (!Encryption.isEncryptionEnabled(conf)) { throw new IllegalStateException("Encryption for family '" + family.getNameAsString() + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); } @@ -214,9 +211,9 @@ public static Encryption.Context createEncryptionContext(Configuration conf, // We use the encryption type specified in the column schema as a sanity check on // what the wrapped key is telling us if (!cipher.getName().equalsIgnoreCase(cipherName)) { - throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but key specifies algorithm '" - + cipher.getName() + "'"); + throw new IllegalStateException( + "Encryption for family '" + family.getNameAsString() + "' configured with type '" + + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'"); } } else { // Family does not provide key material, create a random key @@ -236,19 +233,16 @@ public static Encryption.Context createEncryptionContext(Configuration conf, /** * Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the * configured master and alternative keys, rather than having to specify a key type to unwrap - * with. - * - * The configuration must be set up correctly for key alias resolution. - * - * @param conf the current configuration + * with. The configuration must be set up correctly for key alias resolution. 
+ * @param conf the current configuration * @param keyBytes the key encrypted by master (or alternative) to unwrap * @return the key bytes, decrypted * @throws IOException if the key cannot be unwrapped */ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOException { Key key; - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // First try the master key key = unwrapKey(conf, masterKeyName, keyBytes); @@ -258,8 +252,7 @@ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOExcept if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = unwrapKey(conf, alternateKeyName, keyBytes); @@ -275,24 +268,21 @@ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOExcept /** * Helper to create an instance of CryptoAES. - * - * @param conf The current configuration. + * @param conf The current configuration. * @param cryptoCipherMeta The metadata for create CryptoAES. * @return The instance of CryptoAES. * @throws IOException if create CryptoAES failed */ public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherMeta, - Configuration conf) throws IOException { + Configuration conf) throws IOException { Properties properties = new Properties(); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); // create SaslAES for client return new CryptoAES(cryptoCipherMeta.getTransformation(), properties, - cryptoCipherMeta.getInKey().toByteArray(), - cryptoCipherMeta.getOutKey().toByteArray(), - cryptoCipherMeta.getInIv().toByteArray(), - cryptoCipherMeta.getOutIv().toByteArray()); + cryptoCipherMeta.getInKey().toByteArray(), cryptoCipherMeta.getOutKey().toByteArray(), + cryptoCipherMeta.getInIv().toByteArray(), cryptoCipherMeta.getOutIv().toByteArray()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index 03af94ddad96..f9350edcf011 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
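For readers tracing the EncryptionUtil changes above, a minimal sketch of the key wrap/unwrap round trip using only the signatures visible in this patch (wrapKey(Configuration, byte[], String) and the unwrapKey(Configuration, byte[]) helper). It assumes a configuration in which hbase.crypto.keyprovider and the master key alias resolve correctly; without a working key provider both calls fail with an IOException. Names here are illustrative, not part of the patch.

import java.security.Key;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.EncryptionUtil;

public class KeyWrapSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // A 128-bit data key; all zeros purely for illustration.
    byte[] dataKey = new byte[16];
    // Encrypt the data key with the subject's master key (the subject defaults to the
    // current user unless hbase.crypto.master.key.name is set).
    byte[] wrapped = EncryptionUtil.wrapKey(conf, dataKey, "AES");
    // Decrypt it again; this overload tries the configured master key first and then the
    // alternate master key, as in the helper shown above.
    Key unwrapped = EncryptionUtil.unwrapKey(conf, wrapped);
    System.out.println(unwrapped.getAlgorithm()); // AES
  }
}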
*/ - package org.apache.hadoop.hbase.security; import java.io.BufferedInputStream; @@ -29,14 +28,11 @@ import java.io.OutputStream; import java.net.InetAddress; import java.nio.ByteBuffer; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.SaslInputStream; @@ -47,6 +43,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; + /** * A utility class that encapsulates SASL logic for RPC client. Copied from * org.apache.hadoop.security @@ -64,15 +62,14 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { private boolean initStreamForCrypto; public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, - boolean fallbackAllowed) throws IOException { + Token token, InetAddress serverAddr, SecurityInfo securityInfo, + boolean fallbackAllowed) throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed); } public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, - boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) - throws IOException { + Token token, InetAddress serverAddr, SecurityInfo securityInfo, + boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); this.initStreamForCrypto = initStreamForCrypto; } @@ -81,16 +78,15 @@ private static void readStatus(DataInputStream inStream) throws IOException { int status = inStream.readInt(); // read status if (status != SaslStatus.SUCCESS.state) { throw new RemoteException(WritableUtils.readString(inStream), - WritableUtils.readString(inStream)); + WritableUtils.readString(inStream)); } } /** * Do client side SASL authentication with server via the given InputStream and OutputStream - * @param inS InputStream to use + * @param inS InputStream to use * @param outS OutputStream to use - * @return true if connection is set up, or false if needs to switch to simple Auth. - * @throws IOException + * @return true if connection is set up, or false if needs to switch to simple Auth. 
n */ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException { DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS)); @@ -112,7 +108,7 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio if (len == SaslUtil.SWITCH_TO_SIMPLE_AUTH) { if (!fallbackAllowed) { throw new IOException("Server asks us to fall back to SIMPLE auth, " - + "but this client is configured to only allow secure connections."); + + "but this client is configured to only allow secure connections."); } if (LOG.isDebugEnabled()) { LOG.debug("Server asks us to fall back to simple auth."); @@ -123,7 +119,7 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio saslToken = new byte[len]; if (LOG.isDebugEnabled()) { LOG.debug("Will read input token of size " + saslToken.length - + " for processing by initSASLContext"); + + " for processing by initSASLContext"); } inStream.readFully(saslToken); } @@ -143,7 +139,7 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio saslToken = new byte[inStream.readInt()]; if (LOG.isDebugEnabled()) { LOG.debug("Will read input token of size " + saslToken.length - + " for processing by initSASLContext"); + + " for processing by initSASLContext"); } inStream.readFully(saslToken); } @@ -151,16 +147,15 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio try { readStatus(inStream); - } - catch (IOException e){ - if(e instanceof RemoteException){ + } catch (IOException e) { + if (e instanceof RemoteException) { LOG.debug("Sasl connection failed: ", e); throw e; } } if (LOG.isDebugEnabled()) { LOG.debug("SASL client context established. Negotiated QoP: " - + saslClient.getNegotiatedProperty(Sasl.QOP)); + + saslClient.getNegotiatedProperty(Sasl.QOP)); } // initial the inputStream, outputStream for both Sasl encryption // and Crypto AES encryption if necessary @@ -189,8 +184,8 @@ public String getSaslQOP() { return (String) saslClient.getNegotiatedProperty(Sasl.QOP); } - public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, - Configuration conf) throws IOException { + public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, Configuration conf) + throws IOException { // create SaslAES for client cryptoAES = EncryptionUtil.createCryptoAES(cryptoCipherMeta, conf); cryptoAesEnable = true; @@ -198,8 +193,7 @@ public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, /** * Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called. - * @return a SASL wrapped InputStream - * @throws IOException + * @return a SASL wrapped InputStream n */ public InputStream getInputStream() throws IOException { if (!saslClient.isComplete()) { @@ -214,6 +208,7 @@ public InputStream getInputStream() throws IOException { class WrappedInputStream extends FilterInputStream { private ByteBuffer unwrappedRpcBuffer = ByteBuffer.allocate(0); + public WrappedInputStream(InputStream in) throws IOException { super(in); } @@ -261,8 +256,7 @@ private void readNextRpcPacket() throws IOException { /** * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called. 
- * @return a SASL wrapped OutputStream - * @throws IOException + * @return a SASL wrapped OutputStream n */ public OutputStream getOutputStream() throws IOException { if (!saslClient.isComplete()) { @@ -279,6 +273,7 @@ class WrappedOutputStream extends FilterOutputStream { public WrappedOutputStream(OutputStream out) throws IOException { super(out); } + @Override public void write(byte[] buf, int off, int len) throws IOException { if (LOG.isDebugEnabled()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java index e4611d181378..a75091c5293d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; /** @@ -43,7 +44,7 @@ public class NettyHBaseRpcConnectionHeaderHandler extends SimpleChannelInboundHa private final ByteBuf connectionHeaderWithLength; public NettyHBaseRpcConnectionHeaderHandler(Promise saslPromise, Configuration conf, - ByteBuf connectionHeaderWithLength) { + ByteBuf connectionHeaderWithLength) { this.saslPromise = saslPromise; this.conf = conf; this.connectionHeaderWithLength = connectionHeaderWithLength; @@ -57,12 +58,12 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep msg.readBytes(buff); RPCProtos.ConnectionHeaderResponse connectionHeaderResponse = - RPCProtos.ConnectionHeaderResponse.parseFrom(buff); + RPCProtos.ConnectionHeaderResponse.parseFrom(buff); // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher if (connectionHeaderResponse.hasCryptoCipherMeta()) { - CryptoAES cryptoAES = EncryptionUtil.createCryptoAES( - connectionHeaderResponse.getCryptoCipherMeta(), conf); + CryptoAES cryptoAES = + EncryptionUtil.createCryptoAES(connectionHeaderResponse.getCryptoCipherMeta(), conf); // replace the Sasl handler with Crypto AES handler setupCryptoAESHandler(ctx.pipeline(), cryptoAES); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java index a5b980350d15..9b16a41afe4e 100644 
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,9 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; -import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; - import java.io.IOException; import java.net.InetAddress; - import javax.security.sasl.Sasl; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -33,6 +28,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; +import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; + /** * Implement SASL logic for netty rpc client. * @since 2.0.0 @@ -42,8 +40,8 @@ public class NettyHBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(NettyHBaseSaslRpcClient.class); public NettyHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, - boolean fallbackAllowed, String rpcProtection) throws IOException { + Token token, InetAddress serverAddr, SecurityInfo securityInfo, + boolean fallbackAllowed, String rpcProtection) throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index 2dd80ab1ca02..7473c3269b04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,25 +17,24 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; - import java.io.IOException; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.hadoop.hbase.ipc.FallbackDisallowedException; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; +import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; /** * Implement SASL logic for netty rpc client. @@ -61,19 +60,19 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler< /** * @param saslPromise {@code true} if success, {@code false} if server tells us to fallback to - * simple. + * simple. */ public NettyHBaseSaslRpcClientHandler(Promise saslPromise, UserGroupInformation ugi, - SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, - Configuration conf) throws IOException { + SaslClientAuthenticationProvider provider, Token token, + InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, Configuration conf) + throws IOException { this.saslPromise = saslPromise; this.ugi = ugi; this.conf = conf; this.provider = provider; this.saslRpcClient = new NettyHBaseSaslRpcClient(conf, provider, token, serverAddr, - securityInfo, fallbackAllowed, conf.get( - "hbase.rpc.protection", SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); + securityInfo, fallbackAllowed, conf.get("hbase.rpc.protection", + SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); } private void writeResponse(ChannelHandlerContext ctx, byte[] response) { @@ -99,10 +98,10 @@ private void tryComplete(ChannelHandlerContext ctx) { } private void setCryptoAESOption() { - boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. 
- getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); - needProcessConnectionHeader = saslEncryptionEnabled && conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() + .equalsIgnoreCase(saslRpcClient.getSaslQOP()); + needProcessConnectionHeader = + saslEncryptionEnabled && conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); } public boolean isNeedProcessConnectionHeader() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java index cbbcb0e77616..256d434f2eca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; - import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; /** * Decode the sasl challenge sent by RpcServer. 
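As a concrete illustration of what setCryptoAESOption() above consults: both configuration keys appear verbatim in this patch, and the Crypto AES handlers are only negotiated when the SASL QOP resolves to privacy and the (default false) AES switch is enabled. The sketch below only populates a client Configuration; it does not open a connection.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CryptoAesSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Crypto AES is considered only when SASL encryption ("privacy") is negotiated ...
    conf.set("hbase.rpc.protection", "privacy");
    // ... and only when the AES switch, which defaults to false, is explicitly enabled.
    conf.setBoolean("hbase.rpc.crypto.encryption.aes.enabled", true);
    System.out.println(conf.get("hbase.rpc.protection") + " / "
      + conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false));
  }
}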
@@ -48,7 +47,7 @@ private ByteBuf tryDecodeChallenge(ByteBuf in, int offset, int readableBytes) th } if (len > MAX_CHALLENGE_SIZE) { throw new IOException( - "Sasl challenge too large(" + len + "), max allowed is " + MAX_CHALLENGE_SIZE); + "Sasl challenge too large(" + len + "), max allowed is " + MAX_CHALLENGE_SIZE); } int totalLen = 4 + len; if (readableBytes < totalLen) { @@ -69,7 +68,7 @@ private void tryDecodeError(ByteBuf in, int offset, int readableBytes) throws IO } if (classLen > MAX_CHALLENGE_SIZE) { throw new IOException("Exception class name length too large(" + classLen - + "), max allowed is " + MAX_CHALLENGE_SIZE); + + "), max allowed is " + MAX_CHALLENGE_SIZE); } if (readableBytes < 4 + classLen + 4) { return; @@ -79,8 +78,8 @@ private void tryDecodeError(ByteBuf in, int offset, int readableBytes) throws IO throw new IOException("Invalid exception message length " + msgLen); } if (msgLen > MAX_CHALLENGE_SIZE) { - throw new IOException("Exception message length too large(" + msgLen + "), max allowed is " - + MAX_CHALLENGE_SIZE); + throw new IOException( + "Exception message length too large(" + msgLen + "), max allowed is " + MAX_CHALLENGE_SIZE); } int totalLen = classLen + msgLen + 8; if (readableBytes < totalLen) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java index 332bc1933d6e..5d6fa08bd4e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum SaslStatus { - SUCCESS (0), - ERROR (1); + SUCCESS(0), + ERROR(1); public final int state; + SaslStatus(int state) { this.state = state; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java index 00d0c41240ac..dfc36e4ba314 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.security; +import javax.security.sasl.SaslClient; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import javax.security.sasl.SaslClient; - -import org.apache.yetus.audience.InterfaceAudience; - /** * Unwrap sasl messages. 
Should be placed after a * io.netty.handler.codec.LengthFieldBasedFrameDecoder diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java index ad2067f2cf22..c2dc1042c913 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,10 @@ import java.util.Base64; import java.util.Map; import java.util.TreeMap; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -56,7 +53,7 @@ public String getSaslQop() { public boolean matches(String stringQop) { if (saslQop.equals(stringQop)) { LOG.warn("Use authentication/integrity/privacy as value for rpc protection " - + "configurations instead of auth/auth-int/auth-conf."); + + "configurations instead of auth/auth-int/auth-conf."); return true; } return name().equalsIgnoreCase(stringQop); @@ -81,8 +78,8 @@ public static char[] encodePassword(byte[] password) { } /** - * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} - * corresponding to the given {@code stringQop} value. + * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} corresponding to + * the given {@code stringQop} value. * @throws IllegalArgumentException If stringQop doesn't match any QOP. */ public static QualityOfProtection getQop(String stringQop) { @@ -91,8 +88,8 @@ public static QualityOfProtection getQop(String stringQop) { return qop; } } - throw new IllegalArgumentException("Invalid qop: " + stringQop - + ". It must be one of 'authentication', 'integrity', 'privacy'."); + throw new IllegalArgumentException("Invalid qop: " + stringQop + + ". It must be one of 'authentication', 'integrity', 'privacy'."); } /** @@ -110,7 +107,7 @@ public static Map initSaslProperties(String rpcProtection) { QualityOfProtection qop = getQop(qops[i]); saslQopBuilder.append(",").append(qop.getSaslQop()); } - saslQop = saslQopBuilder.substring(1); // remove first ',' + saslQop = saslQopBuilder.substring(1); // remove first ',' } Map saslProps = new TreeMap<>(); saslProps.put(Sasl.QOP, saslQop); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java index 62c127e2dfb3..ebc32a827aa5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
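To make the rpc protection strings above concrete, a small sketch of how SaslUtil translates them into SASL QOP values, using the getQop and initSaslProperties signatures shown in this patch. The printed tokens assume the usual auth / auth-int / auth-conf mapping for authentication / integrity / privacy; treat them as expected output rather than something this patch asserts.

import java.util.Map;
import javax.security.sasl.Sasl;
import org.apache.hadoop.hbase.security.SaslUtil;

public class QopMappingSketch {
  public static void main(String[] args) {
    // "privacy" corresponds to the SASL QOP token "auth-conf".
    System.out.println(SaslUtil.getQop("privacy").getSaslQop());
    // A comma-separated preference list is translated item by item and handed to the SaslClient.
    Map<String, String> props = SaslUtil.initSaslProperties("integrity,privacy");
    System.out.println(props.get(Sasl.QOP)); // auth-int,auth-conf
  }
}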
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security; import javax.security.sasl.SaslClient; - import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.yetus.audience.InterfaceAudience; @@ -31,7 +30,6 @@ import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; - /** * wrap sasl messages. */ @@ -53,7 +51,7 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof ByteBuf) { queue.add((ByteBuf) msg, promise); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index 749190a6bbc9..dbb4c83844a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -35,30 +35,28 @@ @InterfaceAudience.Private public class SecurityInfo { /** Maps RPC service names to authentication information */ - private static ConcurrentMap infos = new ConcurrentHashMap<>(); + private static ConcurrentMap infos = new ConcurrentHashMap<>(); // populate info for known services static { infos.put(AdminProtos.AdminService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(ClientProtos.ClientService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterProtos.HbckService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegistryProtos.ClientMetaService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); // NOTE: IF ADDING A NEW SERVICE, BE SURE TO UPDATE HBasePolicyProvider ALSO ELSE // new Service will not be found when all is Kerberized!!!! } /** - * Adds a security configuration for a new service name. Note that this will have no effect if - * the service name was already registered. + * Adds a security configuration for a new service name. Note that this will have no effect if the + * service name was already registered. 
*/ public static void addInfo(String serviceName, SecurityInfo securityInfo) { infos.putIfAbsent(serviceName, securityInfo); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 67546b78b001..b6986b564ac8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,141 +45,104 @@ @InterfaceAudience.Public public class AccessControlClient { public static final TableName ACL_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); /** * Return true if authorization is supported and enabled * @param connection The connection to use - * @return true if authorization is supported and enabled, false otherwise - * @throws IOException + * @return true if authorization is supported and enabled, false otherwise n */ public static boolean isAuthorizationEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() - .contains(SecurityCapability.AUTHORIZATION); + .contains(SecurityCapability.AUTHORIZATION); } /** * Return true if cell authorization is supported and enabled * @param connection The connection to use - * @return true if cell authorization is supported and enabled, false otherwise - * @throws IOException + * @return true if cell authorization is supported and enabled, false otherwise n */ public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() - .contains(SecurityCapability.CELL_AUTHORIZATION); + .contains(SecurityCapability.CELL_AUTHORIZATION); } - private static BlockingInterface getAccessControlServiceStub(Table ht) - throws IOException { + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = - AccessControlProtos.AccessControlService.newBlockingStub(service); + BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service); return protocol; } /** * Grants permission on the specified table for the specified user - * @param connection The Connection instance to use - * @param tableName - * @param userName - * @param family - * @param qual - * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. 
- * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnn * @param mergeExistingPermissions If set + * to false, later granted permissions will override previous granted + * permissions. otherwise, it'll merge with previous granted permissions. nn */ - private static void grant(Connection connection, final TableName tableName, - final String userName, final byte[] family, final byte[] qual, boolean mergeExistingPermissions, - final Permission.Action... actions) throws Throwable { + private static void grant(Connection connection, final TableName tableName, final String userName, + final byte[] family, final byte[] qual, boolean mergeExistingPermissions, + final Permission.Action... actions) throws Throwable { connection.getAdmin().grant(new UserPermission(userName, Permission.newBuilder(tableName) - .withFamily(family).withQualifier(qual).withActions(actions).build()), + .withFamily(family).withQualifier(qual).withActions(actions).build()), mergeExistingPermissions); } /** - * Grants permission on the specified table for the specified user. - * If permissions for a specified user exists, later granted permissions will override previous granted permissions. - * @param connection The Connection instance to use - * @param tableName - * @param userName - * @param family - * @param qual - * @param actions - * @throws Throwable + * Grants permission on the specified table for the specified user. If permissions for a specified + * user exists, later granted permissions will override previous granted permissions. + * @param connection The Connection instance to use nnnnnn */ public static void grant(Connection connection, final TableName tableName, final String userName, - final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { + final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { grant(connection, tableName, userName, family, qual, true, actions); } /** - * Grants permission on the specified namespace for the specified user. - * @param connection - * @param namespace - * @param userName - * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. - * @param actions - * @throws Throwable + * Grants permission on the specified namespace for the specified user. nnn * @param + * mergeExistingPermissions If set to false, later granted permissions will override previous + * granted permissions. otherwise, it'll merge with previous granted permissions. nn */ private static void grant(Connection connection, final String namespace, final String userName, - boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { + boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { connection.getAdmin().grant( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build()), mergeExistingPermissions); } /** - * Grants permission on the specified namespace for the specified user. - * If permissions on the specified namespace exists, later granted permissions will override previous granted + * Grants permission on the specified namespace for the specified user. If permissions on the + * specified namespace exists, later granted permissions will override previous granted * permissions. 
- * @param connection The Connection instance to use - * @param namespace - * @param userName - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnn */ public static void grant(Connection connection, final String namespace, final String userName, - final Permission.Action... actions) throws Throwable { + final Permission.Action... actions) throws Throwable { grant(connection, namespace, userName, true, actions); } /** - * Grant global permissions for the specified user. - * @param connection - * @param userName - * @param mergeExistingPermissions If set to false, later granted permissions will override - * previous granted permissions. otherwise, it'll merge with previous granted - * permissions. - * @param actions - * @throws Throwable + * Grant global permissions for the specified user. nn * @param mergeExistingPermissions If set to + * false, later granted permissions will override previous granted permissions. otherwise, it'll + * merge with previous granted permissions. nn */ private static void grant(Connection connection, final String userName, - boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { + boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { connection.getAdmin().grant( new UserPermission(userName, Permission.newBuilder().withActions(actions).build()), mergeExistingPermissions); } /** - * Grant global permissions for the specified user. - * If permissions for the specified user exists, later granted permissions will override previous granted - * permissions. - * @param connection - * @param userName - * @param actions - * @throws Throwable + * Grant global permissions for the specified user. If permissions for the specified user exists, + * later granted permissions will override previous granted permissions. nnnn */ public static void grant(Connection connection, final String userName, - final Permission.Action... actions) throws Throwable { + final Permission.Action... actions) throws Throwable { grant(connection, userName, true, actions); } public static boolean isAccessControllerRunning(Connection connection) - throws MasterNotRunningException, ZooKeeperConnectionException, IOException { + throws MasterNotRunningException, ZooKeeperConnectionException, IOException { try (Admin admin = connection.getAdmin()) { return admin.isTableAvailable(ACL_TABLE_NAME); } @@ -187,31 +150,21 @@ public static boolean isAccessControllerRunning(Connection connection) /** * Revokes the permission on the table - * @param connection The Connection instance to use - * @param tableName - * @param username - * @param family - * @param qualifier - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnnnn */ - public static void revoke(Connection connection, final TableName tableName, - final String username, final byte[] family, final byte[] qualifier, - final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final TableName tableName, final String username, + final byte[] family, final byte[] qualifier, final Permission.Action... actions) + throws Throwable { connection.getAdmin().revoke(new UserPermission(username, Permission.newBuilder(tableName) - .withFamily(family).withQualifier(qualifier).withActions(actions).build())); + .withFamily(family).withQualifier(qualifier).withActions(actions).build())); } /** * Revokes the permission on the namespace for the specified user. 
- * @param connection The Connection instance to use - * @param namespace - * @param userName - * @param actions - * @throws Throwable + * @param connection The Connection instance to use nnnn */ - public static void revoke(Connection connection, final String namespace, - final String userName, final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final String namespace, final String userName, + final Permission.Action... actions) throws Throwable { connection.getAdmin().revoke( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build())); } @@ -221,9 +174,9 @@ public static void revoke(Connection connection, final String namespace, * @param connection The Connection instance to use */ public static void revoke(Connection connection, final String userName, - final Permission.Action... actions) throws Throwable { + final Permission.Action... actions) throws Throwable { connection.getAdmin() - .revoke(new UserPermission(userName, Permission.newBuilder().withActions(actions).build())); + .revoke(new UserPermission(userName, Permission.newBuilder().withActions(actions).build())); } /** @@ -232,11 +185,10 @@ public static void revoke(Connection connection, final String userName, * along with the list of superusers would be returned. Else, no rows get returned. * @param connection The Connection instance to use * @param tableRegex The regular expression string to match against - * @return List of UserPermissions - * @throws Throwable + * @return List of UserPermissions n */ public static List getUserPermissions(Connection connection, String tableRegex) - throws Throwable { + throws Throwable { return getUserPermissions(connection, tableRegex, HConstants.EMPTY_STRING); } @@ -244,12 +196,12 @@ public static List getUserPermissions(Connection connection, Str * List all the userPermissions matching the given table pattern and user name. * @param connection Connection * @param tableRegex The regular expression string to match against - * @param userName User name, if empty then all user permissions will be retrieved. + * @param userName User name, if empty then all user permissions will be retrieved. * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - String userName) throws Throwable { + String userName) throws Throwable { List permList = new ArrayList<>(); try (Admin admin = connection.getAdmin()) { if (tableRegex == null || tableRegex.isEmpty()) { @@ -269,7 +221,7 @@ public static List getUserPermissions(Connection connection, Str List htds = admin.listTableDescriptors(Pattern.compile(tableRegex), true); for (TableDescriptor htd : htds) { permList.addAll(admin.getUserPermissions(GetUserPermissionsRequest - .newBuilder(htd.getTableName()).withUserName(userName).build())); + .newBuilder(htd.getTableName()).withUserName(userName).build())); } } } @@ -278,46 +230,46 @@ public static List getUserPermissions(Connection connection, Str /** * List all the userPermissions matching the given table pattern and column family. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, empty + * or a namespace regular expression. 
* @param columnFamily Column family * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily) throws Throwable { + byte[] columnFamily) throws Throwable { return getUserPermissions(connection, tableRegex, columnFamily, null, HConstants.EMPTY_STRING); } /** * List all the userPermissions matching the given table pattern, column family and user name. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, empty + * or a namespace regular expression. * @param columnFamily Column family - * @param userName User name, if empty then all user permissions will be retrieved. + * @param userName User name, if empty then all user permissions will be retrieved. * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily, String userName) throws Throwable { + byte[] columnFamily, String userName) throws Throwable { return getUserPermissions(connection, tableRegex, columnFamily, null, userName); } /** * List all the userPermissions matching the given table pattern, column family and column * qualifier. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. - * @param columnFamily Column family + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, + * empty or a namespace regular expression. + * @param columnFamily Column family * @param columnQualifier Column qualifier * @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily, byte[] columnQualifier) throws Throwable { + byte[] columnFamily, byte[] columnQualifier) throws Throwable { return getUserPermissions(connection, tableRegex, columnFamily, columnQualifier, HConstants.EMPTY_STRING); } @@ -325,17 +277,17 @@ public static List getUserPermissions(Connection connection, Str /** * List all the userPermissions matching the given table pattern, column family and column * qualifier. - * @param connection Connection - * @param tableRegex The regular expression string to match against. It shouldn't be null, empty - * or a namespace regular expression. - * @param columnFamily Column family + * @param connection Connection + * @param tableRegex The regular expression string to match against. It shouldn't be null, + * empty or a namespace regular expression. + * @param columnFamily Column family * @param columnQualifier Column qualifier - * @param userName User name, if empty then all user permissions will be retrieved. + * @param userName User name, if empty then all user permissions will be retrieved. 
* @return List of UserPermissions * @throws Throwable on failure */ public static List getUserPermissions(Connection connection, String tableRegex, - byte[] columnFamily, byte[] columnQualifier, String userName) throws Throwable { + byte[] columnFamily, byte[] columnQualifier, String userName) throws Throwable { if (tableRegex == null || tableRegex.isEmpty() || tableRegex.charAt(0) == '@') { throw new IllegalArgumentException("Table name can't be null or empty or a namespace."); } @@ -346,7 +298,7 @@ public static List getUserPermissions(Connection connection, Str for (TableDescriptor htd : htds) { permList.addAll(admin.getUserPermissions( GetUserPermissionsRequest.newBuilder(htd.getTableName()).withFamily(columnFamily) - .withQualifier(columnQualifier).withUserName(userName).build())); + .withQualifier(columnQualifier).withUserName(userName).build())); } } return permList; @@ -355,20 +307,20 @@ public static List getUserPermissions(Connection connection, Str /** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. - * @param connection Connection - * @param tableName Table name, it shouldn't be null or empty. - * @param columnFamily The column family. Optional argument, can be empty. If empty then - * validation will happen at table level. + * @param connection Connection + * @param tableName Table name, it shouldn't be null or empty. + * @param columnFamily The column family. Optional argument, can be empty. If empty then + * validation will happen at table level. * @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then - * validation will happen at table and column family level. columnQualifier will not be - * considered if columnFamily is passed as null or empty. - * @param userName User name, it shouldn't be null or empty. - * @param actions Actions + * validation will happen at table and column family level. columnQualifier + * will not be considered if columnFamily is passed as null or empty. + * @param userName User name, it shouldn't be null or empty. + * @param actions Actions * @return true if access allowed to the specified user, otherwise false. * @throws Throwable on failure */ public static boolean hasPermission(Connection connection, String tableName, String columnFamily, - String columnQualifier, String userName, Permission.Action... actions) throws Throwable { + String columnQualifier, String userName, Permission.Action... actions) throws Throwable { return hasPermission(connection, tableName, Bytes.toBytes(columnFamily), Bytes.toBytes(columnQualifier), userName, actions); } @@ -376,26 +328,26 @@ public static boolean hasPermission(Connection connection, String tableName, Str /** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. - * @param connection Connection - * @param tableName Table name, it shouldn't be null or empty. - * @param columnFamily The column family. Optional argument, can be empty. If empty then - * validation will happen at table level. + * @param connection Connection + * @param tableName Table name, it shouldn't be null or empty. + * @param columnFamily The column family. Optional argument, can be empty. If empty then + * validation will happen at table level. * @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then - * validation will happen at table and column family level. 
columnQualifier will not be - * considered if columnFamily is passed as null or empty. - * @param userName User name, it shouldn't be null or empty. - * @param actions Actions + * validation will happen at table and column family level. columnQualifier + * will not be considered if columnFamily is passed as null or empty. + * @param userName User name, it shouldn't be null or empty. + * @param actions Actions * @return true if access allowed to the specified user, otherwise false. * @throws Throwable on failure */ public static boolean hasPermission(Connection connection, String tableName, byte[] columnFamily, - byte[] columnQualifier, String userName, Permission.Action... actions) throws Throwable { + byte[] columnQualifier, String userName, Permission.Action... actions) throws Throwable { if (StringUtils.isEmpty(tableName) || StringUtils.isEmpty(userName)) { throw new IllegalArgumentException("Table and user name can't be null or empty."); } List permissions = new ArrayList<>(1); permissions.add(Permission.newBuilder(TableName.valueOf(tableName)).withFamily(columnFamily) - .withQualifier(columnQualifier).withActions(actions).build()); + .withQualifier(columnQualifier).withActions(actions).build()); return connection.getAdmin().hasUserPermissions(userName, permissions).get(0); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java index e0c4d99dfca5..a795d296fe7c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; @@ -24,16 +23,16 @@ public interface AccessControlConstants { /** - * Configuration option that toggles whether EXEC permission checking is - * performed during coprocessor endpoint invocations. + * Configuration option that toggles whether EXEC permission checking is performed during + * coprocessor endpoint invocations. */ public static final String EXEC_PERMISSION_CHECKS_KEY = "hbase.security.exec.permission.checks"; /** Default setting for hbase.security.exec.permission.checks; false */ public static final boolean DEFAULT_EXEC_PERMISSION_CHECKS = false; /** - * Configuration or CF schema option for early termination of access checks - * if table or CF permissions grant access. Pre-0.98 compatible behavior + * Configuration or CF schema option for early termination of access checks if table or CF + * permissions grant access. Pre-0.98 compatible behavior */ public static final String CF_ATTRIBUTE_EARLY_OUT = "hbase.security.access.early_out"; /** Default setting for hbase.security.access.early_out */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java index 236191f1d66c..066e6f4e04da 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
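[Illustrative aside, not part of the patch] The AccessControlClient methods reformatted above (getUserPermissions, hasPermission) are the usual client entry points. A minimal sketch of how a caller might drive them, assuming an already-configured cluster; the table "mytable", family "cf", qualifier "q" and user "alice" are made-up names:

// Hedged sketch only; names below are hypothetical.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class AccessControlClientSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // List the permissions user "alice" holds on tables matching "mytable".
      List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, "mytable", "alice");
      perms.forEach(System.out::println);

      // Check whether "alice" may READ family "cf", qualifier "q" of "mytable".
      boolean canRead = AccessControlClient.hasPermission(conn, "mytable", "cf", "q",
        "alice", Permission.Action.READ);
      System.out.println("alice can read: " + canRead);
    }
  }
}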
See the NOTICE file * distributed with this work for additional information @@ -47,26 +47,25 @@ */ @InterfaceAudience.Private public class AccessControlUtil { - private AccessControlUtil() {} + private AccessControlUtil() { + } /** * Create a request to grant user table permissions. - * - * @param username the short user name who to grant permissions + * @param username the short user name who to grant permissions * @param tableName optional table name the permissions apply - * @param family optional column family + * @param family optional column family * @param qualifier optional qualifier - * @param actions the permissions to be granted + * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest * @throws NullPointerException if {@code tableName} is {@code null} */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, - boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, boolean mergeExistingPermissions, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -81,31 +80,26 @@ public static AccessControlProtos.GrantRequest buildGrantRequest( if (qualifier != null) { permissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to grant user namespace permissions. - * - * @param username the short user name who to grant permissions + * @param username the short user name who to grant permissions * @param namespace optional table name the permissions apply - * @param actions the permissions to be granted + * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, String namespace, boolean mergeExistingPermissions, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + String namespace, boolean mergeExistingPermissions, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -113,56 +107,46 @@ public static AccessControlProtos.GrantRequest buildGrantRequest( permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to revoke user global permissions. - * * @param username the short user name whose permissions to be revoked - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to revoke user namespace permissions. - * - * @param username the short user name whose permissions to be revoked + * @param username the short user name whose permissions to be revoked * @param namespace optional table name the permissions apply - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, String namespace, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + String namespace, AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -170,60 +154,51 @@ public static AccessControlProtos.RevokeRequest buildRevokeRequest( permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to grant user global permissions. - * * @param username the short user name who to grant permissions - * @param actions the permissions to be granted + * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ public static AccessControlProtos.GrantRequest buildGrantRequest(String username, - boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions(String user, - Permission perms) { + Permission perms) { return AccessControlProtos.UsersAndPermissions.newBuilder() - .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder() - .setUser(ByteString.copyFromUtf8(user)) - .addPermissions(toPermission(perms)) - .build()) - .build(); + .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder() + .setUser(ByteString.copyFromUtf8(user)).addPermissions(toPermission(perms)).build()) + .build(); } - public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( - ListMultimap perms) { + public static AccessControlProtos.UsersAndPermissions + toUsersAndPermissions(ListMultimap perms) { AccessControlProtos.UsersAndPermissions.Builder builder = - 
AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perms.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (Permission perm: entry.getValue()) { + for (Permission perm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(perm)); } builder.addUserPermissions(userPermBuilder.build()); @@ -231,13 +206,13 @@ public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( return builder.build(); } - public static ListMultimap toUsersAndPermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUsersAndPermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap result = ArrayListMultimap.create(); - for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms: - proto.getUserPermissionsList()) { + for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms : proto + .getUserPermissionsList()) { String user = userPerms.getUser().toStringUtf8(); - for (AccessControlProtos.Permission perm: userPerms.getPermissionsList()) { + for (AccessControlProtos.Permission perm : userPerms.getPermissionsList()) { result.put(user, toPermission(perm)); } } @@ -285,7 +260,7 @@ public static Permission toPermission(AccessControlProtos.Permission proto) { throw new IllegalStateException("Namespace must not be empty in NamespacePermission"); } return Permission.newBuilder(perm.getNamespaceName().toStringUtf8()).withActions(actions) - .build(); + .build(); } if (proto.getType() == AccessControlProtos.Permission.Type.Table) { AccessControlProtos.TablePermission perm = proto.getTablePermission(); @@ -304,14 +279,13 @@ public static Permission toPermission(AccessControlProtos.Permission proto) { qualifier = perm.getQualifier().toByteArray(); } return Permission.newBuilder(table).withFamily(family).withQualifier(qualifier) - .withActions(actions).build(); + .withActions(actions).build(); } throw new IllegalStateException("Unrecognize Perm Type: " + proto.getType()); } /** * Convert a client Permission to a Permission proto - * * @param perm the client Permission * @return the protobuf Permission */ @@ -356,7 +330,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { - for (Permission.Action a: actions) { + for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } @@ -367,12 +341,11 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { /** * Converts a list of Permission.Action proto to an array of client Permission.Action objects. 
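[Illustrative aside, not part of the patch] The builders reformatted above construct protobuf GrantRequest/RevokeRequest messages and convert between protobuf and client Permission objects. A minimal sketch of a build-and-convert round trip, assuming the non-shaded generated classes under org.apache.hadoop.hbase.protobuf.generated and hypothetical names ("alice", "mytable", "cf"):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.security.access.AccessControlUtil;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class GrantRequestSketch {
  public static void main(String[] args) {
    // Build a GrantRequest for READ+WRITE on table "mytable", family "cf",
    // merging with whatever permissions the user already holds.
    AccessControlProtos.GrantRequest grant = AccessControlUtil.buildGrantRequest(
      "alice", TableName.valueOf("mytable"), Bytes.toBytes("cf"), null /* qualifier */,
      true /* mergeExistingPermissions */,
      AccessControlProtos.Permission.Action.READ,
      AccessControlProtos.Permission.Action.WRITE);

    // Convert the embedded protobuf Permission back into the client-side type.
    Permission perm =
      AccessControlUtil.toPermission(grant.getUserPermission().getPermission());
    System.out.println(perm);
  }
}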
- * * @param protoActions the list of protobuf Actions * @return the converted array of Actions */ public static Permission.Action[] - toPermissionActions(List protoActions) { + toPermissionActions(List protoActions) { Permission.Action[] actions = new Permission.Action[protoActions.size()]; for (int i = 0; i < protoActions.size(); i++) { actions[i] = toPermissionAction(protoActions.get(i)); @@ -382,68 +355,62 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { /** * Converts a Permission.Action proto to a client Permission.Action object. - * * @param action the protobuf Action * @return the converted Action */ - public static Permission.Action toPermissionAction( - AccessControlProtos.Permission.Action action) { + public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client Permission.Action to a Permission.Action proto - * * @param action the client Action * @return the protobuf Action */ - public static AccessControlProtos.Permission.Action toPermissionAction( - Permission.Action action) { + public static AccessControlProtos.Permission.Action toPermissionAction(Permission.Action action) { switch (action) { - case READ: - return AccessControlProtos.Permission.Action.READ; - case WRITE: - return AccessControlProtos.Permission.Action.WRITE; - case EXEC: - return AccessControlProtos.Permission.Action.EXEC; - case CREATE: - return AccessControlProtos.Permission.Action.CREATE; - case ADMIN: - return AccessControlProtos.Permission.Action.ADMIN; + case READ: + return AccessControlProtos.Permission.Action.READ; + case WRITE: + return AccessControlProtos.Permission.Action.WRITE; + case EXEC: + return AccessControlProtos.Permission.Action.EXEC; + case CREATE: + return AccessControlProtos.Permission.Action.CREATE; + case ADMIN: + return AccessControlProtos.Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client user permission to a user permission proto - * * @param perm the client UserPermission * @return the protobuf UserPermission */ public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(perm.getUser())) - .setPermission(toPermission(perm.getPermission())) - .build(); + .setUser(ByteString.copyFromUtf8(perm.getUser())) + .setPermission(toPermission(perm.getPermission())).build(); } /** * Converts the permissions list into a protocol buffer GetUserPermissionsResponse */ - public static GetUserPermissionsResponse buildGetUserPermissionsResponse( - final List permissions) { + public static GetUserPermissionsResponse + buildGetUserPermissionsResponse(final List permissions) { 
GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder(); for (UserPermission perm : permissions) { builder.addUserPermission(toUserPermission(perm)); @@ -453,7 +420,6 @@ public static GetUserPermissionsResponse buildGetUserPermissionsResponse( /** * Converts a user permission proto to a client user permission object. - * * @param proto the protobuf UserPermission * @return the converted UserPermission */ @@ -462,21 +428,20 @@ public static UserPermission toUserPermission(AccessControlProtos.UserPermission } /** - * Convert a ListMultimap<String, TablePermission> where key is username - * to a protobuf UserPermission - * + * Convert a ListMultimap<String, TablePermission> where key is username to a protobuf + * UserPermission * @param perm the list of user and table permissions * @return the protobuf UserTablePermissions */ - public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( - ListMultimap perm) { + public static AccessControlProtos.UsersAndPermissions + toUserTablePermissions(ListMultimap perm) { AccessControlProtos.UsersAndPermissions.Builder builder = - AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (UserPermission userPerm: entry.getValue()) { + for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); } builder.addUserPermissions(userPermBuilder.build()); @@ -488,55 +453,52 @@ public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( * A utility used to grant a user global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions - * @param actions the permissions to be granted - * @throws ServiceException - * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead. + * @param actions the permissions to be granted n * @deprecated Use + * {@link Admin#grant(UserPermission, boolean)} instead. */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, boolean mergeExistingPermissions, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, + boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, mergeExistingPermissions, + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, mergeExistingPermissions, permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } /** - * A utility used to grant a user table permissions. The permissions will - * be for a table table/column family/qualifier. + * A utility used to grant a user table permissions. The permissions will be for a table + * table/column family/qualifier. *
<p>
    * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions - * @param tableName optional table name - * @param f optional column family - * @param q optional qualifier - * @param actions the permissions to be granted - * @throws ServiceException - * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead. + * @param tableName optional table name + * @param f optional column family + * @param q optional qualifier + * @param actions the permissions to be granted n * @deprecated Use + * {@link Admin#grant(UserPermission, boolean)} instead. */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, - byte[] f, byte[] q, boolean mergeExistingPermissions, Permission.Action... actions) - throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, + byte[] f, byte[] q, boolean mergeExistingPermissions, Permission.Action... actions) + throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.GrantRequest request = - buildGrantRequest(userShortName, tableName, f, q, mergeExistingPermissions, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + buildGrantRequest(userShortName, tableName, f, q, mergeExistingPermissions, + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } @@ -544,24 +506,23 @@ public static void grant(RpcController controller, * A utility used to grant a user namespace permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param namespace the short name of the user to grant permissions - * @param actions the permissions to be granted - * @throws ServiceException - * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead. + * @param protocol the AccessControlService protocol proxy + * @param namespace the short name of the user to grant permissions + * @param actions the permissions to be granted n * @deprecated Use + * {@link Admin#grant(UserPermission, boolean)} instead. */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, String namespace, - boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, String namespace, + boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, namespace, mergeExistingPermissions, + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, namespace, mergeExistingPermissions, permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } @@ -570,55 +531,53 @@ public static void grant(RpcController controller, * A utility used to revoke a user's global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @throws ServiceException on failure * @deprecated Use {@link Admin#revoke(UserPermission)} instead. */ @Deprecated public static void revoke(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, + Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } /** - * A utility used to revoke a user's table permissions. The permissions will - * be for a table/column family/qualifier. + * A utility used to revoke a user's table permissions. The permissions will be for a table/column + * family/qualifier. *
<p>
    * It's also called by the shell, in case you want to find references. - * - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param tableName optional table name - * @param f optional column family - * @param q optional qualifier - * @param actions the permissions to be revoked + * @param tableName optional table name + * @param f optional column family + * @param q optional qualifier + * @param actions the permissions to be revoked * @throws ServiceException on failure * @deprecated Use {@link Admin#revoke(UserPermission)} instead. */ @Deprecated public static void revoke(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, - byte[] f, byte[] q, Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, + byte[] f, byte[] q, Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, tableName, f, q, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -626,26 +585,25 @@ public static void revoke(RpcController controller, * A utility used to revoke a user's namespace permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param namespace optional table name - * @param actions the permissions to be revoked + * @param namespace optional table name + * @param actions the permissions to be revoked * @throws ServiceException on failure * @deprecated Use {@link Admin#revoke(UserPermission)} instead. */ @Deprecated public static void revoke(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, String namespace, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, String namespace, + Permission.Action... actions) throws ServiceException { List permActions = - Lists.newArrayListWithCapacity(actions.length); + Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, namespace, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -653,31 +611,30 @@ public static void revoke(RpcController controller, * A utility used to get user's global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy + * @param protocol the AccessControlService protocol proxy * @throws ServiceException on failure * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol) throws ServiceException { + AccessControlService.BlockingInterface protocol) throws ServiceException { return getUserPermissions(controller, protocol, HConstants.EMPTY_STRING); } /** * A utility used to get user's global permissions based on the specified user name. * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param userName User name, if empty then all user permissions will be retrieved. - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param userName User name, if empty then all user permissions will be retrieved. n + * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} + * instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, String userName) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userName) throws ServiceException { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); builder.setType(AccessControlProtos.Permission.Type.Global); if (!StringUtils.isEmpty(userName)) { builder.setUserName(ByteString.copyFromUtf8(userName)); @@ -685,7 +642,7 @@ public static List getUserPermissions(RpcController controller, AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = - protocol.getUserPermissions(controller, request); + protocol.getUserPermissions(controller, request); List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) { perms.add(toUserPermission(perm)); @@ -697,38 +654,35 @@ public static List getUserPermissions(RpcController controller, * A utility used to get user table permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param t optional table name - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param t optional table name n * @deprecated Use + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - TableName t) throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName t) throws ServiceException { return getUserPermissions(controller, protocol, t, null, null, HConstants.EMPTY_STRING); } /** * A utility used to get user table permissions based on the column family, column qualifier and * user name. - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param t optional table name - * @param columnFamily Column family + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy + * @param t optional table name + * @param columnFamily Column family * @param columnQualifier Column qualifier - * @param userName User name, if empty then all user permissions will be retrieved. - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param userName User name, if empty then all user permissions will be retrieved. n + * * @deprecated Use + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, TableName t, byte[] columnFamily, - byte[] columnQualifier, String userName) throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName t, byte[] columnFamily, + byte[] columnQualifier, String userName) throws ServiceException { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); if (t != null) { builder.setTableName(ProtobufUtil.toProtoTableName(t)); } @@ -745,7 +699,7 @@ public static List getUserPermissions(RpcController controller, builder.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = - protocol.getUserPermissions(controller, request); + protocol.getUserPermissions(controller, request); List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) { perms.add(toUserPermission(perm)); @@ -757,35 +711,32 @@ public static List getUserPermissions(RpcController controller, * A utility used to get permissions for selected namespace. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param namespace name of the namespace - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param namespace name of the namespace n * @deprecated Use + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - byte[] namespace) throws ServiceException { + AccessControlService.BlockingInterface protocol, byte[] namespace) throws ServiceException { return getUserPermissions(controller, protocol, namespace, HConstants.EMPTY_STRING); } /** * A utility used to get permissions for selected namespace based on the specified user name. * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param namespace name of the namespace - * @param userName User name, if empty then all user permissions will be retrieved. - * @throws ServiceException - * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * @param protocol the AccessControlService protocol proxy + * @param namespace name of the namespace + * @param userName User name, if empty then all user permissions will be retrieved. n + * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} + * instead. */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, byte[] namespace, String userName) - throws ServiceException { + AccessControlService.BlockingInterface protocol, byte[] namespace, String userName) + throws ServiceException { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); if (namespace != null) { builder.setNamespaceName(UnsafeByteOperations.unsafeWrap(namespace)); } @@ -795,7 +746,7 @@ public static List getUserPermissions(RpcController controller, builder.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = - protocol.getUserPermissions(controller, request); + protocol.getUserPermissions(controller, request); List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) { perms.add(toUserPermission(perm)); @@ -806,29 +757,26 @@ public static List getUserPermissions(RpcController controller, /** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. - * @param controller RpcController - * @param protocol the AccessControlService protocol proxy - * @param tableName Table name, it shouldn't be null or empty. - * @param columnFamily The column family. Optional argument, can be empty. If empty then - * validation will happen at table level. + * @param controller RpcController + * @param protocol the AccessControlService protocol proxy + * @param tableName Table name, it shouldn't be null or empty. + * @param columnFamily The column family. Optional argument, can be empty. 
If empty then + * validation will happen at table level. * @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then - * validation will happen at table and column family level. columnQualifier will not be - * considered if columnFamily is passed as null or empty. - * @param userName User name, it shouldn't be null or empty. - * @param actions Actions - * @return true if access allowed, otherwise false - * @throws ServiceException - * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead. + * validation will happen at table and column family level. columnQualifier + * will not be considered if columnFamily is passed as null or empty. + * @param userName User name, it shouldn't be null or empty. + * @param actions Actions + * @return true if access allowed, otherwise false n * @deprecated Use + * {@link Admin#hasUserPermissions(String, List)} instead. */ @Deprecated public static boolean hasPermission(RpcController controller, - AccessControlService.BlockingInterface protocol, TableName tableName, byte[] columnFamily, - byte[] columnQualifier, String userName, Permission.Action[] actions) - throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName tableName, byte[] columnFamily, + byte[] columnQualifier, String userName, Permission.Action[] actions) throws ServiceException { AccessControlProtos.TablePermission.Builder tablePermissionBuilder = - AccessControlProtos.TablePermission.newBuilder(); - tablePermissionBuilder - .setTableName(ProtobufUtil.toProtoTableName(tableName)); + AccessControlProtos.TablePermission.newBuilder(); + tablePermissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName)); if (Bytes.len(columnFamily) > 0) { tablePermissionBuilder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); } @@ -839,10 +787,10 @@ public static boolean hasPermission(RpcController controller, tablePermissionBuilder.addAction(toPermissionAction(a)); } AccessControlProtos.HasPermissionRequest request = AccessControlProtos.HasPermissionRequest - .newBuilder().setTablePermission(tablePermissionBuilder) - .setUserName(ByteString.copyFromUtf8(userName)).build(); + .newBuilder().setTablePermission(tablePermissionBuilder) + .setUserName(ByteString.copyFromUtf8(userName)).build(); AccessControlProtos.HasPermissionResponse response = - protocol.hasPermission(controller, request); + protocol.hasPermission(controller, request); return response.getHasPermission(); } @@ -851,8 +799,8 @@ public static boolean hasPermission(RpcController controller, * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toUserPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap userPermission = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -871,8 +819,8 @@ public static ListMultimap toUserPermission( * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < 
proto.getUserPermissionsCount(); i++) { @@ -887,21 +835,19 @@ public static ListMultimap toPermission( /** * Create a request to revoke user table permissions. - * - * @param username the short user name whose permissions to be revoked + * @param username the short user name whose permissions to be revoked * @param tableName optional table name the permissions apply - * @param family optional column family + * @param family optional column family * @param qualifier optional qualifier - * @param actions the permissions to be revoked + * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } @@ -914,13 +860,10 @@ public static AccessControlProtos.RevokeRequest buildRevokeRequest( if (qualifier != null) { permissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java index 8e1767cce944..03dee44abc71 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; @@ -36,7 +35,7 @@ public final class GetUserPermissionsRequest { private byte[] qualifier; private GetUserPermissionsRequest(String userName, String namespace, TableName tableName, - byte[] family, byte[] qualifier) { + byte[] family, byte[] qualifier) { this.userName = userName; this.namespace = namespace; this.tableName = tableName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java index 01d53ebb37f7..570c543b4b53 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java index 7781d2295693..b4cbe6723a5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -38,7 +36,7 @@ public class NamespacePermission extends Permission { /** * Construct a namespace permission. * @param namespace namespace's name - * @param assigned assigned actions + * @param assigned assigned actions */ NamespacePermission(String namespace, Action... assigned) { super(assigned); @@ -53,7 +51,7 @@ public String getNamespace() { /** * check if given action is granted in given namespace. 
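[Illustrative aside, not part of the patch] The NamespacePermission javadoc touched above describes implies(namespace, action). A minimal sketch of that check, assuming a namespace-scoped permission built through the public Permission builder used elsewhere in this patch; the namespace "ns1" is hypothetical:

import org.apache.hadoop.hbase.security.access.NamespacePermission;
import org.apache.hadoop.hbase.security.access.Permission;

public class NamespacePermissionSketch {
  public static void main(String[] args) {
    // Build a namespace-scoped permission for READ and CREATE on namespace "ns1".
    Permission perm = Permission.newBuilder("ns1")
      .withActions(Permission.Action.READ, Permission.Action.CREATE)
      .build();

    // The builder is expected to yield a NamespacePermission for a namespace argument,
    // so the namespace-level implies() check becomes available after a cast.
    NamespacePermission nsPerm = (NamespacePermission) perm;
    System.out.println(nsPerm.implies("ns1", Permission.Action.READ));   // expected: true
    System.out.println(nsPerm.implies("ns1", Permission.Action.WRITE));  // expected: false
    System.out.println(nsPerm.implies("other", Permission.Action.READ)); // expected: false
  }
}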
* @param namespace namespace's name - * @param action action to be checked + * @param action action to be checked * @return true if granted, false otherwise */ public boolean implies(String namespace, Action action) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java index 49f2432ffa58..b3ac386689e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; @@ -27,20 +26,17 @@ import java.util.List; import java.util.Map; import java.util.Objects; - import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.VersionedWritable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.VersionedWritable; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** - * Base permissions instance representing the ability to perform a given set - * of actions. - * + * Base permissions instance representing the ability to perform a given set of actions. * @see TablePermission */ @InterfaceAudience.Public @@ -49,21 +45,32 @@ public class Permission extends VersionedWritable { @InterfaceAudience.Public public enum Action { - READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); + READ('R'), + WRITE('W'), + EXEC('X'), + CREATE('C'), + ADMIN('A'); private final byte code; + Action(char code) { this.code = (byte) code; } - public byte code() { return code; } + public byte code() { + return code; + } } @InterfaceAudience.Private protected enum Scope { - GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E'); + GLOBAL('G'), + NAMESPACE('N'), + TABLE('T'), + EMPTY('E'); private final byte code; + Scope(char code) { this.code = (byte) code; } @@ -82,23 +89,15 @@ public byte code() { protected Scope scope = Scope.EMPTY; static { - ACTION_BY_CODE = ImmutableMap.of( - Action.READ.code, Action.READ, - Action.WRITE.code, Action.WRITE, - Action.EXEC.code, Action.EXEC, - Action.CREATE.code, Action.CREATE, - Action.ADMIN.code, Action.ADMIN - ); - - SCOPE_BY_CODE = ImmutableMap.of( - Scope.GLOBAL.code, Scope.GLOBAL, - Scope.NAMESPACE.code, Scope.NAMESPACE, - Scope.TABLE.code, Scope.TABLE, - Scope.EMPTY.code, Scope.EMPTY - ); + ACTION_BY_CODE = ImmutableMap.of(Action.READ.code, Action.READ, Action.WRITE.code, Action.WRITE, + Action.EXEC.code, Action.EXEC, Action.CREATE.code, Action.CREATE, Action.ADMIN.code, + Action.ADMIN); + + SCOPE_BY_CODE = ImmutableMap.of(Scope.GLOBAL.code, Scope.GLOBAL, Scope.NAMESPACE.code, + Scope.NAMESPACE, Scope.TABLE.code, Scope.TABLE, Scope.EMPTY.code, Scope.EMPTY); } - /** Empty constructor for Writable implementation. Do not use. 
*/ + /** Empty constructor for Writable implementation. Do not use. */ public Permission() { super(); } @@ -114,8 +113,8 @@ public Permission(byte[] actionCodes) { for (byte code : actionCodes) { Action action = ACTION_BY_CODE.get(code); if (action == null) { - LOG.error("Ignoring unknown action code '" + - Bytes.toStringBinary(new byte[] { code }) + "'"); + LOG.error( + "Ignoring unknown action code '" + Bytes.toStringBinary(new byte[] { code }) + "'"); continue; } actions.add(action); @@ -146,9 +145,8 @@ public void setActions(Action[] assigned) { } /** - * Check if two permission equals regardless of actions. It is useful when - * merging a new permission with an existed permission which needs to check two permissions's - * fields. + * Check if two permission equals regardless of actions. It is useful when merging a new + * permission with an existed permission which needs to check two permissions's fields. * @param obj instance * @return true if equals, false otherwise */ @@ -221,8 +219,8 @@ public void readFields(DataInput in) throws IOException { byte b = in.readByte(); Action action = ACTION_BY_CODE.get(b); if (action == null) { - throw new IOException("Unknown action code '" + - Bytes.toStringBinary(new byte[] { b }) + "' in input"); + throw new IOException( + "Unknown action code '" + Bytes.toStringBinary(new byte[] { b }) + "' in input"); } actions.add(action); } @@ -235,7 +233,7 @@ public void write(DataOutput out) throws IOException { super.write(out); out.writeByte(actions != null ? actions.size() : 0); if (actions != null) { - for (Action a: actions) { + for (Action a : actions) { out.writeByte(a.code()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java index 661bcc842a8d..b6df2c94a044 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; @@ -28,6 +27,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; @@ -39,7 +39,6 @@ /** * Convert protobuf objects in AccessControl.proto under hbase-protocol-shaded to user-oriented * objects and vice versa.
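The Permission hunks above are formatting-only, but they spell out the public surface this patch keeps touching: single-character action codes (R, W, X, C, A) resolved through ACTION_BY_CODE, a byte[]-code constructor, and the builder that ShadedAccessControlUtil relies on further down. A minimal usage sketch, not part of the patch; the table name and family are made up:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PermissionExample {
      public static void main(String[] args) {
        // Build a table-scoped permission restricted to one column family.
        Permission perm = Permission.newBuilder(TableName.valueOf("example_table"))
            .withFamily(Bytes.toBytes("cf"))
            .withActions(Permission.Action.READ, Permission.Action.WRITE)
            .build();

        // The byte[] constructor resolves codes via ACTION_BY_CODE;
        // unknown codes would be logged and skipped.
        Permission fromCodes = new Permission(new byte[] { 'R', 'W' });

        System.out.println(perm + " / " + fromCodes);
      }
    }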
    - * * In HBASE-15638, we create a hbase-protocol-shaded module for upgrading protobuf version to 3.x, * but there are still some coprocessor endpoints(such as AccessControl, Authentication, * MulitRowMutation) which depend on hbase-protocol module for CPEP compatibility. In fact, we use @@ -73,16 +72,16 @@ public static AccessControlProtos.Permission.Action toPermissionAction(Permissio */ public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } throw new IllegalArgumentException("Unknown action value " + action.name()); } @@ -94,7 +93,7 @@ public static Permission.Action toPermissionAction(AccessControlProtos.Permissio * @return the converted array of Actions */ public static Permission.Action[] - toPermissionActions(List protoActions) { + toPermissionActions(List protoActions) { Permission.Action[] actions = new Permission.Action[protoActions.size()]; for (int i = 0; i < protoActions.size(); i++) { actions[i] = toPermissionAction(protoActions.get(i)); @@ -110,8 +109,8 @@ public static org.apache.hadoop.hbase.TableName toTableName(HBaseProtos.TableNam public static HBaseProtos.TableName toProtoTableName(TableName tableName) { return HBaseProtos.TableName.newBuilder() - .setNamespace(ByteString.copyFrom(tableName.getNamespace())) - .setQualifier(ByteString.copyFrom(tableName.getQualifier())).build(); + .setNamespace(ByteString.copyFrom(tableName.getNamespace())) + .setQualifier(ByteString.copyFrom(tableName.getQualifier())).build(); } /** @@ -151,7 +150,7 @@ public static Permission toPermission(AccessControlProtos.Permission proto) { if (perm.hasFamily()) family = perm.getFamily().toByteArray(); if (perm.hasQualifier()) qualifier = perm.getQualifier().toByteArray(); return Permission.newBuilder(table).withFamily(family).withQualifier(qualifier) - .withActions(actions).build(); + .withActions(actions).build(); } throw new IllegalStateException("Unrecognize Perm Type: " + proto.getType()); } @@ -167,9 +166,9 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { NamespacePermission nsPerm = (NamespacePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.NamespacePermission.Builder builder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); builder.setNamespaceName(org.apache.hbase.thirdparty.com.google.protobuf.ByteString - .copyFromUtf8(nsPerm.getNamespace())); + .copyFromUtf8(nsPerm.getNamespace())); Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { @@ -181,7 +180,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { TablePermission tablePerm = (TablePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.TablePermission.Builder builder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); builder.setTableName(toProtoTableName(tablePerm.getTableName())); 
if (tablePerm.hasFamily()) { builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); @@ -200,7 +199,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { // perm.getAccessScope() == Permission.Scope.GLOBAL ret.setType(AccessControlProtos.Permission.Type.Global); AccessControlProtos.GlobalPermission.Builder builder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { @@ -218,8 +217,8 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { * @param proto the protobuf UserPermission * @return the converted UserPermission */ - public static ListMultimap toUserTablePermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserTablePermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -239,12 +238,12 @@ public static ListMultimap toUserTablePermissions( * @return the protobuf UserTablePermissions */ public static AccessControlProtos.UsersAndPermissions - toUserTablePermissions(ListMultimap perm) { + toUserTablePermissions(ListMultimap perm) { AccessControlProtos.UsersAndPermissions.Builder builder = - AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); @@ -270,14 +269,14 @@ public static UserPermission toUserPermission(AccessControlProtos.UserPermission */ public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(perm.getUser())) - .setPermission(toPermission(perm.getPermission())).build(); + .setUser(ByteString.copyFromUtf8(perm.getUser())) + .setPermission(toPermission(perm.getPermission())).build(); } public static GrantRequest buildGrantRequest(UserPermission userPermission, - boolean mergeExistingPermissions) { + boolean mergeExistingPermissions) { return GrantRequest.newBuilder().setUserPermission(toUserPermission(userPermission)) - .setMergeExistingPermissions(mergeExistingPermissions).build(); + .setMergeExistingPermissions(mergeExistingPermissions).build(); } public static RevokeRequest buildRevokeRequest(UserPermission userPermission) { @@ -285,9 +284,9 @@ public static RevokeRequest buildRevokeRequest(UserPermission userPermission) { } public static AccessControlProtos.GetUserPermissionsRequest - buildGetUserPermissionsRequest(GetUserPermissionsRequest request) { + buildGetUserPermissionsRequest(GetUserPermissionsRequest request) { AccessControlProtos.GetUserPermissionsRequest.Builder builder = - AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); if (request.getUserName() != null && !request.getUserName().isEmpty()) { 
builder.setUserName(ByteString.copyFromUtf8(request.getUserName())); } @@ -312,7 +311,7 @@ public static RevokeRequest buildRevokeRequest(UserPermission userPermission) { } public static GetUserPermissionsResponse - buildGetUserPermissionsResponse(final List permissions) { + buildGetUserPermissionsResponse(final List permissions) { GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder(); for (UserPermission perm : permissions) { builder.addUserPermission(toUserPermission(perm)); @@ -321,7 +320,7 @@ public static RevokeRequest buildRevokeRequest(UserPermission userPermission) { } public static HasUserPermissionsRequest buildHasUserPermissionsRequest(String userName, - List permissions) { + List permissions) { HasUserPermissionsRequest.Builder builder = HasUserPermissionsRequest.newBuilder(); if (userName != null && !userName.isEmpty()) { builder.setUserName(ByteString.copyFromUtf8(userName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index f17919f70bf9..e0a12c7d431f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -15,24 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Represents an authorization for access for the given actions, optionally - * restricted to the given column family or column qualifier, over the - * given table. If the family property is null, it implies - * full table access. + * Represents an authorization for access for the given actions, optionally restricted to the given + * column family or column qualifier, over the given table. If the family property is + * null, it implies full table access. */ @InterfaceAudience.Public public class TablePermission extends Permission { @@ -43,10 +40,10 @@ public class TablePermission extends Permission { /** * Construct a table:family:qualifier permission. - * @param table table name - * @param family family name + * @param table table name + * @param family family name * @param qualifier qualifier name - * @param assigned assigned actions + * @param assigned assigned actions */ TablePermission(TableName table, byte[] family, byte[] qualifier, Action... assigned) { super(assigned); @@ -82,10 +79,10 @@ public String getNamespace() { /** * Check if given action can performs on given table:family:qualifier. 
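The conversion helpers above are what the client grant/revoke path uses to translate between the public UserPermission type and the shaded protobuf messages. A rough round-trip sketch against the same methods; "alice" and "example_table" are placeholders, and in normal use Admin drives these calls rather than application code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;

    public class ShadedConversionExample {
      public static void main(String[] args) {
        UserPermission up = new UserPermission("alice",
            Permission.newBuilder(TableName.valueOf("example_table"))
                .withActions(Permission.Action.READ).build());

        // Client-side object -> shaded protobuf message and back again.
        AccessControlProtos.UserPermission proto = ShadedAccessControlUtil.toUserPermission(up);
        UserPermission roundTripped = ShadedAccessControlUtil.toUserPermission(proto);

        // The same utility builds the RPC request used when granting permissions.
        AccessControlProtos.GrantRequest grant =
            ShadedAccessControlUtil.buildGrantRequest(up, /* mergeExistingPermissions */ true);

        System.out.println(roundTripped);
        System.out.println(grant);
      }
    }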
- * @param table table name - * @param family family name + * @param table table name + * @param family family name * @param qualifier qualifier name - * @param action one of [Read, Write, Create, Exec, Admin] + * @param action one of [Read, Write, Create, Exec, Admin] * @return true if can, false otherwise */ public boolean implies(TableName table, byte[] family, byte[] qualifier, Action action) { @@ -103,7 +100,7 @@ public boolean implies(TableName table, byte[] family, byte[] qualifier, Action /** * Check if given action can performs on given table:family. - * @param table table name + * @param table table name * @param family family name * @param action one of [Read, Write, Create, Exec, Admin] * @return true if can, false otherwise @@ -131,13 +128,13 @@ private boolean failCheckQualifier(byte[] qual) { } /** - * Checks if this permission grants access to perform the given action on - * the given table and key value. - * @param table the table on which the operation is being performed - * @param kv the KeyValue on which the operation is being requested + * Checks if this permission grants access to perform the given action on the given table and key + * value. + * @param table the table on which the operation is being performed + * @param kv the KeyValue on which the operation is being requested * @param action the action requested - * @return true if the action is allowed over the given scope - * by this permission, otherwise false + * @return true if the action is allowed over the given scope by this permission, + * otherwise false */ public boolean implies(TableName table, KeyValue kv, Action action) { if (failCheckTable(table)) { @@ -168,8 +165,8 @@ public boolean tableFieldsEqual(TablePermission tp) { boolean tEq = (table == null && tp.table == null) || (table != null && table.equals(tp.table)); boolean fEq = (family == null && tp.family == null) || Bytes.equals(family, tp.family); - boolean qEq = (qualifier == null && tp.qualifier == null) || - Bytes.equals(qualifier, tp.qualifier); + boolean qEq = + (qualifier == null && tp.qualifier == null) || Bytes.equals(qualifier, tp.qualifier); return tEq && fEq && qEq; } @@ -212,10 +209,9 @@ public String toString() { protected String rawExpression() { StringBuilder raw = new StringBuilder(); if (table != null) { - raw.append("table=").append(table) - .append(", family=").append(family == null ? null : Bytes.toString(family)) - .append(", qualifier=").append(qualifier == null ? null : Bytes.toString(qualifier)) - .append(", "); + raw.append("table=").append(table).append(", family=") + .append(family == null ? null : Bytes.toString(family)).append(", qualifier=") + .append(qualifier == null ? 
null : Bytes.toString(qualifier)).append(", "); } return raw.toString() + super.rawExpression(); } @@ -224,7 +220,7 @@ protected String rawExpression() { public void readFields(DataInput in) throws IOException { super.readFields(in); byte[] tableBytes = Bytes.readByteArray(in); - if(tableBytes.length > 0) { + if (tableBytes.length > 0) { table = TableName.valueOf(tableBytes); } if (in.readBoolean()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java index 896ba5251a3c..39bd02ccabb9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** - * UserPermission consists of a user name and a permission. - * Permission can be one of [Global, Namespace, Table] permission. + * UserPermission consists of a user name and a permission. Permission can be one of [Global, + * Namespace, Table] permission. */ @InterfaceAudience.Public public class UserPermission { @@ -34,7 +32,7 @@ public class UserPermission { /** * Construct a user permission given permission. - * @param user user name + * @param user user name * @param permission one of [Global, Namespace, Table] permission */ public UserPermission(String user, Permission permission) { @@ -87,9 +85,8 @@ public int hashCode() { @Override public String toString() { - StringBuilder str = new StringBuilder("UserPermission: ") - .append("user=").append(user) - .append(", ").append(permission.toString()); + StringBuilder str = new StringBuilder("UserPermission: ").append("user=").append(user) + .append(", ").append(permission.toString()); return str.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java index d018ce19921b..e9990066050e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java @@ -27,11 +27,10 @@ */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving -public abstract class AbstractSaslClientAuthenticationProvider implements - SaslClientAuthenticationProvider { +public abstract class AbstractSaslClientAuthenticationProvider + implements SaslClientAuthenticationProvider { public static final String AUTH_TOKEN_TYPE = "HBASE_AUTH_TOKEN"; - @Override public final String getTokenKind() { // All HBase authentication tokens are "HBASE_AUTH_TOKEN"'s. 
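For the TablePermission hunks above, the implies() overloads are the checks the access controller evaluates against a request's table, family and qualifier. A small sketch of the scoping rules; it assumes, as ShadedAccessControlUtil.toPermission() in this patch does, that the builder yields a TablePermission when a table name is supplied:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.TablePermission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TablePermissionExample {
      public static void main(String[] args) {
        TableName tn = TableName.valueOf("example_table");
        byte[] cf = Bytes.toBytes("cf");

        // Family-scoped READ permission; qualifier is left null, so every
        // qualifier under "cf" is covered.
        TablePermission perm = (TablePermission) Permission.newBuilder(tn)
            .withFamily(cf).withActions(Permission.Action.READ).build();

        boolean readCf = perm.implies(tn, cf, Bytes.toBytes("q1"), Permission.Action.READ);   // true
        boolean writeCf = perm.implies(tn, cf, Bytes.toBytes("q1"), Permission.Action.WRITE); // false
        boolean otherCf = perm.implies(tn, Bytes.toBytes("other"), null, Permission.Action.READ); // false

        System.out.println(readCf + " " + writeCf + " " + otherCf);
      }
    }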
We differentiate between them diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java index a681d53719d0..cdd1fdb381f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -38,12 +37,12 @@ public interface AuthenticationProviderSelector { * {@link #selectProvider(String, User)}. */ void configure(Configuration conf, - Collection availableProviders); + Collection availableProviders); /** - * Chooses the authentication provider which should be used given the provided client context - * from the authentication providers passed in via {@link #configure(Configuration, Collection)}. + * Chooses the authentication provider which should be used given the provided client context from + * the authentication providers passed in via {@link #configure(Configuration, Collection)}. */ - Pair> selectProvider( - String clusterId, User user); + Pair> + selectProvider(String clusterId, User user); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java index 752003dad8c6..2c9968f6f71b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java @@ -21,9 +21,7 @@ import java.util.Collection; import java.util.Objects; - import net.jcip.annotations.NotThreadSafe; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -39,14 +37,12 @@ /** * Default implementation of {@link AuthenticationProviderSelector} which can choose from the * authentication implementations which HBase provides out of the box: Simple, Kerberos, and - * Delegation Token authentication. - * - * This implementation will ignore any {@link SaslAuthenticationProvider}'s which are available - * on the classpath or specified in the configuration because HBase cannot correctly choose which - * token should be returned to a client when multiple are present. It is expected that users - * implement their own {@link AuthenticationProviderSelector} when writing a custom provider. - * - * This implementation is not thread-safe. {@link #configure(Configuration, Collection)} and + * Delegation Token authentication. This implementation will ignore any + * {@link SaslAuthenticationProvider}'s which are available on the classpath or specified in the + * configuration because HBase cannot correctly choose which token should be returned to a client + * when multiple are present. It is expected that users implement their own + * {@link AuthenticationProviderSelector} when writing a custom provider. This implementation is not + * thread-safe. {@link #configure(Configuration, Collection)} and * {@link #selectProvider(String, User)} is not safe if they are called concurrently. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @@ -61,8 +57,8 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { Text digestAuthTokenKind = null; @Override - public void configure( - Configuration conf, Collection providers) { + public void configure(Configuration conf, + Collection providers) { if (this.conf != null) { throw new IllegalStateException("configure() should only be called once"); } @@ -73,19 +69,19 @@ public void configure( if (SimpleSaslAuthenticationProvider.SASL_AUTH_METHOD.getName().contentEquals(name)) { if (simpleAuth != null) { throw new IllegalStateException( - "Encountered multiple SimpleSaslClientAuthenticationProvider instances"); + "Encountered multiple SimpleSaslClientAuthenticationProvider instances"); } simpleAuth = (SimpleSaslClientAuthenticationProvider) provider; } else if (GssSaslAuthenticationProvider.SASL_AUTH_METHOD.getName().equals(name)) { if (krbAuth != null) { throw new IllegalStateException( - "Encountered multiple GssSaslClientAuthenticationProvider instances"); + "Encountered multiple GssSaslClientAuthenticationProvider instances"); } krbAuth = (GssSaslClientAuthenticationProvider) provider; } else if (DigestSaslAuthenticationProvider.SASL_AUTH_METHOD.getName().equals(name)) { if (digestAuth != null) { throw new IllegalStateException( - "Encountered multiple DigestSaslClientAuthenticationProvider instances"); + "Encountered multiple DigestSaslClientAuthenticationProvider instances"); } digestAuth = (DigestSaslClientAuthenticationProvider) provider; digestAuthTokenKind = new Text(digestAuth.getTokenKind()); @@ -95,13 +91,13 @@ public void configure( } if (simpleAuth == null || krbAuth == null || digestAuth == null) { throw new IllegalStateException("Failed to load SIMPLE, KERBEROS, and DIGEST authentication " - + "providers. Classpath is not sane."); + + "providers. Classpath is not sane."); } } @Override - public Pair> selectProvider( - String clusterId, User user) { + public Pair> + selectProvider(String clusterId, User user) { requireNonNull(clusterId, "Null clusterId was given"); requireNonNull(user, "Null user was given"); @@ -117,10 +113,11 @@ public Pair> // (for whatever that's worth). for (Token token : user.getTokens()) { // We need to check for two things: - // 1. This token is for the HBase cluster we want to talk to - // 2. We have suppporting client implementation to handle the token (the "kind" of token) - if (clusterIdAsText.equals(token.getService()) && - digestAuthTokenKind.equals(token.getKind())) { + // 1. This token is for the HBase cluster we want to talk to + // 2. We have suppporting client implementation to handle the token (the "kind" of token) + if ( + clusterIdAsText.equals(token.getService()) && digestAuthTokenKind.equals(token.getKind()) + ) { return new Pair<>(digestAuth, token); } } @@ -128,15 +125,17 @@ public Pair> final UserGroupInformation currentUser = user.getUGI(); // May be null if Hadoop AuthenticationMethod is PROXY final UserGroupInformation realUser = currentUser.getRealUser(); - if (currentUser.hasKerberosCredentials() || - (realUser != null && realUser.hasKerberosCredentials())) { + if ( + currentUser.hasKerberosCredentials() + || (realUser != null && realUser.hasKerberosCredentials()) + ) { return new Pair<>(krbAuth, null); } // This indicates that a client is requesting some authentication mechanism which the servers // don't know how to process (e.g. there is no provider which can support it). 
This may be // a bug or simply a misconfiguration of client *or* server. LOG.warn("No matching SASL authentication provider and supporting token found from providers" - + " for user: {}", user); + + " for user: {}", user); return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java index c1b7ddb7c554..712d4035448b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. - * - * HBase users should take care to note that this class (and its sub-classes) are marked with the + * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. HBase + * users should take care to note that this class (and its sub-classes) are marked with the * {@code InterfaceAudience.Private} annotation. These implementations are available for users to * read, copy, and modify, but should not be extended or re-used in binary form. There are no * compatibility guarantees provided for implementations of this class. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java index 7cbdecd642be..d71c07d1575a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ @InterfaceAudience.Private public class DigestSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "DIGEST", (byte)82, "DIGEST-MD5", AuthenticationMethod.TOKEN); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("DIGEST", (byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java index a84f24b9080e..480e724599bd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -30,7 +29,6 @@ import javax.security.sasl.RealmChoiceCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -45,19 +43,19 @@ @InterfaceAudience.Private public class 
DigestSaslClientAuthenticationProvider extends DigestSaslAuthenticationProvider - implements SaslClientAuthenticationProvider { + implements SaslClientAuthenticationProvider { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); + null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); } public static class DigestSaslClientCallbackHandler implements CallbackHandler { private static final Logger LOG = - LoggerFactory.getLogger(DigestSaslClientCallbackHandler.class); + LoggerFactory.getLogger(DigestSaslClientCallbackHandler.class); private final String userName; private final char[] userPassword; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java index 07101848e507..7dea40f2657a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ @InterfaceAudience.Private public class GssSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "KERBEROS", (byte)81, "GSSAPI", AuthenticationMethod.KERBEROS); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("KERBEROS", (byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java index 21a4828b49e9..218fd13b60c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityConstants; @@ -41,9 +39,9 @@ @InterfaceAudience.Private public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationProvider - implements SaslClientAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - GssSaslClientAuthenticationProvider.class); + implements SaslClientAuthenticationProvider { + private static final Logger LOG = + LoggerFactory.getLogger(GssSaslClientAuthenticationProvider.class); private static boolean useCanonicalHostname(Configuration conf) { return !conf.getBoolean( @@ -57,10 +55,9 @@ public static String getHostnameForServerPrincipal(Configuration conf, InetAddre if (useCanonicalHostname(conf)) { hostname = 
addr.getCanonicalHostName(); if (hostname.equals(addr.getHostAddress())) { - LOG.warn("Canonical hostname for SASL principal is the same with IP address: " - + hostname + ", " + addr.getHostName() + ". Check DNS configuration or consider " - + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS - + "=true"); + LOG.warn("Canonical hostname for SASL principal is the same with IP address: " + hostname + + ", " + addr.getHostName() + ". Check DNS configuration or consider " + + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS + "=true"); } } else { hostname = addr.getHostName(); @@ -70,30 +67,30 @@ public static String getHostnameForServerPrincipal(Configuration conf, InetAddre } String getServerPrincipal(Configuration conf, SecurityInfo securityInfo, InetAddress server) - throws IOException { + throws IOException { String hostname = getHostnameForServerPrincipal(conf, server); String serverKey = securityInfo.getServerPrincipal(); if (serverKey == null) { throw new IllegalArgumentException( - "Can't obtain server Kerberos config key from SecurityInfo"); + "Can't obtain server Kerberos config key from SecurityInfo"); } return SecurityUtil.getServerPrincipal(conf.get(serverKey), hostname); } @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { String serverPrincipal = getServerPrincipal(conf, securityInfo, serverAddr); LOG.debug("Setting up Kerberos RPC to server={}", serverPrincipal); String[] names = SaslUtil.splitKerberosName(serverPrincipal); if (names.length != 3) { - throw new IOException("Kerberos principal '" + serverPrincipal - + "' does not have the expected format"); + throw new IOException( + "Kerberos principal '" + serverPrincipal + "' does not have the expected format"); } return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - names[0], names[1], saslProps, null); + names[0], names[1], saslProps, null); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java index 7930564cb9f6..edea8a463992 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Objects; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -78,19 +77,13 @@ public boolean equals(Object o) { return false; } SaslAuthMethod other = (SaslAuthMethod) o; - return Objects.equals(name, other.name) && - code == other.code && - Objects.equals(saslMech, other.saslMech) && - Objects.equals(method, other.method); + return Objects.equals(name, other.name) && code == other.code + && Objects.equals(saslMech, other.saslMech) && Objects.equals(method, other.method); } @Override public int hashCode() { - return new HashCodeBuilder() - .append(name) - .append(code) - .append(saslMech) - .append(method) - .toHashCode(); + return new HashCodeBuilder().append(name).append(code).append(saslMech).append(method) + .toHashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java index 1f6d821ce953..99e2916fa513 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java @@ -22,13 +22,11 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. - * It is suggested that custom implementations extend the abstract class in the type hierarchy - * instead of directly implementing this interface (clients have a base class available, but - * servers presently do not). - * - * Implementations of this interface must be unique among each other via the {@code byte} - * returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. + * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. It is + * suggested that custom implementations extend the abstract class in the type hierarchy instead of + * directly implementing this interface (clients have a base class available, but servers presently + * do not). Implementations of this interface must be unique among each other via the + * {@code byte} returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java index 4b1cabcfc494..bbc5ddac91aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -38,10 +36,9 @@ /** * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. * Implementations should not directly implement this interface, but instead extend - * {@link AbstractSaslClientAuthenticationProvider}. - * - * Implementations of this interface must make an implementation of {@code hashCode()} - * which returns the same value across multiple instances of the provider implementation. + * {@link AbstractSaslClientAuthenticationProvider}. Implementations of this interface must make an + * implementation of {@code hashCode()} which returns the same value across multiple instances of + * the provider implementation. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -51,8 +48,8 @@ public interface SaslClientAuthenticationProvider extends SaslAuthenticationProv * Creates the SASL client instance for this auth'n method. */ SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, - Map saslProps) throws IOException; + Token token, boolean fallbackAllowed, Map saslProps) + throws IOException; /** * Constructs a {@link UserInformation} from the given {@link UserGroupInformation} @@ -60,18 +57,15 @@ SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo UserInformation getUserInfo(User user); /** - * Returns the "real" user, the user who has the credentials being authenticated by the - * remote service, in the form of an {@link UserGroupInformation} object. - * - * It is common in the Hadoop "world" to have distinct notions of a "real" user and a "proxy" - * user. A "real" user is the user which actually has the credentials (often, a Kerberos ticket), - * but some code may be running as some other user who has no credentials. This method gives - * the authentication provider a chance to acknowledge this is happening and ensure that any - * RPCs are executed with the real user's credentials, because executing them as the proxy user - * would result in failure because no credentials exist to authenticate the RPC. - * - * Not all implementations will need to implement this method. By default, the provided User's - * UGI is returned directly. + * Returns the "real" user, the user who has the credentials being authenticated by the remote + * service, in the form of an {@link UserGroupInformation} object. It is common in the Hadoop + * "world" to have distinct notions of a "real" user and a "proxy" user. 
A "real" user is the user + * which actually has the credentials (often, a Kerberos ticket), but some code may be running as + * some other user who has no credentials. This method gives the authentication provider a chance + * to acknowledge this is happening and ensure that any RPCs are executed with the real user's + * credentials, because executing them as the proxy user would result in failure because no + * credentials exist to authenticate the RPC. Not all implementations will need to implement this + * method. By default, the provided User's UGI is returned directly. */ default UserGroupInformation getRealUser(User ugi) { return ugi.getUGI(); @@ -86,8 +80,9 @@ default boolean canRetry() { } /** - * Executes any necessary logic to re-login the client. Not all implementations will have - * any logic that needs to be executed. + * Executes any necessary logic to re-login the client. Not all implementations will have any + * logic that needs to be executed. */ - default void relogin() throws IOException {} + default void relogin() throws IOException { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java index aaaee003c595..befd52c4a371 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.ServiceLoader; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -43,21 +42,20 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving public final class SaslClientAuthenticationProviders { - private static final Logger LOG = LoggerFactory.getLogger( - SaslClientAuthenticationProviders.class); + private static final Logger LOG = + LoggerFactory.getLogger(SaslClientAuthenticationProviders.class); public static final String SELECTOR_KEY = "hbase.client.sasl.provider.class"; public static final String EXTRA_PROVIDERS_KEY = "hbase.client.sasl.provider.extras"; private static final AtomicReference providersRef = - new AtomicReference<>(); + new AtomicReference<>(); private final Collection providers; private final AuthenticationProviderSelector selector; - private SaslClientAuthenticationProviders( - Collection providers, - AuthenticationProviderSelector selector) { + private SaslClientAuthenticationProviders(Collection providers, + AuthenticationProviderSelector selector) { this.providers = providers; this.selector = selector; } @@ -90,16 +88,16 @@ public static synchronized void reset() { } /** - * Adds the given {@code provider} to the set, only if an equivalent provider does not - * already exist in the set. + * Adds the given {@code provider} to the set, only if an equivalent provider does not already + * exist in the set. 
*/ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, - HashMap providers) { + HashMap providers) { Byte code = provider.getSaslAuthMethod().getCode(); SaslClientAuthenticationProvider existingProvider = providers.get(code); if (existingProvider != null) { throw new RuntimeException("Already registered authentication provider with " + code + " " - + existingProvider.getClass()); + + existingProvider.getClass()); } providers.put(code, provider); } @@ -108,9 +106,9 @@ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, * Instantiates the ProviderSelector implementation from the provided configuration. */ static AuthenticationProviderSelector instantiateSelector(Configuration conf, - Collection providers) { - Class clz = conf.getClass( - SELECTOR_KEY, BuiltInProviderSelector.class, AuthenticationProviderSelector.class); + Collection providers) { + Class clz = conf.getClass(SELECTOR_KEY, + BuiltInProviderSelector.class, AuthenticationProviderSelector.class); try { AuthenticationProviderSelector selector = clz.getConstructor().newInstance(); selector.configure(conf, providers); @@ -118,10 +116,10 @@ static AuthenticationProviderSelector instantiateSelector(Configuration conf, LOG.trace("Loaded ProviderSelector {}", selector.getClass()); } return selector; - } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | - InvocationTargetException e) { - throw new RuntimeException("Failed to instantiate " + clz + - " as the ProviderSelector defined by " + SELECTOR_KEY, e); + } catch (InstantiationException | IllegalAccessException | NoSuchMethodException + | InvocationTargetException e) { + throw new RuntimeException( + "Failed to instantiate " + clz + " as the ProviderSelector defined by " + SELECTOR_KEY, e); } } @@ -129,8 +127,8 @@ static AuthenticationProviderSelector instantiateSelector(Configuration conf, * Extracts and instantiates authentication providers from the configuration. 
*/ static void addExplicitProviders(Configuration conf, - HashMap providers) { - for(String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { + HashMap providers) { + for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class clz; // Load the class from the config try { @@ -143,7 +141,7 @@ static void addExplicitProviders(Configuration conf, // Make sure it's the right type if (!SaslClientAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Ignoring SaslClientAuthenticationProvider {} because it is not an instance of" - + " SaslClientAuthenticationProvider", clz); + + " SaslClientAuthenticationProvider", clz); continue; } @@ -152,7 +150,7 @@ static void addExplicitProviders(Configuration conf, try { provider = (SaslClientAuthenticationProvider) clz.getConstructor().newInstance(); } catch (InstantiationException | IllegalAccessException | NoSuchMethodException - | InvocationTargetException e) { + | InvocationTargetException e) { LOG.warn("Failed to instantiate SaslClientAuthenticationProvider {}", clz, e); continue; } @@ -169,21 +167,20 @@ static void addExplicitProviders(Configuration conf, */ static SaslClientAuthenticationProviders instantiate(Configuration conf) { ServiceLoader loader = - ServiceLoader.load(SaslClientAuthenticationProvider.class); - HashMap providerMap = new HashMap<>(); + ServiceLoader.load(SaslClientAuthenticationProvider.class); + HashMap providerMap = new HashMap<>(); for (SaslClientAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providerMap); } addExplicitProviders(conf, providerMap); - Collection providers = Collections.unmodifiableCollection( - providerMap.values()); + Collection providers = + Collections.unmodifiableCollection(providerMap.values()); if (LOG.isTraceEnabled()) { - String loadedProviders = providers.stream() - .map((provider) -> provider.getClass().getName()) - .collect(Collectors.joining(", ")); + String loadedProviders = providers.stream().map((provider) -> provider.getClass().getName()) + .collect(Collectors.joining(", ")); LOG.trace("Found SaslClientAuthenticationProviders {}", loadedProviders); } @@ -192,16 +189,13 @@ static SaslClientAuthenticationProviders instantiate(Configuration conf) { } /** - * Returns the provider and token pair for SIMPLE authentication. - * - * This method is a "hack" while SIMPLE authentication for HBase does not flow through - * the SASL codepath. + * Returns the provider and token pair for SIMPLE authentication. This method is a "hack" while + * SIMPLE authentication for HBase does not flow through the SASL codepath. */ public Pair> - getSimpleProvider() { + getSimpleProvider() { Optional optional = providers.stream() - .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider) - .findFirst(); + .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider).findFirst(); return new Pair<>(optional.get(), null); } @@ -209,15 +203,14 @@ static SaslClientAuthenticationProviders instantiate(Configuration conf) { * Chooses the best authentication provider and corresponding token given the HBase cluster * identifier and the user. 
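SaslClientAuthenticationProviders above assembles the provider set from ServiceLoader discovery plus the two configuration keys it defines (SELECTOR_KEY and EXTRA_PROVIDERS_KEY). A hedged sketch of how a deployment could plug in an extra provider and a custom selector; the com.example class names are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CustomSaslProviderConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Extra client-side providers, loaded on top of the built-ins discovered
        // via ServiceLoader; each class must implement SaslClientAuthenticationProvider.
        conf.set("hbase.client.sasl.provider.extras",
            "com.example.MyTokenAuthenticationProvider");

        // Replace the default BuiltInProviderSelector with a selector that knows
        // how to pick the provider above; must implement AuthenticationProviderSelector.
        conf.set("hbase.client.sasl.provider.class",
            "com.example.MyProviderSelector");

        // Connections built from this configuration consult the custom selector
        // when negotiating SASL with the cluster.
      }
    }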
*/ - public Pair> selectProvider( - String clusterId, User clientUser) { + public Pair> + selectProvider(String clusterId, User clientUser) { return selector.selectProvider(clusterId, clientUser); } @Override public String toString() { - return providers.stream() - .map((p) -> p.getClass().getName()) - .collect(Collectors.joining(", ", "providers=[", "], selector=")) + selector.getClass(); + return providers.stream().map((p) -> p.getClass().getName()) + .collect(Collectors.joining(", ", "providers=[", "], selector=")) + selector.getClass(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java index 3f1122c75413..01b1f452685a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java @@ -25,8 +25,8 @@ */ @InterfaceAudience.Private public class SimpleSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "SIMPLE", (byte)80, "", AuthenticationMethod.SIMPLE); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("SIMPLE", (byte) 80, "", AuthenticationMethod.SIMPLE); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java index 3a9142f34c44..6fff703689c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; @@ -34,13 +32,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @InterfaceAudience.Private -public class SimpleSaslClientAuthenticationProvider extends - SimpleSaslAuthenticationProvider implements SaslClientAuthenticationProvider { +public class SimpleSaslClientAuthenticationProvider extends SimpleSaslAuthenticationProvider + implements SaslClientAuthenticationProvider { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java index 0e0a2500a54f..9110b6a7b91e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java @@ -15,7 +15,6 @@ * See the License for the 
specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.io.DataInput; @@ -51,8 +50,8 @@ public AuthenticationTokenIdentifier(String username) { this.username = username; } - public AuthenticationTokenIdentifier(String username, int keyId, - long issueDate, long expirationDate) { + public AuthenticationTokenIdentifier(String username, int keyId, long issueDate, + long expirationDate) { this.username = username; this.keyId = keyId; this.issueDate = issueDate; @@ -114,15 +113,13 @@ void setSequenceNumber(long seq) { public byte[] toBytes() { AuthenticationProtos.TokenIdentifier.Builder builder = - AuthenticationProtos.TokenIdentifier.newBuilder(); + AuthenticationProtos.TokenIdentifier.newBuilder(); builder.setKind(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN); if (username != null) { builder.setUsername(ByteString.copyFromUtf8(username)); } - builder.setIssueDate(issueDate) - .setExpirationDate(expirationDate) - .setKeyId(keyId) - .setSequenceNumber(sequenceNumber); + builder.setIssueDate(issueDate).setExpirationDate(expirationDate).setKeyId(keyId) + .setSequenceNumber(sequenceNumber); return builder.build().toByteArray(); } @@ -143,9 +140,11 @@ public void readFields(DataInput in) throws IOException { ProtobufUtil.mergeFrom(builder, inBytes); AuthenticationProtos.TokenIdentifier identifier = builder.build(); // sanity check on type - if (!identifier.hasKind() || - identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { - throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind()); + if ( + !identifier.hasKind() + || identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN + ) { + throw new IOException("Invalid TokenIdentifier kind from input " + identifier.getKind()); } // copy the field values @@ -172,26 +171,22 @@ public boolean equals(Object other) { return false; } if (other instanceof AuthenticationTokenIdentifier) { - AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other; - return sequenceNumber == ident.getSequenceNumber() - && keyId == ident.getKeyId() - && issueDate == ident.getIssueDate() - && expirationDate == ident.getExpirationDate() - && (username == null ? ident.getUsername() == null : - username.equals(ident.getUsername())); + AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier) other; + return sequenceNumber == ident.getSequenceNumber() && keyId == ident.getKeyId() + && issueDate == ident.getIssueDate() && expirationDate == ident.getExpirationDate() + && (username == null ? 
ident.getUsername() == null : username.equals(ident.getUsername())); } return false; } @Override public int hashCode() { - return (int)sequenceNumber; + return (int) sequenceNumber; } @Override public String toString() { - return "(username=" + username + ", keyId=" - + keyId + ", issueDate=" + issueDate - + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; + return "(username=" + username + ", keyId=" + keyId + ", issueDate=" + issueDate + + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java index 39959ef61db4..1dbc7c8cd2f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java @@ -15,22 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.util.Collection; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private -public class AuthenticationTokenSelector - implements TokenSelector { +public class AuthenticationTokenSelector implements TokenSelector { private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSelector.class); public AuthenticationTokenSelector() { @@ -38,15 +35,17 @@ public AuthenticationTokenSelector() { @Override public Token selectToken(Text serviceName, - Collection> tokens) { + Collection> tokens) { if (serviceName != null) { for (Token ident : tokens) { - if (serviceName.equals(ident.getService()) && - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { + if ( + serviceName.equals(ident.getService()) + && AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind()) + ) { if (LOG.isDebugEnabled()) { - LOG.debug("Returning token "+ident); + LOG.debug("Returning token " + ident); } - return (Token)ident; + return (Token) ident; } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index a29c47c5f6fb..40ff0373c36c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
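The AuthenticationTokenIdentifier hunks above keep the protobuf-backed write()/readFields() pair intact. A small round-trip sketch, assuming the usual no-argument constructor that Writable deserialization relies on; the user name and lifetimes are made up:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;

    public class TokenIdentifierRoundTrip {
      public static void main(String[] args) throws Exception {
        long now = System.currentTimeMillis();
        AuthenticationTokenIdentifier id =
            new AuthenticationTokenIdentifier("alice", 1, now, now + 86_400_000L);

        // Standard Writable round trip; write() and readFields() are the pair
        // exercised by Hadoop's token machinery.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        id.write(new DataOutputStream(bos));

        AuthenticationTokenIdentifier copy = new AuthenticationTokenIdentifier();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

        // equals() compares user, key id, issue/expiration dates and sequence number.
        System.out.println(id.equals(copy) + " " + copy);
      }
    }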
*/ - package org.apache.hadoop.hbase.security.token; import java.io.IOException; @@ -52,7 +51,8 @@ public final class ClientTokenUtil { // Set in TestClientTokenUtil via reflection private static ServiceException injectedException; - private ClientTokenUtil() {} + private ClientTokenUtil() { + } private static void injectFault() throws ServiceException { if (injectedException != null) { @@ -66,8 +66,8 @@ private static void injectFault() throws ServiceException { * @return the authentication token instance, wrapped by a {@link CompletableFuture}. */ @InterfaceAudience.Private - public static CompletableFuture> obtainToken( - AsyncConnection conn) { + public static CompletableFuture> + obtainToken(AsyncConnection conn) { CompletableFuture> future = new CompletableFuture<>(); if (injectedException != null) { future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException)); @@ -75,11 +75,12 @@ public static CompletableFuture> obtainToke } AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); table. coprocessorService( - AuthenticationProtos.AuthenticationService::newStub, - (s, c, r) -> s.getAuthenticationToken(c, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r), - HConstants.EMPTY_START_ROW).whenComplete((resp, error) -> { + AuthenticationProtos.GetAuthenticationTokenResponse> coprocessorService( + AuthenticationProtos.AuthenticationService::newStub, + (s, c, r) -> s.getAuthenticationToken(c, + AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r), + HConstants.EMPTY_START_ROW) + .whenComplete((resp, error) -> { if (error != null) { future.completeExceptionally(ProtobufUtil.handleRemoteException(error)); } else { @@ -96,20 +97,17 @@ AuthenticationProtos.GetAuthenticationTokenResponse> coprocessorService( * @return the authentication token instance */ @InterfaceAudience.Private - static Token obtainToken( - Connection conn) throws IOException { + static Token obtainToken(Connection conn) throws IOException { Table meta = null; try { injectFault(); meta = conn.getTable(TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService( - HConstants.EMPTY_START_ROW); + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = - AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = - service.getAuthenticationToken(null, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); + AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken( + null, AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); return toToken(response.getToken()); } catch (ServiceException se) { @@ -123,7 +121,6 @@ static Token obtainToken( /** * Converts a Token instance (with embedded identifier) to the protobuf representation. - * * @param token the Token instance to copy * @return the protobuf Token message */ @@ -140,17 +137,15 @@ static AuthenticationProtos.Token toToken(Token t /** * Converts a protobuf Token message back into a Token instance. - * * @param proto the protobuf Token message * @return the Token instance */ @InterfaceAudience.Private static Token toToken(AuthenticationProtos.Token proto) { - return new Token<>( - proto.hasIdentifier() ? 
proto.getIdentifier().toByteArray() : null, - proto.hasPassword() ? proto.getPassword().toByteArray() : null, - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, - proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null); + return new Token<>(proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, + proto.hasPassword() ? proto.getPassword().toByteArray() : null, + AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, + proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null); } /** @@ -160,8 +155,8 @@ static Token toToken(AuthenticationProtos.Token p * @return the authentication token instance */ @InterfaceAudience.Private - static Token obtainToken( - final Connection conn, User user) throws IOException, InterruptedException { + static Token obtainToken(final Connection conn, User user) + throws IOException, InterruptedException { return user.runAs(new PrivilegedExceptionAction>() { @Override public Token run() throws Exception { @@ -171,16 +166,14 @@ public Token run() throws Exception { } /** - * Obtain an authentication token for the given user and add it to the - * user's credentials. + * Obtain an authentication token for the given user and add it to the user's credentials. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @throws IOException If making a remote call to the authentication service fails + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ - public static void obtainAndCacheToken(final Connection conn, - User user) - throws IOException, InterruptedException { + public static void obtainAndCacheToken(final Connection conn, User user) + throws IOException, InterruptedException { try { Token token = obtainToken(conn, user); @@ -188,15 +181,14 @@ public static void obtainAndCacheToken(final Connection conn, throw new IOException("No token returned for user " + user.getName()); } if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName()); } user.addToken(token); } catch (IOException | InterruptedException | RuntimeException e) { throw e; } catch (Exception e) { throw new UndeclaredThrowableException(e, - "Unexpected exception obtaining token for user " + user.getName()); + "Unexpected exception obtaining token for user " + user.getName()); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java index f8ac1b966097..f15bab6c0951 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -31,6 +30,7 @@ public class Authorizations { private List labels; + public Authorizations(String... 
labels) { this.labels = new ArrayList<>(labels.length); Collections.addAll(this.labels, labels); @@ -43,12 +43,12 @@ public Authorizations(List labels) { public List getLabels() { return Collections.unmodifiableList(this.labels); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("[ "); - for (String label: labels) { + for (String label : labels) { sb.append(label); sb.append(' '); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java index 6cf8fb748dfd..8abaee005094 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.security.visibility; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This contains a visibility expression which can be associated with a cell. When it is set with a * Mutation, all the cells in that mutation will get associated with this expression. A visibility - * expression can contain visibility labels combined with logical - * operators AND(&), OR(|) and NOT(!) + * expression can contain visibility labels combined with logical operators AND(&), OR(|) and + * NOT(!) */ @InterfaceAudience.Public public class CellVisibility { @@ -48,25 +48,22 @@ public String toString() { } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' */ public static String quote(String auth) { return quote(Bytes.toBytes(auth)); } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' 
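CellVisibility and Authorizations are the two client-side halves of the visibility feature: the expression travels with the mutation, the authorizations travel with the read. Below is a minimal sketch of how they are typically combined; the table name, family, and labels are illustrative, and it assumes those labels have already been defined and granted (see VisibilityClient below).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.security.visibility.Authorizations;
    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VisibilitySketch {
      static void writeAndRead(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("demo"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // Labels combined with the operators described above; CellVisibility.quote()
          // would be used for a label containing characters such as '|', '&' or '!'.
          put.setCellVisibility(new CellVisibility("secret|topsecret"));
          table.put(put);

          Scan scan = new Scan();
          // Only cells whose expression is satisfied by these authorizations come back.
          scan.setAuthorizations(new Authorizations("secret"));
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
              System.out.println(r);
            }
          }
        }
      }
    }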
*/ public static String quote(byte[] auth) { int escapeChars = 0; for (int i = 0; i < auth.length; i++) - if (auth[i] == '"' || auth[i] == '\\') - escapeChars++; + if (auth[i] == '"' || auth[i] == '\\') escapeChars++; byte[] escapedAuth = new byte[auth.length + escapeChars + 2]; int index = 1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java index 778288d4c03f..e9160ec976c2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java @@ -28,4 +28,3 @@ public InvalidLabelException(String msg) { super(msg); } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 3d3d081ad481..42508782d1a7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService; - /** * Utility client for doing visibility labels admin operations. */ @@ -56,113 +55,94 @@ public class VisibilityClient { /** * Return true if cell visibility features are supported and enabled * @param connection The connection to use - * @return true if cell visibility features are supported and enabled, false otherwise - * @throws IOException + * @return true if cell visibility features are supported and enabled, false otherwise n */ public static boolean isCellVisibilityEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() - .contains(SecurityCapability.CELL_VISIBILITY); + .contains(SecurityCapability.CELL_VISIBILITY); } /** - * Utility method for adding label to the system. - * - * @param connection - * @param label - * @return VisibilityLabelsResponse - * @throws Throwable + * Utility method for adding label to the system. nnnn */ public static VisibilityLabelsResponse addLabel(Connection connection, final String label) - throws Throwable { + throws Throwable { return addLabels(connection, new String[] { label }); } /** - * Utility method for adding labels to the system. - * - * @param connection - * @param labels - * @return VisibilityLabelsResponse - * @throws Throwable + * Utility method for adding labels to the system. 
nnnn */ public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(UnsafeByteOperations.unsafeWrap((Bytes.toBytes(label)))); - builder.addVisLabel(newBuilder.build()); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(UnsafeByteOperations.unsafeWrap((Bytes.toBytes(label)))); + builder.addVisLabel(newBuilder.build()); + } } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + }; Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } } /** - * Sets given labels globally authorized for the user. - * @param connection - * @param auths - * @param user - * @return VisibilityLabelsResponse - * @throws Throwable + * Sets given labels globally authorized for the user. nnnnn */ public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths, - final String user) throws Throwable { + final String user) throws Throwable { return setOrClearAuths(connection, auths, user, true); } /** - * @param connection the Connection instance to use. - * @param user - * @return labels, the given user is globally authorized for. - * @throws Throwable + * @param connection the Connection instance to use. n * @return labels, the given user is + * globally authorized for. 
n */ public static GetAuthsResponse getAuths(Connection connection, final String user) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); + @Override + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - return response; - } - }; - Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + }; + Map result = table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } @@ -171,91 +151,85 @@ public GetAuthsResponse call(VisibilityLabelsService service) throws IOException /** * Retrieve the list of visibility labels defined in the system. * @param connection The Connection instance to use. - * @param regex The regular expression to filter which labels are returned. - * @return labels The list of visibility labels defined in the system. - * @throws Throwable + * @param regex The regular expression to filter which labels are returned. + * @return labels The list of visibility labels defined in the system. n */ public static ListLabelsResponse listLabels(Connection connection, final String regex) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { - ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); - if (regex != null) { - // Compile the regex here to catch any regex exception earlier. 
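Each VisibilityClient helper above (addLabels, setAuths, getAuths, listLabels, clearAuths) wraps a single coprocessor call against the hbase:labels table. A minimal admin sketch follows, assuming the caller has the privileges normally required for visibility administration (typically a superuser); the label names and user are illustrative.

    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.visibility.VisibilityClient;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.ListLabelsResponse;

    public class VisibilityAdminSketch {
      static void defineAndGrant(Connection conn) throws Throwable {
        if (!VisibilityClient.isCellVisibilityEnabled(conn)) {
          return; // VisibilityController not installed on this cluster
        }
        // Define the labels once, then grant a subset to a user.
        VisibilityClient.addLabels(conn, new String[] { "secret", "topsecret" });
        VisibilityClient.setAuths(conn, new String[] { "secret" }, "alice");
        GetAuthsResponse auths = VisibilityClient.getAuths(conn, "alice");
        ListLabelsResponse all = VisibilityClient.listLabels(conn, null); // null regex = no filter
        System.out.println(auths.getAuthCount() + " auths, " + all.getLabelCount() + " labels");
      }
    }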
- Pattern pattern = Pattern.compile(regex); - listAuthLabelsReqBuilder.setRegex(pattern.toString()); - } - service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); - ListLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); + @Override + public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { + ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); + if (regex != null) { + // Compile the regex here to catch any regex exception earlier. + Pattern pattern = Pattern.compile(regex); + listAuthLabelsReqBuilder.setRegex(pattern.toString()); + } + service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); + ListLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - return response; - } - }; + }; Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } } /** - * Removes given labels from user's globally authorized list of labels. - * @param connection - * @param auths - * @param user - * @return VisibilityLabelsResponse - * @throws Throwable + * Removes given labels from user's globally authorized list of labels. nnnnn */ public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths, - final String user) throws Throwable { + final String user) throws Throwable { return setOrClearAuths(connection, auths, user, false); } private static VisibilityLabelsResponse setOrClearAuths(Connection connection, - final String[] auths, final String user, final boolean setOrClear) - throws IOException, ServiceException, Throwable { + final String[] auths, final String user, final boolean setOrClear) + throws IOException, ServiceException, Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + } } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + 
VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = table.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java index 0945dd98afc2..c7bb27c6715d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class VisibilityConstants { @@ -31,8 +31,8 @@ public final class VisibilityConstants { public static final String VISIBILITY_LABELS_ATTR_KEY = "VISIBILITY"; /** Internal storage table for visibility labels */ - public static final TableName LABELS_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); + public static final TableName LABELS_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); /** Family for the internal storage table for visibility labels */ public static final byte[] LABELS_TABLE_FAMILY = Bytes.toBytes("f"); @@ -41,16 +41,16 @@ public final class VisibilityConstants { public static final byte[] LABEL_QUALIFIER = new byte[1]; /** - * Visibility serialization version format. It indicates the visibility labels - * are sorted based on ordinal + * Visibility serialization version format. 
It indicates the visibility labels are sorted based on + * ordinal **/ public static final byte SORTED_ORDINAL_SERIALIZATION_FORMAT = 1; /** Byte representation of the visibility_serialization_version **/ public static final byte[] SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL = - new byte[] { SORTED_ORDINAL_SERIALIZATION_FORMAT }; + new byte[] { SORTED_ORDINAL_SERIALIZATION_FORMAT }; - public static final String CHECK_AUTHS_FOR_MUTATION = - "hbase.security.visibility.mutations.checkauths"; + public static final String CHECK_AUTHS_FOR_MUTATION = + "hbase.security.visibility.mutations.checkauths"; public static final String NOT_OPERATOR = "!"; public static final String AND_OPERATOR = "&"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java index a73d47501912..7d8d550e82e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /* diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java index 874b2b42cec3..dfbb0b9d02b2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.regex.Pattern; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,7 +53,7 @@ public class VisibilityLabelsValidator { validAuthChars['.'] = true; validAuthChars['/'] = true; } - + static final boolean isValidAuthChar(byte b) { return validAuthChars[0xff & b]; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 77c9127ef92c..40b32d53c39e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -47,7 +47,6 @@ import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; @@ -71,9 +70,10 @@ import org.apache.hadoop.hbase.ServerTaskBuilder; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.client.BalanceResponse; -import org.apache.hadoop.hbase.client.BalancerRejection; import 
org.apache.hadoop.hbase.client.BalancerDecision; +import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -122,7 +122,6 @@ import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DynamicClassLoader; @@ -203,6 +202,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -220,12 +220,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; /** - * Protobufs utility. - * Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil (i.e. no 'shaded' in - * the package name) carries a COPY of a subset of this class for non-shaded - * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in - * the companion class too (not the end of the world, especially if you are adding new functionality - * but something to be aware of. + * Protobufs utility. Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil + * (i.e. no 'shaded' in the package name) carries a COPY of a subset of this class for non-shaded + * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in the + * companion class too (not the end of the world, especially if you are adding new functionality but + * something to be aware of. */ @InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class public final class ProtobufUtil { @@ -234,18 +233,18 @@ private ProtobufUtil() { } /** - * Many results are simple: no cell, exists true or false. To save on object creations, - * we reuse them across calls. + * Many results are simple: no cell, exists true or false. To save on object creations, we reuse + * them across calls. 
*/ - private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; + private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); - private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE - = Result.create((Cell[])null, true, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE - = Result.create((Cell[])null, false, true); + private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE = + Result.create((Cell[]) null, true, true); + private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE = + Result.create((Cell[]) null, false, true); private final static ClientProtos.Result EMPTY_RESULT_PB; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; @@ -254,13 +253,12 @@ private ProtobufUtil() { private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - static { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); builder.setExists(true); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); + EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build(); @@ -268,13 +266,13 @@ private ProtobufUtil() { builder.setExists(false); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); + EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build(); builder.clear(); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB = builder.build(); + EMPTY_RESULT_PB = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_STALE = builder.build(); } @@ -290,9 +288,8 @@ private final static class ClassLoaderHolder { static { ClassLoader parent = ProtobufUtil.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); classLoaderLoaded = true; } } @@ -302,14 +299,13 @@ public static boolean isClassLoaderLoaded() { } /** - * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, - * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to - * znodes, etc. + * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, to flag what + * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. + * @return The passed bytes with magic prepended (Creates a new byte array that is + * bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ - public static byte [] prependPBMagic(final byte [] bytes) { + public static byte[] prependPBMagic(final byte[] bytes) { return Bytes.add(PB_MAGIC, bytes); } @@ -317,17 +313,17 @@ public static boolean isClassLoaderLoaded() { * @param bytes Bytes to check. 
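The prependPBMagic / isPBMagicPrefix / lengthOfPBMagic helpers touched above frame protobuf payloads written to znodes and similar locations. A small illustration, with a stand-in payload rather than a real serialized message:

    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PbMagicSketch {
      public static void main(String[] args) {
        byte[] content = Bytes.toBytes("serialized-protobuf-payload"); // stand-in payload
        byte[] znodeData = ProtobufUtil.prependPBMagic(content);       // 'PBUF' + payload
        // Readers check the prefix before parsing and skip it with lengthOfPBMagic().
        System.out.println(ProtobufUtil.isPBMagicPrefix(znodeData));   // true
        System.out.println(ProtobufUtil.isPBMagicPrefix(content));     // false
      }
    }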
* @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { return ProtobufMagic.isPBMagicPrefix(bytes); } /** - * @param bytes Bytes to check. + * @param bytes Bytes to check. * @param offset offset to start at - * @param len length to use + * @param len length to use * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { return ProtobufMagic.isPBMagicPrefix(bytes, offset, len); } @@ -339,7 +335,7 @@ public static void expectPBMagicPrefix(final byte[] bytes) throws Deserializatio if (!isPBMagicPrefix(bytes)) { String bytesPrefix = bytes == null ? "null" : Bytes.toStringBinary(bytes, 0, PB_MAGIC.length); throw new DeserializationException( - "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix" + ", bytes: " + bytesPrefix); + "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix" + ", bytes: " + bytesPrefix); } } @@ -350,7 +346,7 @@ public static int lengthOfPBMagic() { return ProtobufMagic.lengthOfPBMagic(); } - public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte [] value) { + public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte[] value) { ComparatorProtos.ByteArrayComparable.Builder builder = ComparatorProtos.ByteArrayComparable.newBuilder(); if (value != null) builder.setValue(UnsafeByteOperations.unsafeWrap(value)); @@ -358,12 +354,10 @@ public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final b } /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * + * Return the IOException thrown by the remote server wrapped in ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. + * @return Exception wrapped in ServiceException or a new IOException that wraps the unexpected + * ServiceException. */ public static IOException getRemoteException(ServiceException se) { return makeIOExceptionOfException(se); @@ -372,8 +366,8 @@ public static IOException getRemoteException(ServiceException se) { /** * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than * just {@link ServiceException}. Prefer this method to - * {@link #getRemoteException(ServiceException)} because trying to - * contain direct protobuf references. + * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf + * references. */ public static IOException handleRemoteException(Throwable e) { return makeIOExceptionOfException(e); @@ -388,14 +382,13 @@ private static IOException makeIOExceptionOfException(Throwable e) { return ExceptionUtil.asInterrupt(t); } if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); + t = ((RemoteException) t).unwrapRemoteException(); } - return t instanceof IOException? (IOException)t: new HBaseIOException(t); + return t instanceof IOException ? 
(IOException) t : new HBaseIOException(t); } /** * Convert a ServerName to a protocol buffer ServerName - * * @param serverName the ServerName to convert * @return the converted protocol buffer ServerName * @see #toServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName) @@ -404,8 +397,7 @@ public static HBaseProtos.ServerName toServerName(final ServerName serverName) { if (serverName == null) { return null; } - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); + HBaseProtos.ServerName.Builder builder = HBaseProtos.ServerName.newBuilder(); builder.setHostName(serverName.getHostname()); if (serverName.getPort() >= 0) { builder.setPort(serverName.getPort()); @@ -418,7 +410,6 @@ public static HBaseProtos.ServerName toServerName(final ServerName serverName) { /** * Convert a protocol buffer ServerName to a ServerName - * * @param proto the protocol buffer ServerName to convert * @return the converted ServerName */ @@ -439,9 +430,9 @@ public static ServerName toServerName(final HBaseProtos.ServerName proto) { /** * Get a ServerName from the passed in data bytes. * @param data Data with a serialize server name in it; can handle the old style servername where - * servername was host and port. Works too with data that begins w/ the pb 'PBUF' magic - * and that is then followed by a protobuf that has a serialized {@link ServerName} in - * it. + * servername was host and port. Works too with data that begins w/ the pb 'PBUF' + * magic and that is then followed by a protobuf that has a serialized + * {@link ServerName} in it. * @return Returns null if data is null else converts passed data to a ServerName * instance. */ @@ -485,10 +476,8 @@ public static ServerName toServerName(final byte[] data) throws DeserializationE * @param proto protocol buffer ServerNameList * @return a list of ServerName */ - public static List toServerNameList( - List proto) { - return proto.stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()); + public static List toServerNameList(List proto) { + return proto.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()); } /** @@ -496,10 +485,10 @@ public static List toServerNameList( * @param proto the ListNamespaceDescriptorsResponse * @return a list of NamespaceDescriptor */ - public static List toNamespaceDescriptorList( - ListNamespaceDescriptorsResponse proto) { + public static List + toNamespaceDescriptorList(ListNamespaceDescriptorsResponse proto) { return proto.getNamespaceDescriptorList().stream().map(ProtobufUtil::toNamespaceDescriptor) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** @@ -512,7 +501,7 @@ public static List toTableDescriptorList(GetTableDescriptorsRes return new ArrayList<>(); } return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** @@ -521,20 +510,19 @@ public static List toTableDescriptorList(GetTableDescriptorsRes * @return a list of TableDescriptor */ public static List - toTableDescriptorList(ListTableDescriptorsByNamespaceResponse proto) { + toTableDescriptorList(ListTableDescriptorsByNamespaceResponse proto) { if (proto == null) return new ArrayList<>(); return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** * get the split keys in form "byte [][]" from a CreateTableRequest proto - * * @param proto the CreateTableRequest * @return the split 
keys */ - public static byte [][] getSplitKeysArray(final CreateTableRequest proto) { - byte [][] splitKeys = new byte[proto.getSplitKeysCount()][]; + public static byte[][] getSplitKeysArray(final CreateTableRequest proto) { + byte[][] splitKeys = new byte[proto.getSplitKeysCount()][]; for (int i = 0; i < proto.getSplitKeysCount(); ++i) { splitKeys[i] = proto.getSplitKeys(i).toByteArray(); } @@ -544,51 +532,47 @@ public static List toTableDescriptorList(GetTableDescriptorsRes /** * Convert a protobuf Durability into a client Durability */ - public static Durability toDurability( - final ClientProtos.MutationProto.Durability proto) { - switch(proto) { - case USE_DEFAULT: - return Durability.USE_DEFAULT; - case SKIP_WAL: - return Durability.SKIP_WAL; - case ASYNC_WAL: - return Durability.ASYNC_WAL; - case SYNC_WAL: - return Durability.SYNC_WAL; - case FSYNC_WAL: - return Durability.FSYNC_WAL; - default: - return Durability.USE_DEFAULT; + public static Durability toDurability(final ClientProtos.MutationProto.Durability proto) { + switch (proto) { + case USE_DEFAULT: + return Durability.USE_DEFAULT; + case SKIP_WAL: + return Durability.SKIP_WAL; + case ASYNC_WAL: + return Durability.ASYNC_WAL; + case SYNC_WAL: + return Durability.SYNC_WAL; + case FSYNC_WAL: + return Durability.FSYNC_WAL; + default: + return Durability.USE_DEFAULT; } } /** * Convert a client Durability into a protbuf Durability */ - public static ClientProtos.MutationProto.Durability toDurability( - final Durability d) { - switch(d) { - case USE_DEFAULT: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - case SKIP_WAL: - return ClientProtos.MutationProto.Durability.SKIP_WAL; - case ASYNC_WAL: - return ClientProtos.MutationProto.Durability.ASYNC_WAL; - case SYNC_WAL: - return ClientProtos.MutationProto.Durability.SYNC_WAL; - case FSYNC_WAL: - return ClientProtos.MutationProto.Durability.FSYNC_WAL; - default: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; + public static ClientProtos.MutationProto.Durability toDurability(final Durability d) { + switch (d) { + case USE_DEFAULT: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; + case SKIP_WAL: + return ClientProtos.MutationProto.Durability.SKIP_WAL; + case ASYNC_WAL: + return ClientProtos.MutationProto.Durability.ASYNC_WAL; + case SYNC_WAL: + return ClientProtos.MutationProto.Durability.SYNC_WAL; + case FSYNC_WAL: + return ClientProtos.MutationProto.Durability.FSYNC_WAL; + default: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; } } /** * Convert a protocol buffer Get to a client Get - * * @param proto the protocol buffer Get to convert - * @return the converted client Get - * @throws IOException + * @return the converted client Get n */ public static Get toGet(final ClientProtos.Get proto) throws IOException { if (proto == null) return null; @@ -609,8 +593,8 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -621,14 +605,14 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { FilterProtos.Filter filter = proto.getFilter(); 
get.setFilter(ProtobufUtil.toFilter(filter)); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { get.addColumn(family, qualifier.toByteArray()); } } else { @@ -636,7 +620,7 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { } } } - if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ + if (proto.hasExistenceOnly() && proto.getExistenceOnly()) { get.setCheckExistenceOnly(true); } if (proto.hasConsistency()) { @@ -650,58 +634,59 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { public static Consistency toConsistency(ClientProtos.Consistency consistency) { switch (consistency) { - case STRONG : return Consistency.STRONG; - case TIMELINE : return Consistency.TIMELINE; - default : return Consistency.STRONG; + case STRONG: + return Consistency.STRONG; + case TIMELINE: + return Consistency.TIMELINE; + default: + return Consistency.STRONG; } } public static ClientProtos.Consistency toConsistency(Consistency consistency) { switch (consistency) { - case STRONG : return ClientProtos.Consistency.STRONG; - case TIMELINE : return ClientProtos.Consistency.TIMELINE; - default : return ClientProtos.Consistency.STRONG; + case STRONG: + return ClientProtos.Consistency.STRONG; + case TIMELINE: + return ClientProtos.Consistency.TIMELINE; + default: + return ClientProtos.Consistency.STRONG; } } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert - * @return A client Put. - * @throws IOException + * @return A client Put. n */ - public static Put toPut(final MutationProto proto) - throws IOException { + public static Put toPut(final MutationProto proto) throws IOException { return toPut(proto, null); } /** * Convert a protocol buffer Mutate to a Put. - * - * @param proto The protocol buffer MutationProto to convert + * @param proto The protocol buffer MutationProto to convert * @param cellScanner If non-null, the Cell data that goes with this proto. - * @return A client Put. - * @throws IOException + * @return A client Put. n */ public static Put toPut(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? + throws IOException { + // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? MutationType type = proto.getMutateType(); - assert type == MutationType.PUT: type.name(); - long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP; + assert type == MutationType.PUT : type.name(); + long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (put == null) { @@ -714,13 +699,13 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner throw new IllegalArgumentException("row cannot be null"); } // The proto has the metadata and the data itself - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (ColumnValue column: proto.getColumnValueList()) { + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } long ts = timestamp; if (qv.hasTimestamp()) { @@ -729,51 +714,35 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner byte[] allTagsBytes; if (qv.hasTags()) { allTagsBytes = qv.getTags().toByteArray(); - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(proto.getRow().toByteArray()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .setTags(allTagsBytes) - .build()); + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(proto.getRow().toByteArray()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()) + .setTags(allTagsBytes).build()); } else { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Cell.Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .setTags(allTagsBytes) - .build()); + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(Cell.Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).setTags(allTagsBytes) + .build()); } } else { - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .build()); - } else{ - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .build()); + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()).build()); + } else { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) + .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) + .setTimestamp(ts).setType(Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).build()); } } } } } put.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { put.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return put; @@ -781,43 +750,38 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert - * @return the converted client Delete - * @throws IOException + * @return the converted client Delete n */ - public static Delete toDelete(final MutationProto proto) - throws IOException { + public static Delete toDelete(final MutationProto proto) throws IOException { return toDelete(proto, null); } /** * Convert a protocol buffer Mutate to a Delete - * - * @param proto the protocol buffer Mutate to convert + * @param proto the protocol buffer Mutate to convert * @param cellScanner if non-null, the data that goes with this delete. - * @return the converted client Delete - * @throws IOException + * @return the converted client Delete n */ public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.DELETE : type.name(); long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + + TextFormat.shortDebugString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. 
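The toPut and toDelete converters in this hunk rebuild client mutations from a MutationProto, pulling cell data either from the proto itself or from an accompanying CellScanner. The sketch below hand-builds a small PUT proto with the data inline (so no CellScanner is needed) and converts it back; it assumes the relocated thirdparty ByteString used by these shaded generated classes, and the row/family/qualifier values are illustrative.

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
    import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

    public class ToPutSketch {
      public static void main(String[] args) throws Exception {
        // A minimal PUT proto carrying its data inline.
        MutationProto proto = MutationProto.newBuilder()
            .setRow(ByteString.copyFromUtf8("row1"))
            .setMutateType(MutationType.PUT)
            .addColumnValue(ColumnValue.newBuilder()
                .setFamily(ByteString.copyFromUtf8("f"))
                .addQualifierValue(QualifierValue.newBuilder()
                    .setQualifier(ByteString.copyFromUtf8("q"))
                    .setValue(ByteString.copyFromUtf8("v"))))
            .build();
        // With no explicit timestamp the converter falls back to HConstants.LATEST_TIMESTAMP.
        Put put = ProtobufUtil.toPut(proto);
        System.out.println(put);
      }
    }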
- throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + TextFormat.shortDebugString(proto)); } Cell cell = cellScanner.current(); if (delete == null) { @@ -830,9 +794,9 @@ public static Delete toDelete(final MutationProto proto, final CellScanner cellS if (delete == null) { throw new IllegalArgumentException("row cannot be null"); } - for (ColumnValue column: proto.getColumnValueList()) { + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { DeleteType deleteType = qv.getDeleteType(); byte[] qualifier = null; if (qv.hasQualifier()) { @@ -852,35 +816,38 @@ public static Delete toDelete(final MutationProto proto, final CellScanner cellS } } delete.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return delete; } + @FunctionalInterface - private interface ConsumerWithException { + private interface ConsumerWithException { void accept(T t, U u) throws IOException; } - private static T toDelta(Function supplier, ConsumerWithException consumer, - final MutationProto proto, final CellScanner cellScanner) throws IOException { + private static T toDelta(Function supplier, + ConsumerWithException consumer, final MutationProto proto, + final CellScanner cellScanner) throws IOException { byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; T mutation = row == null ? null : supplier.apply(new Bytes(row)); int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (mutation == null) { - mutation = supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + mutation = + supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } consumer.accept(mutation, cell); } @@ -893,23 +860,18 @@ private static T toDelta(Function supplier, Consu for (QualifierValue qv : column.getQualifierValueList()) { byte[] qualifier = qv.getQualifier().toByteArray(); if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } byte[] value = qv.getValue().toByteArray(); byte[] tags = null; if (qv.hasTags()) { tags = qv.getTags().toByteArray(); } - consumer.accept(mutation, ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(mutation.getRow()) - .setFamily(family) - .setQualifier(qualifier) - .setTimestamp(cellTimestampOrLatest(qv)) - .setType(KeyValue.Type.Put.getCode()) - .setValue(value) - .setTags(tags) - .build()); + consumer.accept(mutation, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(mutation.getRow()).setFamily(family).setQualifier(qualifier) + .setTimestamp(cellTimestampOrLatest(qv)).setType(KeyValue.Type.Put.getCode()) + .setValue(value).setTags(tags).build()); } } } @@ -929,18 +891,16 @@ private static long cellTimestampOrLatest(QualifierValue cell) { } /** - * Convert a protocol buffer Mutate to an Append - * @param cellScanner - * @param proto the protocol buffer Mutate to convert - * @return the converted client Append - * @throws IOException + * Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to + * convert + * @return the converted client Append n */ public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.APPEND : type.name(); Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()), - Append::add, proto, cellScanner); + Append::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); append.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -950,17 +910,16 @@ public static Append toAppend(final MutationProto proto, final CellScanner cellS /** * Convert a protocol buffer Mutate to an Increment - * * @param proto the protocol buffer Mutate to convert - * @return the converted client Increment - * @throws IOException + * @return the converted client Increment n */ public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.INCREMENT : type.name(); - Increment 
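[editor's sketch, not part of the patch] The toDelta helper above factors the shared Append/Increment decoding; a hedged sketch of the Append side, using only the method signatures visible in the hunks. Class name and cell values are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendRoundTripSketch {
      static Append roundTrip() throws IOException {
        Append append = new Append(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
        MutationProto proto = ProtobufUtil.toMutation(MutationType.APPEND, append);
        // With the data inline in the proto, toDelta never consults the CellScanner.
        return ProtobufUtil.toAppend(proto, null);
      }
    }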
increment = toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), - Increment::add, proto, cellScanner); + Increment increment = + toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), + Increment::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); increment.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -970,10 +929,8 @@ public static Increment toIncrement(final MutationProto proto, final CellScanner /** * Convert a MutateRequest to Mutation - * * @param proto the protocol buffer Mutate to convert - * @return the converted Mutation - * @throws IOException + * @return the converted Mutation n */ public static Mutation toMutation(final MutationProto proto) throws IOException { MutationType type = proto.getMutateType(); @@ -1020,15 +977,11 @@ public static Scan.ReadType toReadType(ClientProtos.Scan.ReadType readType) { /** * Convert a client Scan to a protocol buffer Scan - * * @param scan the client Scan to convert - * @return the converted protocol buffer Scan - * @throws IOException + * @return the converted protocol buffer Scan n */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); + public static ClientProtos.Scan toScan(final Scan scan) throws IOException { + ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder(); scanBuilder.setCacheBlocks(scan.getCacheBlocks()); if (scan.getBatch() > 0) { scanBuilder.setBatchSize(scan.getBatch()); @@ -1046,15 +999,14 @@ public static ClientProtos.Scan toScan( scanBuilder.setMaxVersions(scan.getMaxVersions()); scan.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { scanBuilder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) .build()); }); scanBuilder.setTimeRange(ProtobufUtil.toTimeRange(scan.getTimeRange())); Map attributes = scan.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); scanBuilder.addAttribute(attributeBuilder.build()); @@ -1073,13 +1025,12 @@ public static ClientProtos.Scan toScan( } if (scan.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { + for (Map.Entry> family : scan.getFamilyMap().entrySet()) { columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - NavigableSet qualifiers = family.getValue(); + NavigableSet qualifiers = family.getValue(); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1120,13 +1071,10 @@ public static ClientProtos.Scan toScan( /** * Convert a protocol buffer Scan to a client Scan - * * @param proto the protocol buffer Scan to convert - * @return the converted client Scan - * @throws IOException + * @return the converted client Scan n */ - public static Scan toScan( - final 
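[editor's sketch, not part of the patch] The Increment side goes through the same toDelta plumbing; a small sketch under the same assumptions as the Append one above (shaded package paths, made-up row and counter names).

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementRoundTripSketch {
      static Increment roundTrip() throws IOException {
        Increment inc = new Increment(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
        MutationProto proto = ProtobufUtil.toMutation(MutationType.INCREMENT, inc);
        return ProtobufUtil.toIncrement(proto, null); // shares the toDelta path with Append
      }
    }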
ClientProtos.Scan proto) throws IOException { + public static Scan toScan(final ClientProtos.Scan proto) throws IOException { byte[] startRow = HConstants.EMPTY_START_ROW; byte[] stopRow = HConstants.EMPTY_END_ROW; boolean includeStartRow = true; @@ -1149,7 +1097,7 @@ public static Scan toScan( } } Scan scan = - new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); + new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); if (proto.hasCacheBlocks()) { scan.setCacheBlocks(proto.getCacheBlocks()); } @@ -1168,8 +1116,8 @@ public static Scan toScan( if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -1189,14 +1137,14 @@ public static Scan toScan( if (proto.hasAllowPartialResults()) { scan.setAllowPartialResults(proto.getAllowPartialResults()); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { scan.addColumn(family, qualifier.toByteArray()); } } else { @@ -1233,8 +1181,8 @@ public static ClientProtos.Cursor toCursor(Cursor cursor) { public static ClientProtos.Cursor toCursor(Cell cell) { return ClientProtos.Cursor.newBuilder() - .setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) - .build(); + .setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + .build(); } public static Cursor toCursor(ClientProtos.Cursor cursor) { @@ -1243,15 +1191,11 @@ public static Cursor toCursor(ClientProtos.Cursor cursor) { /** * Create a protocol buffer Get based on a client Get. 
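[editor's sketch, not part of the patch] With both toScan overloads now in view, the client Scan to wire Scan round trip looks roughly like this; row bounds, family and qualifier are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanRoundTripSketch {
      static Scan roundTrip() throws IOException {
        Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("row-a"))
          .withStopRow(Bytes.toBytes("row-z"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        ClientProtos.Scan proto = ProtobufUtil.toScan(scan); // client -> wire
        return ProtobufUtil.toScan(proto);                   // wire -> client
      }
    }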
- * * @param get the client Get - * @return a protocol buffer Get - * @throws IOException + * @return a protocol buffer Get n */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); + public static ClientProtos.Get toGet(final Get get) throws IOException { + ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder(); builder.setRow(UnsafeByteOperations.unsafeWrap(get.getRow())); builder.setCacheBlocks(get.getCacheBlocks()); builder.setMaxVersions(get.getMaxVersions()); @@ -1260,15 +1204,14 @@ public static ClientProtos.Get toGet( } get.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) .build()); }); builder.setTimeRange(ProtobufUtil.toTimeRange(get.getTimeRange())); Map attributes = get.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1277,12 +1220,12 @@ public static ClientProtos.Get toGet( if (get.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { + for (Map.Entry> family : families.entrySet()) { NavigableSet qualifiers = family.getValue(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1295,7 +1238,7 @@ public static ClientProtos.Get toGet( if (get.getRowOffsetPerColumnFamily() > 0) { builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); } - if (get.isCheckExistenceOnly()){ + if (get.isCheckExistenceOnly()) { builder.setExistenceOnly(true); } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { @@ -1315,12 +1258,7 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } /** - * Create a protocol buffer Mutate based on a client Mutation - * - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException + * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n */ public static MutationProto toMutation(final MutationType type, final Mutation mutation, final long nonce) throws IOException { @@ -1328,13 +1266,12 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder) throws IOException { + MutationProto.Builder builder) throws IOException { return toMutation(type, mutation, builder, HConstants.NO_NONCE); } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder, long nonce) - throws IOException { + MutationProto.Builder builder, long nonce) throws IOException { builder = 
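[editor's sketch, not part of the patch] A minimal use of the toGet converter reformatted above; as with the other sketches, package paths follow the shaded layout and the data values are invented.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetToProtoSketch {
      static ClientProtos.Get convert() throws IOException {
        Get get = new Get(Bytes.toBytes("row1"));
        get.addFamily(Bytes.toBytes("cf"));
        get.setCacheBlocks(false);
        // Families, time ranges and attributes are copied into the proto as shown above.
        return ProtobufUtil.toGet(get);
      }
    }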
getMutationBuilderAndSetCommonFields(type, mutation, builder); if (nonce != HConstants.NO_NONCE) { builder.setNonce(nonce); @@ -1347,15 +1284,15 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> family: mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { columnBuilder.clear(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - for (Cell cell: family.getValue()) { + for (Cell cell : family.getValue()) { valueBuilder.clear(); - valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); - valueBuilder.setValue(UnsafeByteOperations.unsafeWrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); + valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); valueBuilder.setTimestamp(cell.getTimestamp()); if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) { KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte()); @@ -1370,34 +1307,27 @@ public static MutationProto toMutation(final MutationType type, final Mutation m /** * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @param builder - * @return a protobuf'd Mutation - * @throws IOException + * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a + * protobuf'd Mutation n */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder) throws IOException { + final MutationProto.Builder builder) throws IOException { return toMutationNoData(type, mutation, builder, HConstants.NO_NONCE); } /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException + * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. + * Understanding is that the Cell will be transported other than via protobuf. 
nn * @return a + * protobuf'd Mutation n */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) - throws IOException { - MutationProto.Builder builder = MutationProto.newBuilder(); + throws IOException { + MutationProto.Builder builder = MutationProto.newBuilder(); return toMutationNoData(type, mutation, builder); } public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder, long nonce) throws IOException { + final MutationProto.Builder builder, long nonce) throws IOException { getMutationBuilderAndSetCommonFields(type, mutation, builder); builder.setAssociatedCellCount(mutation.size()); if (mutation instanceof Increment) { @@ -1414,13 +1344,11 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta /** * Code shared by {@link #toMutation(MutationType, Mutation)} and - * {@link #toMutationNoData(MutationType, Mutation)} - * @param type - * @param mutation - * @return A partly-filled out protobuf'd Mutation. + * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd + * Mutation. */ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type, - final Mutation mutation, MutationProto.Builder builder) { + final Mutation mutation, MutationProto.Builder builder) { builder.setRow(UnsafeByteOperations.unsafeWrap(mutation.getRow())); builder.setMutateType(type); builder.setDurability(toDurability(mutation.getDurability())); @@ -1428,7 +1356,7 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final Map attributes = mutation.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1439,7 +1367,6 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final /** * Convert a client Result to a protocol buffer Result - * * @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1448,14 +1375,13 @@ public static ClientProtos.Result toResult(final Result result) { } /** - * Convert a client Result to a protocol buffer Result - * @param result the client Result to convert - * @param encodeTags whether to includeTags in converted protobuf result or not - * When @encodeTags is set to true, it will return all the tags in the response. - * These tags may contain some sensitive data like acl permissions, etc. - * Only the tools like Export, Import which needs to take backup needs to set - * it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * Convert a client Result to a protocol buffer Result + * @param result the client Result to convert + * @param encodeTags whether to includeTags in converted protobuf result or not When @encodeTags + * is set to true, it will return all the tags in the response. These tags may + * contain some sensitive data like acl permissions, etc. Only the tools like + * Export, Import which needs to take backup needs to set it to true so that + * cell tags are persisted in backup. Refer to HBASE-25246 for more context. 
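[editor's sketch, not part of the patch] The toMutation vs toMutationNoData split above is the "cells travel outside protobuf" design: the NoData variant emits only metadata plus an associated cell count, and the cells are expected to move via a CellScanner side channel. A hedged comparison sketch; the generated accessor names are assumed from the repeated column_value and associated_cell_count fields used in this file.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MutationNoDataSketch {
      static void compare() throws IOException {
        Put put = new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        MutationProto withCells = ProtobufUtil.toMutation(MutationType.PUT, put);
        MutationProto metaOnly = ProtobufUtil.toMutationNoData(MutationType.PUT, put);
        // metaOnly carries no column_value entries, only associated_cell_count == put.size().
        System.out.println(withCells.getColumnValueCount() + " vs " + metaOnly.getAssociatedCellCount());
      }
    }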
* @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { @@ -1481,12 +1407,11 @@ public static ClientProtos.Result toResult(final Result result, boolean encodeTa /** * Convert a client Result to a protocol buffer Result - * * @param existence the client existence to send * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final boolean existence, boolean stale) { - if (stale){ + if (stale) { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE_STALE : EMPTY_RESULT_PB_EXISTS_FALSE_STALE; } else { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE : EMPTY_RESULT_PB_EXISTS_FALSE; @@ -1494,9 +1419,8 @@ public static ClientProtos.Result toResult(final boolean existence, boolean stal } /** - * Convert a client Result to a protocol buffer Result. - * The pb Result does not include the Cell data. That is for transport otherwise. - * + * Convert a client Result to a protocol buffer Result. The pb Result does not include the Cell + * data. That is for transport otherwise. * @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1512,7 +1436,6 @@ public static ClientProtos.Result toResultNoData(final Result result) { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @return the converted client Result */ @@ -1522,26 +1445,24 @@ public static Result toResult(final ClientProtos.Result proto) { /** * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert - * @param decodeTags whether to decode tags into converted client Result - * When @decodeTags is set to true, it will decode all the tags from the - * response. These tags may contain some sensitive data like acl permissions, - * etc. Only the tools like Export, Import which needs to take backup needs to - * set it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * @param proto the protocol buffer Result to convert + * @param decodeTags whether to decode tags into converted client Result When @decodeTags is set + * to true, it will decode all the tags from the response. These tags may + * contain some sensitive data like acl permissions, etc. Only the tools like + * Export, Import which needs to take backup needs to set it to true so that + * cell tags are persisted in backup. Refer to HBASE-25246 for more context. * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } List values = proto.getCellList(); - if (values.isEmpty()){ + if (values.isEmpty()) { return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT; } @@ -1555,23 +1476,23 @@ public static Result toResult(final ClientProtos.Result proto, boolean decodeTag /** * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert + * @param proto the protocol buffer Result to convert * @param scanner Optional cell scanner. 
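[editor's sketch, not part of the patch] The Result converters above can be exercised without a cluster by building a Result from a KeyValue; the decodeTags/encodeTags overloads control whether cell tags survive the trip. Everything below except the ProtobufUtil signatures is illustrative.

    import java.util.Collections;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultRoundTripSketch {
      static Result roundTrip() {
        Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
          Bytes.toBytes("q"), Bytes.toBytes("v"));
        Result result = Result.create(Collections.singletonList(cell));
        ClientProtos.Result proto = ProtobufUtil.toResult(result); // cells serialized inline
        return ProtobufUtil.toResult(proto);                       // back to a client Result
      }
    }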
- * @return the converted client Result - * @throws IOException + * @return the converted client Result n */ public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) - throws IOException { + throws IOException { List values = proto.getCellList(); if (proto.hasExists()) { - if ((values != null && !values.isEmpty()) || - (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { + if ( + (values != null && !values.isEmpty()) + || (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0) + ) { throw new IllegalArgumentException("bad proto: exists with cells is no allowed " + proto); } if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } @@ -1587,23 +1508,21 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner } } - if (!values.isEmpty()){ + if (!values.isEmpty()) { if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c: values) { + for (CellProtos.Cell c : values) { cells.add(toCell(builder, c, false)); } } return (cells == null || cells.isEmpty()) - ? (proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT) - : Result.create(cells, null, proto.getStale()); + ? (proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT) + : Result.create(cells, null, proto.getStale()); } - /** * Convert a ByteArrayComparable to a protocol buffer Comparator - * * @param comparator the ByteArrayComparable to convert * @return the converted protocol buffer Comparator */ @@ -1616,23 +1535,22 @@ public static ComparatorProtos.Comparator toComparator(ByteArrayComparable compa /** * Convert a protocol buffer Comparator to a ByteArrayComparable - * * @param proto the protocol buffer Comparator to convert * @return the converted ByteArrayComparable */ @SuppressWarnings("unchecked") public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { + throws IOException { String type = proto.getName(); String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); + byte[] value = proto.getSerializedComparator().toByteArray(); try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Method parseFrom = c.getMethod(funcName, byte[].class); if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (ByteArrayComparable)parseFrom.invoke(null, value); + return (ByteArrayComparable) parseFrom.invoke(null, value); } catch (Exception e) { throw new IOException(e); } @@ -1640,14 +1558,13 @@ public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto /** * Convert a protocol buffer Filter to a client Filter - * * @param proto the protocol buffer Filter to convert * @return the converted Filter */ @SuppressWarnings("unchecked") public static Filter toFilter(FilterProtos.Filter proto) throws IOException { String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); + final byte[] value = proto.getSerializedFilter().toByteArray(); String funcName = "parseFrom"; try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); @@ -1655,7 +1572,7 @@ public static Filter 
toFilter(FilterProtos.Filter proto) throws IOException { if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (Filter)parseFrom.invoke(c, value); + return (Filter) parseFrom.invoke(c, value); } catch (Exception e) { // Either we couldn't instantiate the method object, or "parseFrom" failed. // In either case, let's not retry. @@ -1665,7 +1582,6 @@ public static Filter toFilter(FilterProtos.Filter proto) throws IOException { /** * Convert a client Filter to a protocol buffer Filter - * * @param filter the Filter to convert * @return the converted protocol buffer Filter */ @@ -1677,54 +1593,46 @@ public static FilterProtos.Filter toFilter(Filter filter) throws IOException { } /** - * Convert a delete KeyValue type to protocol buffer DeleteType. - * - * @param type - * @return protocol buffer DeleteType - * @throws IOException + * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer + * DeleteType n */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { + public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - case DeleteFamilyVersion: - return DeleteType.DELETE_FAMILY_VERSION; - default: + case Delete: + return DeleteType.DELETE_ONE_VERSION; + case DeleteColumn: + return DeleteType.DELETE_MULTIPLE_VERSIONS; + case DeleteFamily: + return DeleteType.DELETE_FAMILY; + case DeleteFamilyVersion: + return DeleteType.DELETE_FAMILY_VERSION; + default: throw new IOException("Unknown delete type: " + type); } } /** * Convert a protocol buffer DeleteType to delete KeyValue type. - * * @param type The DeleteType - * @return The type. - * @throws IOException + * @return The type. 
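[editor's sketch, not part of the patch] The Filter converters rely on the reflective parseFrom dispatch shown above; a round trip with a stock filter looks like this. PrefixFilter is just a convenient concrete filter, not something this patch touches.

    import java.io.IOException;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FilterRoundTripSketch {
      static Filter roundTrip() throws IOException {
        Filter prefix = new PrefixFilter(Bytes.toBytes("row-"));
        FilterProtos.Filter proto = ProtobufUtil.toFilter(prefix);
        // toFilter(proto) locates PrefixFilter.parseFrom(byte[]) by name and invokes it.
        return ProtobufUtil.toFilter(proto);
      }
    }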
n */ - public static KeyValue.Type fromDeleteType( - DeleteType type) throws IOException { + public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException { switch (type) { - case DELETE_ONE_VERSION: - return KeyValue.Type.Delete; - case DELETE_MULTIPLE_VERSIONS: - return KeyValue.Type.DeleteColumn; - case DELETE_FAMILY: - return KeyValue.Type.DeleteFamily; - case DELETE_FAMILY_VERSION: - return KeyValue.Type.DeleteFamilyVersion; - default: - throw new IOException("Unknown delete type: " + type); + case DELETE_ONE_VERSION: + return KeyValue.Type.Delete; + case DELETE_MULTIPLE_VERSIONS: + return KeyValue.Type.DeleteColumn; + case DELETE_FAMILY: + return KeyValue.Type.DeleteFamily; + case DELETE_FAMILY_VERSION: + return KeyValue.Type.DeleteFamilyVersion; + default: + throw new IOException("Unknown delete type: " + type); } } /** * Convert a stringified protocol buffer exception Parameter to a Java Exception - * * @param parameter the protocol buffer Parameter to convert * @return the converted Exception * @throws IOException if failed to deserialize the parameter @@ -1736,7 +1644,7 @@ public static Throwable toException(final NameBytesPair parameter) throws IOExce String type = parameter.getName(); try { Class c = - (Class)Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); + (Class) Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Constructor cn = null; try { cn = c.getDeclaredConstructor(String.class); @@ -1751,31 +1659,30 @@ public static Throwable toException(final NameBytesPair parameter) throws IOExce } } -// Start helpers for Client + // Start helpers for Client @SuppressWarnings("unchecked") public static T newServiceStub(Class service, RpcChannel channel) - throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); + throws Exception { + return (T) Methods.call(service, null, "newStub", new Class[] { RpcChannel.class }, + new Object[] { channel }); } -// End helpers for Client -// Start helpers for Admin + // End helpers for Client + // Start helpers for Admin /** - * A helper to retrieve region info given a region name or an - * encoded region name using admin protocol. - * + * A helper to retrieve region info given a region name or an encoded region name using admin + * protocol. * @return the retrieved region info */ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( - final RpcController controller, final AdminService.BlockingInterface admin, - final byte[] regionName) throws IOException { + final RpcController controller, final AdminService.BlockingInterface admin, + final byte[] regionName) throws IOException { try { GetRegionInfoRequest request = getGetRegionInfoRequest(regionName); - GetRegionInfoResponse response = admin.getRegionInfo(controller, - getGetRegionInfoRequest(regionName)); + GetRegionInfoResponse response = + admin.getRegionInfo(controller, getGetRegionInfoRequest(regionName)); return toRegionInfo(response.getRegionInfo()); } catch (ServiceException se) { throw getRemoteException(se); @@ -1785,25 +1692,22 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( /** * @return A GetRegionInfoRequest for the passed in regionName. */ - public static GetRegionInfoRequest getGetRegionInfoRequest(final byte [] regionName) + public static GetRegionInfoRequest getGetRegionInfoRequest(final byte[] regionName) throws IOException { - return org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName)? 
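[editor's sketch, not part of the patch] The two switch statements above define an invertible mapping between KeyValue.Type delete markers and the protobuf DeleteType enum; a tiny check of that mapping, with DeleteType assumed to be the MutationProto-nested enum used elsewhere in this file.

    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.DeleteType;

    public class DeleteTypeMappingSketch {
      static void mapBothWays() throws IOException {
        DeleteType dt = ProtobufUtil.toDeleteType(KeyValue.Type.DeleteColumn);
        // dt is DELETE_MULTIPLE_VERSIONS; fromDeleteType reverses the mapping.
        KeyValue.Type back = ProtobufUtil.fromDeleteType(dt);
        assert back == KeyValue.Type.DeleteColumn;
      }
    }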
- GetRegionInfoRequest.newBuilder().setRegion(RequestConverter. - buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)).build(): - RequestConverter.buildGetRegionInfoRequest(regionName); + return org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName) + ? GetRegionInfoRequest.newBuilder() + .setRegion(RequestConverter.buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, + regionName)) + .build() + : RequestConverter.buildGetRegionInfoRequest(regionName); } /** - * A helper to close a region given a region name - * using admin protocol. - * - * @param admin - * @param regionName - * @throws IOException + * A helper to close a region given a region name using admin protocol. nnn */ public static void closeRegion(final RpcController controller, - final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) - throws IOException { + final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) + throws IOException { CloseRegionRequest closeRegionRequest = ProtobufUtil.buildCloseRegionRequest(server, regionName); try { @@ -1814,19 +1718,15 @@ public static void closeRegion(final RpcController controller, } /** - * A helper to warmup a region given a region name - * using admin protocol - * - * @param admin - * @param regionInfo - * + * A helper to warmup a region given a region name using admin protocol nn * */ public static void warmupRegion(final RpcController controller, - final AdminService.BlockingInterface admin, final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { + final AdminService.BlockingInterface admin, + final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { try { WarmupRegionRequest warmupRegionRequest = - RequestConverter.buildWarmupRegionRequest(regionInfo); + RequestConverter.buildWarmupRegionRequest(regionInfo); admin.warmupRegion(controller, warmupRegionRequest); } catch (ServiceException e) { @@ -1835,16 +1735,12 @@ public static void warmupRegion(final RpcController controller, } /** - * A helper to open a region using admin protocol. - * @param admin - * @param region - * @throws IOException + * A helper to open a region using admin protocol. nnn */ public static void openRegion(final RpcController controller, - final AdminService.BlockingInterface admin, ServerName server, final org.apache.hadoop.hbase.client.RegionInfo region) - throws IOException { - OpenRegionRequest request = - RequestConverter.buildOpenRegionRequest(server, region, null); + final AdminService.BlockingInterface admin, ServerName server, + final org.apache.hadoop.hbase.client.RegionInfo region) throws IOException { + OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, region, null); try { admin.openRegion(controller, request); } catch (ServiceException se) { @@ -1853,26 +1749,20 @@ public static void openRegion(final RpcController controller, } /** - * A helper to get the all the online regions on a region - * server using admin protocol. - * - * @param admin - * @return a list of online region info - * @throws IOException + * A helper to get the all the online regions on a region server using admin protocol. 
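[editor's sketch, not part of the patch] The getGetRegionInfoRequest helper reformatted above accepts either a full region name or an encoded one and builds the matching specifier; a sketch assuming GetRegionInfoRequest lives in the generated AdminProtos class, with a placeholder region name.

    import java.io.IOException;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionInfoRequestSketch {
      static GetRegionInfoRequest build() throws IOException {
        // The helper checks RegionInfo.isEncodedRegionName and picks the specifier type,
        // as the reformatted ternary above shows.
        return ProtobufUtil.getGetRegionInfoRequest(Bytes.toBytes("1588230740"));
      }
    }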
n * @return + * a list of online region info n */ - public static List getOnlineRegions(final AdminService.BlockingInterface admin) - throws IOException { + public static List + getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException { return getOnlineRegions(null, admin); } /** - * A helper to get the all the online regions on a region - * server using admin protocol. + * A helper to get the all the online regions on a region server using admin protocol. * @return a list of online region info */ - public static List getOnlineRegions(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + public static List getOnlineRegions( + final RpcController controller, final AdminService.BlockingInterface admin) throws IOException { GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); GetOnlineRegionResponse response = null; try { @@ -1885,14 +1775,15 @@ public static List getOnlineRegions(f /** * Get the list of region info from a GetOnlineRegionResponse - * * @param proto the GetOnlineRegionResponse * @return the list of region info or empty if proto is null */ - public static List getRegionInfos(final GetOnlineRegionResponse proto) { + public static List + getRegionInfos(final GetOnlineRegionResponse proto) { if (proto == null) return Collections.EMPTY_LIST; - List regionInfos = new ArrayList<>(proto.getRegionInfoList().size()); - for (RegionInfo regionInfo: proto.getRegionInfoList()) { + List regionInfos = + new ArrayList<>(proto.getRegionInfoList().size()); + for (RegionInfo regionInfo : proto.getRegionInfoList()) { regionInfos.add(toRegionInfo(regionInfo)); } return regionInfos; @@ -1903,8 +1794,7 @@ public static List getRegionInfos(fin * @return the server name */ public static ServerInfo getServerInfo(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + final AdminService.BlockingInterface admin) throws IOException { GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); try { GetServerInfoResponse response = admin.getServerInfo(controller, request); @@ -1915,28 +1805,22 @@ public static ServerInfo getServerInfo(final RpcController controller, } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. * @return the list of store files */ public static List getStoreFiles(final AdminService.BlockingInterface admin, - final byte[] regionName, final byte[] family) - throws IOException { + final byte[] regionName, final byte[] family) throws IOException { return getStoreFiles(null, admin, regionName, family); } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. 
* @return the list of store files */ public static List getStoreFiles(final RpcController controller, - final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) - throws IOException { - GetStoreFileRequest request = - ProtobufUtil.buildGetStoreFileRequest(regionName, family); + final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) + throws IOException { + GetStoreFileRequest request = ProtobufUtil.buildGetStoreFileRequest(regionName, family); try { GetStoreFileResponse response = admin.getStoreFile(controller, request); return response.getStoreFileList(); @@ -1945,7 +1829,7 @@ public static List getStoreFiles(final RpcController controller, } } -// End helpers for Admin + // End helpers for Admin /* * Get the total (read + write) requests from a RegionLoad pb @@ -1960,11 +1844,10 @@ public static long getTotalRequestsCount(RegionLoad rl) { return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); } - /** * @param m Message to get delimited pb serialization of (with pb magic prefix) */ - public static byte [] toDelimitedByteArray(final Message m) throws IOException { + public static byte[] toDelimitedByteArray(final Message m) throws IOException { // Allocate arbitrary big size so we avoid resizing. ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); baos.write(PB_MAGIC); @@ -1974,13 +1857,12 @@ public static long getTotalRequestsCount(RegionLoad rl) { /** * Find the HRegion encoded name based on a region specifier - * * @param regionSpecifier the region specifier * @return the corresponding region's encoded name * @throws DoNotRetryIOException if the specifier type is unsupported */ - public static String getRegionEncodedName( - final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { + public static String getRegionEncodedName(final RegionSpecifier regionSpecifier) + throws DoNotRetryIOException { ByteString value = regionSpecifier.getValue(); RegionSpecifierType type = regionSpecifier.getType(); switch (type) { @@ -1989,8 +1871,7 @@ public static String getRegionEncodedName( case ENCODED_REGION_NAME: return value.toStringUtf8(); default: - throw new DoNotRetryIOException( - "Unsupported region specifier type: " + type); + throw new DoNotRetryIOException("Unsupported region specifier type: " + type); } } @@ -2017,7 +1898,7 @@ public static MapReduceProtos.ScanMetrics toScanMetrics(ScanMetrics scanMetrics, Map metrics = scanMetrics.getMetricsMap(reset); for (Entry e : metrics.entrySet()) { HBaseProtos.NameInt64Pair nameInt64Pair = - HBaseProtos.NameInt64Pair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build(); + HBaseProtos.NameInt64Pair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build(); builder.addMetrics(nameInt64Pair); } return builder.build(); @@ -2034,7 +1915,7 @@ public static void toIOException(ServiceException se) throws IOException { Throwable cause = se.getCause(); if (cause != null && cause instanceof IOException) { - throw (IOException)cause; + throw (IOException) cause; } throw new IOException(se); } @@ -2085,14 +1966,11 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, - boolean decodeTags) { - ExtendedCellBuilder builder = cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) 
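[editor's sketch, not part of the patch] getRegionEncodedName, reformatted above, resolves a RegionSpecifier to an encoded region name; a sketch assuming the shaded thirdparty ByteString path and a placeholder 32-character encoded name.

    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

    public class RegionEncodedNameSketch {
      static String encodedName() throws DoNotRetryIOException {
        RegionSpecifier spec = RegionSpecifier.newBuilder()
          .setType(RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("0123456789abcdef0123456789abcdef"))
          .build();
        // An ENCODED_REGION_NAME value comes back as UTF-8; a full REGION_NAME is resolved
        // to its encoded form, and unsupported specifier types raise DoNotRetryIOException.
        return ProtobufUtil.getRegionEncodedName(spec);
      }
    }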
cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()); + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear().setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()).setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()).setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); if (decodeTags && cell.hasTags()) { builder.setTags(cell.getTags().toByteArray()); } @@ -2101,12 +1979,10 @@ public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { HBaseProtos.NamespaceDescriptor.Builder b = - HBaseProtos.NamespaceDescriptor.newBuilder() - .setName(ByteString.copyFromUtf8(ns.getName())); - for(Map.Entry entry: ns.getConfiguration().entrySet()) { - b.addConfiguration(HBaseProtos.NameStringPair.newBuilder() - .setName(entry.getKey()) - .setValue(entry.getValue())); + HBaseProtos.NamespaceDescriptor.newBuilder().setName(ByteString.copyFromUtf8(ns.getName())); + for (Map.Entry entry : ns.getConfiguration().entrySet()) { + b.addConfiguration( + HBaseProtos.NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue())); } return b.build(); } @@ -2120,25 +1996,25 @@ public static NamespaceDescriptor toNamespaceDescriptor(HBaseProtos.NamespaceDes } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, - List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, List inputPaths, + List outputPaths, Path storeDir) { return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir); } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, - byte[] family, List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, byte[] family, + List inputPaths, List outputPaths, Path storeDir) { // compaction descriptor contains relative paths. // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() - .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes())) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap( - regionName == null ? info.getEncodedNameAsBytes() : regionName)) - .setFamilyName(UnsafeByteOperations.unsafeWrap(family)) - .setStoreHomeDir(storeDir.getName()); //make relative + .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes())) + .setEncodedRegionName(UnsafeByteOperations + .unsafeWrap(regionName == null ? 
info.getEncodedNameAsBytes() : regionName)) + .setFamilyName(UnsafeByteOperations.unsafeWrap(family)).setStoreHomeDir(storeDir.getName()); // make + // relative for (Path inputPath : inputPaths) { - builder.addCompactionInput(inputPath.getName()); //relative path + builder.addCompactionInput(inputPath.getName()); // relative path } for (Path outputPath : outputPaths) { builder.addCompactionOutput(outputPath.getName()); @@ -2147,20 +2023,20 @@ public static CompactionDescriptor toCompactionDescriptor( return builder.build(); } - public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.hadoop.hbase.client.RegionInfo hri, - long flushSeqId, Map> committedFiles) { - FlushDescriptor.Builder desc = FlushDescriptor.newBuilder() - .setAction(action) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes())) - .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName())) - .setFlushSequenceNumber(flushSeqId) - .setTableName(UnsafeByteOperations.unsafeWrap(hri.getTable().getName())); + public static FlushDescriptor toFlushDescriptor(FlushAction action, + org.apache.hadoop.hbase.client.RegionInfo hri, long flushSeqId, + Map> committedFiles) { + FlushDescriptor.Builder desc = FlushDescriptor.newBuilder().setAction(action) + .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes())) + .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName())) + .setFlushSequenceNumber(flushSeqId) + .setTableName(UnsafeByteOperations.unsafeWrap(hri.getTable().getName())); for (Map.Entry> entry : committedFiles.entrySet()) { WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder = - WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() + WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) - .setStoreHomeDir(Bytes.toString(entry.getKey())); //relative to region + .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region if (entry.getValue() != null) { for (Path path : entry.getValue()) { builder.addFlushOutput(path.getName()); @@ -2171,41 +2047,31 @@ public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.h return desc.build(); } - public static RegionEventDescriptor toRegionEventDescriptor( - EventType eventType, org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, - Map> storeFiles) { + public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, + org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, + Map> storeFiles) { final byte[] tableNameAsBytes = hri.getTable().getName(); final byte[] encodedNameAsBytes = hri.getEncodedNameAsBytes(); final byte[] regionNameAsBytes = hri.getRegionName(); - return toRegionEventDescriptor(eventType, - tableNameAsBytes, - encodedNameAsBytes, - regionNameAsBytes, - seqId, + return toRegionEventDescriptor(eventType, tableNameAsBytes, encodedNameAsBytes, + regionNameAsBytes, seqId, - server, - storeFiles); + server, storeFiles); } public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, - byte[] tableNameAsBytes, - byte[] encodedNameAsBytes, - byte[] regionNameAsBytes, - long seqId, - - ServerName server, - Map> storeFiles) { - RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder() - .setEventType(eventType) - .setTableName(UnsafeByteOperations.unsafeWrap(tableNameAsBytes)) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedNameAsBytes)) - 
.setRegionName(UnsafeByteOperations.unsafeWrap(regionNameAsBytes)) - .setLogSequenceNumber(seqId) - .setServer(toServerName(server)); + byte[] tableNameAsBytes, byte[] encodedNameAsBytes, byte[] regionNameAsBytes, long seqId, + + ServerName server, Map> storeFiles) { + RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder().setEventType(eventType) + .setTableName(UnsafeByteOperations.unsafeWrap(tableNameAsBytes)) + .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedNameAsBytes)) + .setRegionName(UnsafeByteOperations.unsafeWrap(regionNameAsBytes)).setLogSequenceNumber(seqId) + .setServer(toServerName(server)); for (Entry> entry : storeFiles.entrySet()) { - StoreDescriptor.Builder builder = StoreDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) + StoreDescriptor.Builder builder = + StoreDescriptor.newBuilder().setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) .setStoreHomeDir(Bytes.toString(entry.getKey())); for (Path path : entry.getValue()) { builder.addStoreFile(path.getName()); @@ -2217,55 +2083,53 @@ public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, } /** - * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. - * Tries to NOT print out data both because it can be big but also so we do not have data in our - * logs. Use judiciously. - * @param m - * @return toString of passed m + * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to + * NOT print out data both because it can be big but also so we do not have data in our logs. Use + * judiciously. n * @return toString of passed m */ public static String getShortTextFormat(Message m) { if (m == null) return "null"; if (m instanceof ScanRequest) { - // This should be small and safe to output. No data. + // This should be small and safe to output. No data. return TextFormat.shortDebugString(m); } else if (m instanceof RegionServerReportRequest) { // Print a short message only, just the servername and the requests, not the full load. - RegionServerReportRequest r = (RegionServerReportRequest)m; - return "server " + TextFormat.shortDebugString(r.getServer()) + - " load { numberOfRequests: " + r.getLoad().getNumberOfRequests() + " }"; + RegionServerReportRequest r = (RegionServerReportRequest) m; + return "server " + TextFormat.shortDebugString(r.getServer()) + " load { numberOfRequests: " + + r.getLoad().getNumberOfRequests() + " }"; } else if (m instanceof RegionServerStartupRequest) { // Should be small enough. return TextFormat.shortDebugString(m); } else if (m instanceof MutationProto) { - return toShortString((MutationProto)m); + return toShortString((MutationProto) m); } else if (m instanceof GetRequest) { GetRequest r = (GetRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getGet().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getGet().getRow()); } else if (m instanceof ClientProtos.MultiRequest) { ClientProtos.MultiRequest r = (ClientProtos.MultiRequest) m; // Get the number of Actions - int actionsCount = r.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = + r.getRegionActionList().stream().mapToInt(ClientProtos.RegionAction::getActionCount).sum(); // Get first set of Actions. 
ClientProtos.RegionAction actions = r.getRegionActionList().get(0); - String row = actions.getActionCount() <= 0? "": - getStringForByteString(actions.getAction(0).hasGet()? - actions.getAction(0).getGet().getRow(): - actions.getAction(0).getMutation().getRow()); - return "region= " + getStringForByteString(actions.getRegion().getValue()) + - ", for " + actionsCount + " action(s) and 1st row key=" + row; + String row = actions.getActionCount() <= 0 + ? "" + : getStringForByteString(actions.getAction(0).hasGet() + ? actions.getAction(0).getGet().getRow() + : actions.getAction(0).getMutation().getRow()); + return "region= " + getStringForByteString(actions.getRegion().getValue()) + ", for " + + actionsCount + " action(s) and 1st row key=" + row; } else if (m instanceof ClientProtos.MutateRequest) { ClientProtos.MutateRequest r = (ClientProtos.MutateRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getMutation().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getMutation().getRow()); } else if (m instanceof ClientProtos.CoprocessorServiceRequest) { ClientProtos.CoprocessorServiceRequest r = (ClientProtos.CoprocessorServiceRequest) m; - return "coprocessorService= " + r.getCall().getServiceName() + ":" + r.getCall().getMethodName(); + return "coprocessorService= " + r.getCall().getServiceName() + ":" + + r.getCall().getMethodName(); } return "TODO: " + m.getClass().toString(); } @@ -2276,7 +2140,6 @@ private static String getStringForByteString(ByteString bs) { /** * Return SlowLogParams to maintain recent online slowlog responses - * * @param message Message object {@link Message} * @return SlowLogParams with regionName(for filter queries) and params */ @@ -2296,15 +2159,13 @@ public static SlowLogParams getSlowLogParams(Message message) { } else if (message instanceof GetRequest) { GetRequest getRequest = (GetRequest) message; String regionName = getStringForByteString(getRequest.getRegion().getValue()); - String params = "region= " + regionName + ", row= " - + getStringForByteString(getRequest.getGet().getRow()); + String params = + "region= " + regionName + ", row= " + getStringForByteString(getRequest.getGet().getRow()); return new SlowLogParams(regionName, params); } else if (message instanceof MultiRequest) { MultiRequest multiRequest = (MultiRequest) message; - int actionsCount = multiRequest.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = multiRequest.getRegionActionList().stream() + .mapToInt(ClientProtos.RegionAction::getActionCount).sum(); RegionAction actions = multiRequest.getRegionActionList().get(0); String regionName = getStringForByteString(actions.getRegion().getValue()); String params = "region= " + regionName + ", for " + actionsCount + " action(s)"; @@ -2316,8 +2177,7 @@ public static SlowLogParams getSlowLogParams(Message message) { return new SlowLogParams(regionName, params); } else if (message instanceof CoprocessorServiceRequest) { CoprocessorServiceRequest coprocessorServiceRequest = (CoprocessorServiceRequest) message; - String params = "coprocessorService= " - + coprocessorServiceRequest.getCall().getServiceName() + String params = "coprocessorService= " + coprocessorServiceRequest.getCall().getServiceName() + ":" + coprocessorServiceRequest.getCall().getMethodName(); return new SlowLogParams(params); } @@ -2331,19 +2191,19 @@ public static 
SlowLogParams getSlowLogParams(Message message) { * @return Short String of mutation proto */ static String toShortString(final MutationProto proto) { - return "row=" + Bytes.toString(proto.getRow().toByteArray()) + - ", type=" + proto.getMutateType().toString(); + return "row=" + Bytes.toString(proto.getRow().toByteArray()) + ", type=" + + proto.getMutateType().toString(); } public static TableName toTableName(HBaseProtos.TableName tableNamePB) { return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(), - tableNamePB.getQualifier().asReadOnlyByteBuffer()); + tableNamePB.getQualifier().asReadOnlyByteBuffer()); } public static HBaseProtos.TableName toProtoTableName(TableName tableName) { return HBaseProtos.TableName.newBuilder() - .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())) - .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build(); + .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())) + .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build(); } public static List toProtoTableNameList(List tableNameList) { @@ -2372,10 +2232,8 @@ public static TableName[] getTableNameArray(List tableNam } /** - * Convert a protocol buffer CellVisibility to a client CellVisibility - * - * @param proto - * @return the converted client CellVisibility + * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted + * client CellVisibility */ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) { if (proto == null) return null; @@ -2383,11 +2241,8 @@ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) } /** - * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * - * @param protoBytes - * @return the converted client CellVisibility - * @throws DeserializationException + * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the + * converted client CellVisibility n */ public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; @@ -2403,10 +2258,8 @@ public static CellVisibility toCellVisibility(byte[] protoBytes) throws Deserial } /** - * Create a protocol buffer CellVisibility based on a client CellVisibility. - * - * @param cellVisibility - * @return a protocol buffer CellVisibility + * Create a protocol buffer CellVisibility based on a client CellVisibility. 
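[editor's sketch, not part of the patch] The TableName converters just above are a simple wrap/unwrap of namespace and qualifier bytes; a small round trip with an invented namespace and table name.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

    public class TableNameConversionSketch {
      static TableName roundTrip() {
        TableName tn = TableName.valueOf("ns", "tbl");
        HBaseProtos.TableName proto = ProtobufUtil.toProtoTableName(tn);
        return ProtobufUtil.toTableName(proto); // namespace and qualifier map back one-to-one
      }
    }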
+ * @return a protocol buffer CellVisibility */ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) { ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); @@ -2415,10 +2268,8 @@ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVi } /** - * Convert a protocol buffer Authorizations to a client Authorizations - * - * @param proto - * @return the converted client Authorizations + * Convert a protocol buffer Authorizations to a client Authorizations + * @return the converted client Authorizations */ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) { if (proto == null) return null; @@ -2426,11 +2277,8 @@ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) } /** - * Convert a protocol buffer Authorizations bytes to a client Authorizations - * - * @param protoBytes - * @return the converted client Authorizations - * @throws DeserializationException + * Convert a protocol buffer Authorizations bytes to a client Authorizations + * @return the converted client Authorizations */ public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; @@ -2446,10 +2294,8 @@ public static Authorizations toAuthorizations(byte[] protoBytes) throws Deserial } /** - * Create a protocol buffer Authorizations based on a client Authorizations. - * - * @param authorizations - * @return a protocol buffer Authorizations + * Create a protocol buffer Authorizations based on a client Authorizations. + * @return a protocol buffer Authorizations */ public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) { ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder(); @@ -2460,48 +2306,56 @@ public static ClientProtos.Authorizations toAuthorizations(Authorizations author } /** - * Convert a protocol buffer TimeUnit to a client TimeUnit - * - * @param proto - * @return the converted client TimeUnit + * Convert a protocol buffer TimeUnit to a client TimeUnit + * @return the converted client TimeUnit */ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { switch (proto) { - case NANOSECONDS: return TimeUnit.NANOSECONDS; - case MICROSECONDS: return TimeUnit.MICROSECONDS; - case MILLISECONDS: return TimeUnit.MILLISECONDS; - case SECONDS: return TimeUnit.SECONDS; - case MINUTES: return TimeUnit.MINUTES; - case HOURS: return TimeUnit.HOURS; - case DAYS: return TimeUnit.DAYS; + case NANOSECONDS: + return TimeUnit.NANOSECONDS; + case MICROSECONDS: + return TimeUnit.MICROSECONDS; + case MILLISECONDS: + return TimeUnit.MILLISECONDS; + case SECONDS: + return TimeUnit.SECONDS; + case MINUTES: + return TimeUnit.MINUTES; + case HOURS: + return TimeUnit.HOURS; + case DAYS: + return TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + proto); } /** - * Convert a client TimeUnit to a protocol buffer TimeUnit - * - * @param timeUnit - * @return the converted protocol buffer TimeUnit + * Convert a client TimeUnit to a protocol buffer TimeUnit + * @return the converted protocol buffer TimeUnit */ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { switch (timeUnit) { - case NANOSECONDS: return HBaseProtos.TimeUnit.NANOSECONDS; - case MICROSECONDS: return HBaseProtos.TimeUnit.MICROSECONDS; - case MILLISECONDS: return HBaseProtos.TimeUnit.MILLISECONDS; - case SECONDS: return
HBaseProtos.TimeUnit.SECONDS; - case MINUTES: return HBaseProtos.TimeUnit.MINUTES; - case HOURS: return HBaseProtos.TimeUnit.HOURS; - case DAYS: return HBaseProtos.TimeUnit.DAYS; + case NANOSECONDS: + return HBaseProtos.TimeUnit.NANOSECONDS; + case MICROSECONDS: + return HBaseProtos.TimeUnit.MICROSECONDS; + case MILLISECONDS: + return HBaseProtos.TimeUnit.MILLISECONDS; + case SECONDS: + return HBaseProtos.TimeUnit.SECONDS; + case MINUTES: + return HBaseProtos.TimeUnit.MINUTES; + case HOURS: + return HBaseProtos.TimeUnit.HOURS; + case DAYS: + return HBaseProtos.TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + timeUnit); } /** - * Convert a protocol buffer ThrottleType to a client ThrottleType - * - * @param proto - * @return the converted client ThrottleType + * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted + * client ThrottleType */ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) { switch (proto) { @@ -2529,10 +2383,8 @@ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) } /** - * Convert a client ThrottleType to a protocol buffer ThrottleType - * - * @param type - * @return the converted protocol buffer ThrottleType + * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted + * protocol buffer ThrottleType */ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) { switch (type) { @@ -2560,163 +2412,159 @@ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType ty } /** - * Convert a protocol buffer QuotaScope to a client QuotaScope - * - * @param proto - * @return the converted client QuotaScope + * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client + * QuotaScope */ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { switch (proto) { - case CLUSTER: return QuotaScope.CLUSTER; - case MACHINE: return QuotaScope.MACHINE; + case CLUSTER: + return QuotaScope.CLUSTER; + case MACHINE: + return QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + proto); } /** - * Convert a client QuotaScope to a protocol buffer QuotaScope - * - * @param scope - * @return the converted protocol buffer QuotaScope + * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol + * buffer QuotaScope */ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { switch (scope) { - case CLUSTER: return QuotaProtos.QuotaScope.CLUSTER; - case MACHINE: return QuotaProtos.QuotaScope.MACHINE; + case CLUSTER: + return QuotaProtos.QuotaScope.CLUSTER; + case MACHINE: + return QuotaProtos.QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + scope); } /** - * Convert a protocol buffer QuotaType to a client QuotaType - * - * @param proto - * @return the converted client QuotaType + * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client + * QuotaType */ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { switch (proto) { - case THROTTLE: return QuotaType.THROTTLE; - case SPACE: return QuotaType.SPACE; + case THROTTLE: + return QuotaType.THROTTLE; + case SPACE: + return QuotaType.SPACE; } throw new RuntimeException("Invalid QuotaType " + proto); } /** - * Convert a client QuotaType to a protocol buffer QuotaType - * - * @param type - * @return the converted protocol buffer QuotaType + * Convert a client 
QuotaType to a protocol buffer QuotaType n * @return the converted protocol + * buffer QuotaType */ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { switch (type) { - case THROTTLE: return QuotaProtos.QuotaType.THROTTLE; - case SPACE: return QuotaProtos.QuotaType.SPACE; - default: throw new RuntimeException("Invalid QuotaType " + type); + case THROTTLE: + return QuotaProtos.QuotaType.THROTTLE; + case SPACE: + return QuotaProtos.QuotaType.SPACE; + default: + throw new RuntimeException("Invalid QuotaType " + type); } } /** * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. - * * @param proto The protocol buffer space violation policy. * @return The corresponding client SpaceViolationPolicy. */ - public static SpaceViolationPolicy toViolationPolicy( - final QuotaProtos.SpaceViolationPolicy proto) { + public static SpaceViolationPolicy + toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) { switch (proto) { - case DISABLE: return SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + proto); } /** * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. - * * @param policy The client SpaceViolationPolicy object. * @return The corresponding protocol buffer SpaceViolationPolicy. */ - public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( - final SpaceViolationPolicy policy) { + public static QuotaProtos.SpaceViolationPolicy + toProtoViolationPolicy(final SpaceViolationPolicy policy) { switch (policy) { - case DISABLE: return QuotaProtos.SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return QuotaProtos.SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return QuotaProtos.SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + policy); } /** * Build a protocol buffer TimedQuota - * - * @param limit the allowed number of request/data per timeUnit + * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit - * @param scope the quota scope + * @param scope the quota scope * @return the protocol buffer TimedQuota */ public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit, - final QuotaScope scope) { - return QuotaProtos.TimedQuota.newBuilder() - .setSoftLimit(limit) - .setTimeUnit(toProtoTimeUnit(timeUnit)) - .setScope(toProtoQuotaScope(scope)) - .build(); + final QuotaScope scope) { + return QuotaProtos.TimedQuota.newBuilder().setSoftLimit(limit) + .setTimeUnit(toProtoTimeUnit(timeUnit)).setScope(toProtoQuotaScope(scope)).build(); } /** * Builds a protocol buffer SpaceQuota. 
- * - * @param limit The maximum space usage for the quota in bytes. + * @param limit The maximum space usage for the quota in bytes. * @param violationPolicy The policy to apply when the quota is violated. * @return The protocol buffer SpaceQuota. */ - public static QuotaProtos.SpaceQuota toProtoSpaceQuota( - final long limit, final SpaceViolationPolicy violationPolicy) { - return QuotaProtos.SpaceQuota.newBuilder() - .setSoftLimit(limit) - .setViolationPolicy(toProtoViolationPolicy(violationPolicy)) - .build(); + public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit, + final SpaceViolationPolicy violationPolicy) { + return QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(limit) + .setViolationPolicy(toProtoViolationPolicy(violationPolicy)).build(); } /** - * Generates a marker for the WAL so that we propagate the notion of a bulk region load - * throughout the WAL. - * + * Generates a marker for the WAL so that we propagate the notion of a bulk region load throughout + * the WAL. * @param tableName The tableName into which the bulk load is being imported into. * @param encodedRegionName Encoded region name of the region which is being bulk loaded. * @param storeFiles A set of store files of a column family are bulk loaded. - * @param storeFilesSize Map of store files and their lengths - * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile - * name + * @param storeFilesSize Map of store files and their lengths + * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile name * @return The WAL log marker for bulk loads. */ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, ByteString encodedRegionName, Map> storeFiles, Map storeFilesSize, long bulkloadSeqId) { - return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, - storeFilesSize, bulkloadSeqId, null, true); + return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, storeFilesSize, + bulkloadSeqId, null, true); } public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, - ByteString encodedRegionName, Map> storeFiles, - Map storeFilesSize, long bulkloadSeqId, - List clusterIds, boolean replicate) { + ByteString encodedRegionName, Map> storeFiles, + Map storeFilesSize, long bulkloadSeqId, List clusterIds, + boolean replicate) { BulkLoadDescriptor.Builder desc = - BulkLoadDescriptor.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .setEncodedRegionName(encodedRegionName) - .setBulkloadSeqNum(bulkloadSeqId) - .setReplicate(replicate); - if(clusterIds != null) { + BulkLoadDescriptor.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setEncodedRegionName(encodedRegionName).setBulkloadSeqNum(bulkloadSeqId) + .setReplicate(replicate); + if (clusterIds != null) { desc.addAllClusterIds(clusterIds); } for (Map.Entry> entry : storeFiles.entrySet()) { - WALProtos.StoreDescriptor.Builder builder = StoreDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) + WALProtos.StoreDescriptor.Builder builder = + StoreDescriptor.newBuilder().setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region for (Path path : entry.getValue()) { String name = path.getName(); @@ -2734,8 +2582,7 @@ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableN * This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit 
for decoding * buffers * @param builder current message builder - * @param in Inputsream with delimited protobuf data - * @throws IOException + * @param in InputStream with delimited protobuf data */ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) throws IOException { @@ -2753,15 +2600,14 @@ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is known * @param builder current message builder - * @param in InputStream containing protobuf data - * @param size known size of protobuf data - * @throws IOException + * @param in InputStream containing protobuf data + * @param size known size of protobuf data */ public static void mergeFrom(Message.Builder builder, InputStream in, int size) - throws IOException { + throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(in); codedInput.setSizeLimit(size); builder.mergeFrom(codedInput); @@ -2769,14 +2615,12 @@ public static void mergeFrom(Message.Builder builder, InputStream in, int size) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is not known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is not known * @param builder current message builder - * @param in InputStream containing protobuf data - * @throws IOException + * @param in InputStream containing protobuf data */ - public static void mergeFrom(Message.Builder builder, InputStream in) - throws IOException { + public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(in); codedInput.setSizeLimit(Integer.MAX_VALUE); builder.mergeFrom(codedInput); @@ -2784,11 +2628,10 @@ public static void mergeFrom(Message.Builder builder, InputStream in) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with ByteStrings + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with ByteStrings * @param builder current message builder - * @param bs ByteString containing the - * @throws IOException + * @param bs ByteString containing the protobuf data */ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException { final CodedInputStream codedInput = bs.newCodedInput(); @@ -2798,11 +2641,10 @@ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOEx } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder - * @param b byte array - * @throws IOException + * @param b byte array */ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(b); @@ -2812,16 +2654,13 @@ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOExcepti } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit
for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder - * @param b byte array - * @param offset - * @param length - * @throws IOException + * @param b byte array nnn */ public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length) - throws IOException { + throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(b, offset, length); codedInput.setSizeLimit(length); builder.mergeFrom(codedInput); @@ -2829,7 +2668,7 @@ public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int } public static void mergeFrom(Message.Builder builder, CodedInputStream codedInput, int length) - throws IOException { + throws IOException { codedInput.resetSizeCounter(); int prevLimit = codedInput.setSizeLimit(length); @@ -2841,37 +2680,32 @@ public static void mergeFrom(Message.Builder builder, CodedInputStream codedInpu codedInput.setSizeLimit(prevLimit); } - public static ReplicationLoadSink toReplicationLoadSink( - ClusterStatusProtos.ReplicationLoadSink rls) { + public static ReplicationLoadSink + toReplicationLoadSink(ClusterStatusProtos.ReplicationLoadSink rls) { ReplicationLoadSink.ReplicationLoadSinkBuilder builder = ReplicationLoadSink.newBuilder(); - builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()). - setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()). - setTimestampStarted(rls.hasTimestampStarted()? rls.getTimestampStarted(): -1L). - setTotalOpsProcessed(rls.hasTotalOpsProcessed()? rls.getTotalOpsProcessed(): -1L); + builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) + .setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()) + .setTimestampStarted(rls.hasTimestampStarted() ? rls.getTimestampStarted() : -1L) + .setTotalOpsProcessed(rls.hasTotalOpsProcessed() ? rls.getTotalOpsProcessed() : -1L); return builder.build(); } - public static ReplicationLoadSource toReplicationLoadSource( - ClusterStatusProtos.ReplicationLoadSource rls) { + public static ReplicationLoadSource + toReplicationLoadSource(ClusterStatusProtos.ReplicationLoadSource rls) { ReplicationLoadSource.ReplicationLoadSourceBuilder builder = ReplicationLoadSource.newBuilder(); - builder.setPeerID(rls.getPeerID()). - setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()). - setSizeOfLogQueue(rls.getSizeOfLogQueue()). - setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()). - setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()). - setReplicationLag(rls.getReplicationLag()). - setQueueId(rls.getQueueId()). - setRecovered(rls.getRecovered()). - setRunning(rls.getRunning()). - setEditsSinceRestart(rls.getEditsSinceRestart()). - setEditsRead(rls.getEditsRead()). 
- setoPsShipped(rls.getOPsShipped()); + builder.setPeerID(rls.getPeerID()).setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) + .setSizeOfLogQueue(rls.getSizeOfLogQueue()) + .setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()) + .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.getRecovered()).setRunning(rls.getRunning()) + .setEditsSinceRestart(rls.getEditsSinceRestart()).setEditsRead(rls.getEditsRead()) + .setoPsShipped(rls.getOPsShipped()); return builder.build(); } /** * Get a protocol buffer VersionInfo - * * @return the converted protocol buffer VersionInfo */ public static HBaseProtos.VersionInfo getVersionInfo() { @@ -2897,9 +2731,9 @@ public static HBaseProtos.VersionInfo getVersionInfo() { * @return the converted list of SecurityCapability elements */ public static List toSecurityCapabilityList( - List capabilities) { + List capabilities) { List scList = new ArrayList<>(capabilities.size()); - for (MasterProtos.SecurityCapabilitiesResponse.Capability c: capabilities) { + for (MasterProtos.SecurityCapabilitiesResponse.Capability c : capabilities) { try { scList.add(SecurityCapability.valueOf(c.getNumber())); } catch (IllegalArgumentException e) { @@ -2955,11 +2789,12 @@ public static ColumnFamilySchema toColumnFamilySchema(ColumnFamilyDescriptor hcd */ public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) { // Use the empty constructor so we preserve the initial values set on construction for things - // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for + // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for // unrelated-looking test failures that are hard to trace back to here. - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); - cfs.getAttributesList().forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); + cfs.getAttributesList() + .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); cfs.getConfigurationList().forEach(a -> builder.setConfiguration(a.getName(), a.getValue())); return builder.build(); } @@ -2990,23 +2825,19 @@ public static TableSchema toTableSchema(TableDescriptor htd) { * @return An {@link TableDescriptor} made from the passed in pb ts. 
*/ public static TableDescriptor toTableDescriptor(final TableSchema ts) { - TableDescriptorBuilder builder - = TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); - ts.getColumnFamiliesList() - .stream() - .map(ProtobufUtil::toColumnFamilyDescriptor) + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); + ts.getColumnFamiliesList().stream().map(ProtobufUtil::toColumnFamilyDescriptor) .forEach(builder::setColumnFamily); ts.getAttributesList() .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); - ts.getConfigurationList() - .forEach(a -> builder.setValue(a.getName(), a.getValue())); + ts.getConfigurationList().forEach(a -> builder.setValue(a.getName(), a.getValue())); return builder.build(); } /** * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state - * @param state the protobuf CompactionState - * @return CompactionState + * @param state the protobuf CompactionState n */ public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) { return CompactionState.valueOf(state.toString()); @@ -3018,16 +2849,15 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac /** * Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state - * @param state the protobuf CompactionState - * @return CompactionState + * @param state the protobuf CompactionState n */ - public static CompactionState createCompactionStateForRegionLoad( - RegionLoad.CompactionState state) { + public static CompactionState + createCompactionStateForRegionLoad(RegionLoad.CompactionState state) { return CompactionState.valueOf(state.toString()); } - public static RegionLoad.CompactionState createCompactionStateForRegionLoad( - CompactionState state) { + public static RegionLoad.CompactionState + createCompactionStateForRegionLoad(CompactionState state) { return RegionLoad.CompactionState.valueOf(state.toString()); } @@ -3042,7 +2872,7 @@ public static Optional toOptionalTimestamp(MajorCompactionTimestampRespons * @return the protobuf SnapshotDescription type */ public static SnapshotProtos.SnapshotDescription.Type - createProtosSnapShotDescType(SnapshotType type) { + createProtosSnapShotDescType(SnapshotType type) { return SnapshotProtos.SnapshotDescription.Type.valueOf(type.name()); } @@ -3053,14 +2883,14 @@ public static Optional toOptionalTimestamp(MajorCompactionTimestampRespons * @return the protobuf SnapshotDescription type */ public static SnapshotProtos.SnapshotDescription.Type - createProtosSnapShotDescType(String snapshotDesc) { + createProtosSnapShotDescType(String snapshotDesc) { return SnapshotProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase(Locale.ROOT)); } /** * Creates {@link SnapshotType} from the {@link SnapshotProtos.SnapshotDescription.Type} - * @param type the snapshot description type - * @return the protobuf SnapshotDescription type + * @param type the snapshot description type + * @return the protobuf SnapshotDescription type */ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription.Type type) { return SnapshotType.valueOf(type.toString()); @@ -3072,8 +2902,9 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription * @return the protobuf SnapshotDescription */ public static SnapshotProtos.SnapshotDescription - createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { - 
SnapshotProtos.SnapshotDescription.Builder builder = SnapshotProtos.SnapshotDescription.newBuilder(); + createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { + SnapshotProtos.SnapshotDescription.Builder builder = + SnapshotProtos.SnapshotDescription.newBuilder(); if (snapshotDesc.getTableName() != null) { builder.setTable(snapshotDesc.getTableNameAsString()); } @@ -3086,8 +2917,10 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription if (snapshotDesc.getCreationTime() != -1L) { builder.setCreationTime(snapshotDesc.getCreationTime()); } - if (snapshotDesc.getTtl() != -1L && - snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { + if ( + snapshotDesc.getTtl() != -1L + && snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE) + ) { builder.setTtl(snapshotDesc.getTtl()); } if (snapshotDesc.getVersion() != -1) { @@ -3106,36 +2939,34 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription * @return the POJO SnapshotDescription */ public static SnapshotDescription - createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) { + createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) { final Map snapshotProps = new HashMap<>(); snapshotProps.put("TTL", snapshotDesc.getTtl()); snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, snapshotDesc.getMaxFileSize()); return new SnapshotDescription(snapshotDesc.getName(), - snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, - createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), - snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); + snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, + createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), + snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); } public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats) { return new RegionLoadStats(stats.getMemStoreLoad(), stats.getHeapOccupancy(), - stats.getCompactionPressure()); + stats.getCompactionPressure()); } /** - * @param msg - * @return A String version of the passed in msg + * n * @return A String version of the passed in msg */ public static String toText(Message msg) { return TextFormat.shortDebugString(msg); } - public static byte [] toBytes(ByteString bs) { + public static byte[] toBytes(ByteString bs) { return bs.toByteArray(); } /** - * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. - * @throws IOException + * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. 
n */ public static T call(Callable callable) throws IOException { try { @@ -3146,21 +2977,20 @@ public static T call(Callable callable) throws IOException { } /** - * Create a protocol buffer GetStoreFileRequest for a given region name - * - * @param regionName the name of the region to get info - * @param family the family to get store file list - * @return a protocol buffer GetStoreFileRequest - */ - public static GetStoreFileRequest - buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { - GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); - return builder.build(); - } + * Create a protocol buffer GetStoreFileRequest for a given region name + * @param regionName the name of the region to get info + * @param family the family to get store file list + * @return a protocol buffer GetStoreFileRequest + */ + public static GetStoreFileRequest buildGetStoreFileRequest(final byte[] regionName, + final byte[] family) { + GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); + return builder.build(); + } /** * Create a CloseRegionRequest for a given region name @@ -3172,12 +3002,12 @@ public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte } public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte[] regionName, - ServerName destinationServer) { + ServerName destinationServer) { return buildCloseRegionRequest(server, regionName, destinationServer, -1); } public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte[] regionName, - ServerName destinationServer, long closeProcId) { + ServerName destinationServer, long closeProcId) { CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); RegionSpecifier region = RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); @@ -3193,9 +3023,9 @@ public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte } public static ProcedureDescription buildProcedureDescription(String signature, String instance, - Map props) { + Map props) { ProcedureDescription.Builder builder = - ProcedureDescription.newBuilder().setSignature(signature).setInstance(instance); + ProcedureDescription.newBuilder().setSignature(signature).setInstance(instance); if (props != null && !props.isEmpty()) { props.entrySet().forEach(entry -> builder.addConfiguration( NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue()).build())); @@ -3204,29 +3034,27 @@ public static ProcedureDescription buildProcedureDescription(String signature, S } /** - * Get the Meta region state from the passed data bytes. Can handle both old and new style - * server names. - * @param data protobuf serialized data with meta server name. + * Get the Meta region state from the passed data bytes. Can handle both old and new style server + * names. + * @param data protobuf serialized data with meta server name. * @param replicaId replica ID for this region * @return RegionState instance corresponding to the serialized data. * @throws DeserializationException if the data is invalid. 
*/ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replicaId) - throws DeserializationException { + throws DeserializationException { RegionState.State state = RegionState.State.OPEN; ServerName serverName; if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.MetaRegionServer rl = - ZooKeeperProtos.MetaRegionServer.parser().parseFrom(data, prefixLen, - data.length - prefixLen); + ZooKeeperProtos.MetaRegionServer rl = ZooKeeperProtos.MetaRegionServer.parser() + .parseFrom(data, prefixLen, data.length - prefixLen); if (rl.hasState()) { state = RegionState.State.convert(rl.getState()); } HBaseProtos.ServerName sn = rl.getServer(); - serverName = ServerName.valueOf( - sn.getHostName(), sn.getPort(), sn.getStartCode()); + serverName = ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); } catch (InvalidProtocolBufferException e) { throw new DeserializationException("Unable to parse meta region location"); } @@ -3237,21 +3065,21 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic if (serverName == null) { state = RegionState.State.OFFLINE; } - return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); + return new RegionState( + RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), + state, serverName); } /** * Get a ServerName from the passed in data bytes. - * @param data Data with a serialize server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. - * @throws DeserializationException - */ - public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException { + * @param data Data with a serialize server name in it; can handle the old style servername where + * servername was host and port. Works too with data that begins w/ the pb 'PBUF' + * magic and that is then followed by a protobuf that has a serialized + * {@link ServerName} in it. + * @return Returns null if data is null else converts passed data to a ServerName + * instance. n + */ + public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException { if (data == null || data.length <= 0) return null; if (ProtobufMagic.isPBMagicPrefix(data)) { int prefixLen = ProtobufMagic.lengthOfPBMagic(); @@ -3259,13 +3087,13 @@ public static ServerName parseServerNameFrom(final byte [] data) throws Deserial ZooKeeperProtos.Master rss = ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sn = - rss.getMaster(); + rss.getMaster(); return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (/*InvalidProtocolBufferException*/IOException e) { + } catch (/* InvalidProtocolBufferException */IOException e) { // A failed parse of the znode is pretty catastrophic. Rather than loop // retrying hoping the bad bytes will changes, and rather than change // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a RuntimeException. This should "never" happen. 
+ // over the code base, throw a RuntimeException. This should "never" happen. // Fail fast if it does. throw new DeserializationException(e); } @@ -3306,7 +3134,8 @@ public static String toLockJson(List lockedRes JsonArray lockedResourceJsons = new JsonArray(lockedResourceProtos.size()); for (LockServiceProtos.LockedResource lockedResourceProto : lockedResourceProtos) { try { - JsonElement lockedResourceJson = ProtobufMessageConverter.toJsonElement(lockedResourceProto); + JsonElement lockedResourceJson = + ProtobufMessageConverter.toJsonElement(lockedResourceProto); lockedResourceJsons.add(lockedResourceJson); } catch (InvalidProtocolBufferException e) { lockedResourceJsons.add(e.toString()); @@ -3317,11 +3146,11 @@ public static String toLockJson(List lockedRes /** * Convert a RegionInfo to a Proto RegionInfo - * * @param info the RegionInfo to convert * @return the converted Proto RegionInfo */ - public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { + public static HBaseProtos.RegionInfo + toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { if (info == null) { return null; } @@ -3342,18 +3171,18 @@ public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase. /** * Convert HBaseProto.RegionInfo to a RegionInfo - * * @param proto the RegionInfo to convert * @return the converted RegionInfo */ - public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBaseProtos.RegionInfo proto) { + public static org.apache.hadoop.hbase.client.RegionInfo + toRegionInfo(final HBaseProtos.RegionInfo proto) { if (proto == null) { return null; } TableName tableName = ProtobufUtil.toTableName(proto.getTableName()); long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; - int replicaId = proto.hasReplicaId()? proto.getReplicaId(): defaultReplicaId; + int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : defaultReplicaId; if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { return RegionInfoBuilder.FIRST_META_REGIONINFO; } @@ -3369,12 +3198,8 @@ public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBase if (proto.hasSplit()) { split = proto.getSplit(); } - RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(startKey) - .setEndKey(endKey) - .setRegionId(regionId) - .setReplicaId(replicaId) - .setSplit(split); + RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) + .setEndKey(endKey).setRegionId(regionId).setReplicaId(replicaId).setSplit(split); if (proto.hasOffline()) { rib.setOffline(proto.getOffline()); } @@ -3397,18 +3222,17 @@ public static HRegionLocation toRegionLocation(HBaseProtos.RegionLocation proto) return new HRegionLocation(regionInfo, serverName, proto.getSeqNum()); } - public static List toSnapshotDescriptionList( - GetCompletedSnapshotsResponse response, Pattern pattern) { + public static List + toSnapshotDescriptionList(GetCompletedSnapshotsResponse response, Pattern pattern) { return response.getSnapshotsList().stream().map(ProtobufUtil::createSnapshotDesc) - .filter(snap -> pattern != null ? pattern.matcher(snap.getName()).matches() : true) - .collect(Collectors.toList()); + .filter(snap -> pattern != null ? 
pattern.matcher(snap.getName()).matches() : true) + .collect(Collectors.toList()); } - public static CacheEvictionStats toCacheEvictionStats( - HBaseProtos.CacheEvictionStats stats) throws IOException{ + public static CacheEvictionStats toCacheEvictionStats(HBaseProtos.CacheEvictionStats stats) + throws IOException { CacheEvictionStatsBuilder builder = CacheEvictionStats.builder(); - builder.withEvictedBlocks(stats.getEvictedBlocks()) - .withMaxCacheSize(stats.getMaxCacheSize()); + builder.withEvictedBlocks(stats.getEvictedBlocks()).withMaxCacheSize(stats.getMaxCacheSize()); if (stats.getExceptionCount() > 0) { for (HBaseProtos.RegionExceptionMessage exception : stats.getExceptionList()) { HBaseProtos.RegionSpecifier rs = exception.getRegion(); @@ -3419,65 +3243,52 @@ public static CacheEvictionStats toCacheEvictionStats( return builder.build(); } - public static HBaseProtos.CacheEvictionStats toCacheEvictionStats( - CacheEvictionStats cacheEvictionStats) { - HBaseProtos.CacheEvictionStats.Builder builder - = HBaseProtos.CacheEvictionStats.newBuilder(); + public static HBaseProtos.CacheEvictionStats + toCacheEvictionStats(CacheEvictionStats cacheEvictionStats) { + HBaseProtos.CacheEvictionStats.Builder builder = HBaseProtos.CacheEvictionStats.newBuilder(); for (Map.Entry entry : cacheEvictionStats.getExceptions().entrySet()) { - builder.addException( - RegionExceptionMessage.newBuilder() - .setRegion(RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, entry.getKey())) - .setException(ResponseConverter.buildException(entry.getValue())) - .build() - ); - } - return builder - .setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) - .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()) - .build(); - } - - public static ClusterStatusProtos.ReplicationLoadSource toReplicationLoadSource( - ReplicationLoadSource rls) { - return ClusterStatusProtos.ReplicationLoadSource.newBuilder() - .setPeerID(rls.getPeerID()) - .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) - .setSizeOfLogQueue((int) rls.getSizeOfLogQueue()) - .setTimeStampOfLastShippedOp(rls.getTimestampOfLastShippedOp()) - .setReplicationLag(rls.getReplicationLag()) - .setQueueId(rls.getQueueId()) - .setRecovered(rls.isRecovered()) - .setRunning(rls.isRunning()) - .setEditsSinceRestart(rls.hasEditsSinceRestart()) - .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) - .setOPsShipped(rls.getOPsShipped()) - .setEditsRead(rls.getEditsRead()) - .build(); - } - - public static ClusterStatusProtos.ReplicationLoadSink toReplicationLoadSink( - ReplicationLoadSink rls) { + builder.addException(RegionExceptionMessage.newBuilder() + .setRegion( + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey())) + .setException(ResponseConverter.buildException(entry.getValue())).build()); + } + return builder.setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) + .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()).build(); + } + + public static ClusterStatusProtos.ReplicationLoadSource + toReplicationLoadSource(ReplicationLoadSource rls) { + return ClusterStatusProtos.ReplicationLoadSource.newBuilder().setPeerID(rls.getPeerID()) + .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) + .setSizeOfLogQueue((int) rls.getSizeOfLogQueue()) + .setTimeStampOfLastShippedOp(rls.getTimestampOfLastShippedOp()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.isRecovered()).setRunning(rls.isRunning()) + 
.setEditsSinceRestart(rls.hasEditsSinceRestart()) + .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) + .setOPsShipped(rls.getOPsShipped()).setEditsRead(rls.getEditsRead()).build(); + } + + public static ClusterStatusProtos.ReplicationLoadSink + toReplicationLoadSink(ReplicationLoadSink rls) { return ClusterStatusProtos.ReplicationLoadSink.newBuilder() - .setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) - .setTimeStampsOfLastAppliedOp(rls.getTimestampsOfLastAppliedOp()) - .setTimestampStarted(rls.getTimestampStarted()) - .setTotalOpsProcessed(rls.getTotalOpsProcessed()) - .build(); + .setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) + .setTimeStampsOfLastAppliedOp(rls.getTimestampsOfLastAppliedOp()) + .setTimestampStarted(rls.getTimestampStarted()) + .setTotalOpsProcessed(rls.getTotalOpsProcessed()).build(); } public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { if (timeRange == null) { timeRange = TimeRange.allTime(); } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()) - .setTo(timeRange.getMax()) + return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) .build(); } public static byte[] toCompactionEventTrackerBytes(Set storeFiles) { HFileProtos.CompactionEventTracker.Builder builder = - HFileProtos.CompactionEventTracker.newBuilder(); + HFileProtos.CompactionEventTracker.newBuilder(); storeFiles.forEach(sf -> builder.addCompactedStoreFile(ByteString.copyFromUtf8(sf))); return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); } @@ -3486,20 +3297,20 @@ public static Set toCompactedStoreFiles(byte[] bytes) throws IOException if (bytes != null && ProtobufUtil.isPBMagicPrefix(bytes)) { int pbLen = ProtobufUtil.lengthOfPBMagic(); HFileProtos.CompactionEventTracker.Builder builder = - HFileProtos.CompactionEventTracker.newBuilder(); + HFileProtos.CompactionEventTracker.newBuilder(); ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen); HFileProtos.CompactionEventTracker compactionEventTracker = builder.build(); List compactedStoreFiles = compactionEventTracker.getCompactedStoreFileList(); if (compactedStoreFiles != null && compactedStoreFiles.size() != 0) { return compactedStoreFiles.stream().map(ByteString::toStringUtf8) - .collect(Collectors.toSet()); + .collect(Collectors.toSet()); } } return Collections.emptySet(); } - public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( - RegionStatesCount regionStatesCount) { + public static ClusterStatusProtos.RegionStatesCount + toTableRegionStatesCount(RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3512,17 +3323,13 @@ public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( regionsInTransition = regionStatesCount.getRegionsInTransition(); totalRegions = regionStatesCount.getTotalRegions(); } - return ClusterStatusProtos.RegionStatesCount.newBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return ClusterStatusProtos.RegionStatesCount.newBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } - public static RegionStatesCount toTableRegionStatesCount( - ClusterStatusProtos.RegionStatesCount regionStatesCount) { + public static 
RegionStatesCount + toTableRegionStatesCount(ClusterStatusProtos.RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3535,59 +3342,47 @@ public static RegionStatesCount toTableRegionStatesCount( splitRegions = regionStatesCount.getSplitRegions(); totalRegions = regionStatesCount.getTotalRegions(); } - return new RegionStatesCount.RegionStatesCountBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return new RegionStatesCount.RegionStatesCountBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } /** * Convert Protobuf class - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} - * To client SlowLog Payload class {@link OnlineLogRecord} - * + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} To client + * SlowLog Payload class {@link OnlineLogRecord} * @param slowLogPayload SlowLog Payload protobuf instance * @return SlowLog Payload for client usecase */ - private static LogEntry getSlowLogRecord( - final TooSlowLog.SlowLogPayload slowLogPayload) { - OnlineLogRecord onlineLogRecord = new OnlineLogRecord.OnlineLogRecordBuilder() - .setCallDetails(slowLogPayload.getCallDetails()) - .setClientAddress(slowLogPayload.getClientAddress()) - .setMethodName(slowLogPayload.getMethodName()) - .setMultiGetsCount(slowLogPayload.getMultiGets()) - .setMultiMutationsCount(slowLogPayload.getMultiMutations()) - .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) - .setParam(slowLogPayload.getParam()) - .setProcessingTime(slowLogPayload.getProcessingTime()) - .setQueueTime(slowLogPayload.getQueueTime()) - .setRegionName(slowLogPayload.getRegionName()) - .setResponseSize(slowLogPayload.getResponseSize()) - .setServerClass(slowLogPayload.getServerClass()) - .setStartTime(slowLogPayload.getStartTime()) - .setUserName(slowLogPayload.getUserName()) - .build(); + private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLogPayload) { + OnlineLogRecord onlineLogRecord = + new OnlineLogRecord.OnlineLogRecordBuilder().setCallDetails(slowLogPayload.getCallDetails()) + .setClientAddress(slowLogPayload.getClientAddress()) + .setMethodName(slowLogPayload.getMethodName()) + .setMultiGetsCount(slowLogPayload.getMultiGets()) + .setMultiMutationsCount(slowLogPayload.getMultiMutations()) + .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) + .setParam(slowLogPayload.getParam()).setProcessingTime(slowLogPayload.getProcessingTime()) + .setQueueTime(slowLogPayload.getQueueTime()).setRegionName(slowLogPayload.getRegionName()) + .setResponseSize(slowLogPayload.getResponseSize()) + .setServerClass(slowLogPayload.getServerClass()).setStartTime(slowLogPayload.getStartTime()) + .setUserName(slowLogPayload.getUserName()).build(); return onlineLogRecord; } /** - * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} - * + * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} * @param logEntry slowlog response protobuf instance * @return list of SlowLog payloads for client usecase */ - public static List toSlowLogPayloads( - final HBaseProtos.LogEntry logEntry) { + public static List toSlowLogPayloads(final HBaseProtos.LogEntry logEntry) { try { final 
String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("SlowLogResponses")) { - AdminProtos.SlowLogResponses slowLogResponses = (AdminProtos.SlowLogResponses) method - .invoke(null, logEntry.getLogMessage()); + AdminProtos.SlowLogResponses slowLogResponses = + (AdminProtos.SlowLogResponses) method.invoke(null, logEntry.getLogMessage()); return slowLogResponses.getSlowLogPayloadsList().stream() .map(ProtobufUtil::getSlowLogRecord).collect(Collectors.toList()); } @@ -3600,7 +3395,6 @@ public static List toSlowLogPayloads( /** * Convert {@link ClearSlowLogResponses} to boolean - * * @param clearSlowLogResponses Clear slowlog response protobuf instance * @return boolean representing clear slowlog response */ @@ -3608,34 +3402,30 @@ public static boolean toClearSlowLogPayload(final ClearSlowLogResponses clearSlo return clearSlowLogResponses.getIsCleaned(); } - public static void populateBalanceRSGroupResponse(RSGroupAdminProtos.BalanceRSGroupResponse.Builder responseBuilder, BalanceResponse response) { - responseBuilder - .setBalanceRan(response.isBalancerRan()) + public static void populateBalanceRSGroupResponse( + RSGroupAdminProtos.BalanceRSGroupResponse.Builder responseBuilder, BalanceResponse response) { + responseBuilder.setBalanceRan(response.isBalancerRan()) .setMovesCalculated(response.getMovesCalculated()) .setMovesExecuted(response.getMovesExecuted()); } - public static BalanceResponse toBalanceResponse(RSGroupAdminProtos.BalanceRSGroupResponse response) { - return BalanceResponse.newBuilder() - .setBalancerRan(response.getBalanceRan()) + public static BalanceResponse + toBalanceResponse(RSGroupAdminProtos.BalanceRSGroupResponse response) { + return BalanceResponse.newBuilder().setBalancerRan(response.getBalanceRan()) .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) .setMovesCalculated(response.hasMovesCalculated() ? 
response.getMovesCalculated() : 0) .build(); } - public static RSGroupAdminProtos.BalanceRSGroupRequest createBalanceRSGroupRequest(String groupName, BalanceRequest request) { - return RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder() - .setRSGroupName(groupName) - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + public static RSGroupAdminProtos.BalanceRSGroupRequest + createBalanceRSGroupRequest(String groupName, BalanceRequest request) { + return RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder().setRSGroupName(groupName) + .setDryRun(request.isDryRun()).setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } public static BalanceRequest toBalanceRequest(RSGroupAdminProtos.BalanceRSGroupRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { @@ -3650,8 +3440,8 @@ public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { .map(ProtobufUtil::toTableName).collect(Collectors.toList()); rsGroupInfo.addAllTables(tables); - proto.getConfigurationList().forEach(pair -> - rsGroupInfo.setConfiguration(pair.getName(), pair.getValue())); + proto.getConfigurationList() + .forEach(pair -> rsGroupInfo.setConfiguration(pair.getName(), pair.getValue())); return rsGroupInfo; } @@ -3663,14 +3453,16 @@ public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { List hostports = new ArrayList<>(pojo.getServers().size()); for (Address el : pojo.getServers()) { hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) - .setPort(el.getPort()).build()); + .setPort(el.getPort()).build()); } - List configuration = pojo.getConfiguration().entrySet() - .stream().map(entry -> NameStringPair.newBuilder() + List< + NameStringPair> configuration = + pojo + .getConfiguration().entrySet().stream().map(entry -> NameStringPair.newBuilder() .setName(entry.getKey()).setValue(entry.getValue()).build()) - .collect(Collectors.toList()); + .collect(Collectors.toList()); return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports) - .addAllTables(tables).addAllConfiguration(configuration).build(); + .addAllTables(tables).addAllConfiguration(configuration).build(); } public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, @@ -3681,13 +3473,13 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = condition.hasTimeRange() + ? 
ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3718,13 +3510,13 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = condition.hasTimeRange() + ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3739,8 +3531,8 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } else if (m instanceof Append) { return builder.build((Append) m); } else { - throw new DoNotRetryIOException("Unsupported mutate type: " + m.getClass() - .getSimpleName().toUpperCase()); + throw new DoNotRetryIOException( + "Unsupported mutate type: " + m.getClass().getSimpleName().toUpperCase()); } } else { return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations)); @@ -3754,15 +3546,15 @@ public static ClientProtos.Condition toCondition(final byte[] row, final byte[] final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange) throws IOException { - ClientProtos.Condition.Builder builder = ClientProtos.Condition.newBuilder() - .setRow(UnsafeByteOperations.unsafeWrap(row)); + ClientProtos.Condition.Builder builder = + ClientProtos.Condition.newBuilder().setRow(UnsafeByteOperations.unsafeWrap(row)); if (filter != null) { builder.setFilter(ProtobufUtil.toFilter(filter)); } else { builder.setFamily(UnsafeByteOperations.unsafeWrap(family)) - .setQualifier(UnsafeByteOperations.unsafeWrap( - qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)) + .setQualifier(UnsafeByteOperations + .unsafeWrap(qualifier == null ? 
HConstants.EMPTY_BYTE_ARRAY : qualifier)) .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); } @@ -3776,21 +3568,19 @@ public static ClientProtos.Condition toCondition(final byte[] row, final Filter } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final TimeRange timeRange) + throws IOException { return toCondition(row, family, qualifier, op, value, null, timeRange); } - public static List toBalancerDecisionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerDecisionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerDecisionsResponse")) { MasterProtos.BalancerDecisionsResponse response = - (MasterProtos.BalancerDecisionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerDecisionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerDecisionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException @@ -3800,16 +3590,14 @@ public static List toBalancerDecisionResponse( throw new RuntimeException("Invalid response from server"); } - public static List toBalancerRejectionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerRejectionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerRejectionsResponse")) { MasterProtos.BalancerRejectionsResponse response = - (MasterProtos.BalancerRejectionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerRejectionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerRejectionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException @@ -3819,32 +3607,33 @@ public static List toBalancerRejectionResponse( throw new RuntimeException("Invalid response from server"); } - public static List getBalancerDecisionEntries( - MasterProtos.BalancerDecisionsResponse response) { + public static List + getBalancerDecisionEntries(MasterProtos.BalancerDecisionsResponse response) { List balancerDecisions = response.getBalancerDecisionList(); if (CollectionUtils.isEmpty(balancerDecisions)) { return Collections.emptyList(); } - return balancerDecisions.stream().map(balancerDecision -> new BalancerDecision.Builder() - .setInitTotalCost(balancerDecision.getInitTotalCost()) - .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) - .setComputedTotalCost(balancerDecision.getComputedTotalCost()) - .setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) - .setComputedSteps(balancerDecision.getComputedSteps()) - .setRegionPlans(balancerDecision.getRegionPlansList()).build()) + return balancerDecisions.stream() + .map(balancerDecision -> new BalancerDecision.Builder() + .setInitTotalCost(balancerDecision.getInitTotalCost()) + 
.setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) + .setComputedTotalCost(balancerDecision.getComputedTotalCost()) + .setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) + .setComputedSteps(balancerDecision.getComputedSteps()) + .setRegionPlans(balancerDecision.getRegionPlansList()).build()) .collect(Collectors.toList()); } - public static List getBalancerRejectionEntries( - MasterProtos.BalancerRejectionsResponse response) { + public static List + getBalancerRejectionEntries(MasterProtos.BalancerRejectionsResponse response) { List balancerRejections = response.getBalancerRejectionList(); if (CollectionUtils.isEmpty(balancerRejections)) { return Collections.emptyList(); } - return balancerRejections.stream().map(balancerRejection -> new BalancerRejection.Builder() - .setReason(balancerRejection.getReason()) - .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()) - .build()) + return balancerRejections.stream() + .map(balancerRejection -> new BalancerRejection.Builder() + .setReason(balancerRejection.getReason()) + .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()).build()) .collect(Collectors.toList()); } @@ -3853,8 +3642,7 @@ public static HBaseProtos.LogRequest toBalancerDecisionRequest(int limit) { MasterProtos.BalancerDecisionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() .setLogClassName(balancerDecisionsRequest.getClass().getName()) - .setLogMessage(balancerDecisionsRequest.toByteString()) - .build(); + .setLogMessage(balancerDecisionsRequest.toByteString()).build(); } public static HBaseProtos.LogRequest toBalancerRejectionRequest(int limit) { @@ -3862,58 +3650,43 @@ public static HBaseProtos.LogRequest toBalancerRejectionRequest(int limit) { MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() .setLogClassName(balancerRejectionsRequest.getClass().getName()) - .setLogMessage(balancerRejectionsRequest.toByteString()) - .build(); + .setLogMessage(balancerRejectionsRequest.toByteString()).build(); } public static MasterProtos.BalanceRequest toBalanceRequest(BalanceRequest request) { - return MasterProtos.BalanceRequest.newBuilder() - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + return MasterProtos.BalanceRequest.newBuilder().setDryRun(request.isDryRun()) + .setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } public static BalanceRequest toBalanceRequest(MasterProtos.BalanceRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } public static MasterProtos.BalanceResponse toBalanceResponse(BalanceResponse response) { - return MasterProtos.BalanceResponse.newBuilder() - .setBalancerRan(response.isBalancerRan()) + return MasterProtos.BalanceResponse.newBuilder().setBalancerRan(response.isBalancerRan()) .setMovesCalculated(response.getMovesCalculated()) - .setMovesExecuted(response.getMovesExecuted()) - .build(); + .setMovesExecuted(response.getMovesExecuted()).build(); } public static BalanceResponse toBalanceResponse(MasterProtos.BalanceResponse response) { return BalanceResponse.newBuilder() .setBalancerRan(response.hasBalancerRan() && 
response.getBalancerRan()) .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesExecuted() : 0) - .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) - .build(); + .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0).build(); } public static ServerTask getServerTask(ClusterStatusProtos.ServerTask task) { - return ServerTaskBuilder.newBuilder() - .setDescription(task.getDescription()) - .setStatus(task.getStatus()) - .setState(ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + return ServerTaskBuilder.newBuilder().setDescription(task.getDescription()) + .setStatus(task.getStatus()).setState(ServerTask.State.valueOf(task.getState().name())) + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } public static ClusterStatusProtos.ServerTask toServerTask(ServerTask task) { - return ClusterStatusProtos.ServerTask.newBuilder() - .setDescription(task.getDescription()) + return ClusterStatusProtos.ServerTask.newBuilder().setDescription(task.getDescription()) .setStatus(task.getStatus()) .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 3008956d7517..addda9c59860 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -167,8 +167,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; /** - * Helper utility to build protocol buffer requests, - * or build components for protocol buffer requests. + * Helper utility to build protocol buffer requests, or build components for protocol buffer + * requests. 
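As a quick illustration of how the balance-request converters in the hunks above fit together (a sketch only, not part of the patch; it assumes these methods live in the shaded ProtobufUtil, as the surrounding qualified calls suggest, and the class name below is made up), a caller can round-trip the client-side POJO through its protobuf form:

import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

// Hedged sketch: round-trip a BalanceRequest through the converters shown above.
final class BalanceRequestRoundTrip {
  static BalanceRequest roundTrip() {
    BalanceRequest pojo = BalanceRequest.newBuilder()
      .setDryRun(true)                      // compute region plans without executing moves
      .setIgnoreRegionsInTransition(false)  // respect regions that are still moving
      .build();
    MasterProtos.BalanceRequest proto = ProtobufUtil.toBalanceRequest(pojo);
    // The hasDryRun()/getDryRun() guards in the converter above keep this null-safe.
    return ProtobufUtil.toBalanceRequest(proto);
  }
}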
*/ @InterfaceAudience.Private public final class RequestConverter { @@ -176,20 +176,18 @@ public final class RequestConverter { private RequestConverter() { } -// Start utilities for Client + // Start utilities for Client /** * Create a protocol buffer GetRequest for a client Get - * * @param regionName the name of the region to get - * @param get the client Get + * @param get the client Get * @return a protocol buffer GetRequest */ - public static GetRequest buildGetRequest(final byte[] regionName, - final Get get) throws IOException { + public static GetRequest buildGetRequest(final byte[] regionName, final Get get) + throws IOException { GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setGet(ProtobufUtil.toGet(get)); return builder.build(); @@ -197,9 +195,7 @@ public static GetRequest buildGetRequest(final byte[] regionName, /** * Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append - * - * @return a mutate request - * @throws IOException + * @return a mutate request n */ public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value, @@ -219,21 +215,19 @@ public static MutateRequest buildMutateRequest(final byte[] regionName, final by /** * Create a protocol buffer MultiRequest for conditioned row mutations - * - * @return a multi request - * @throws IOException + * @return a multi request n */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange, + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final byte[] value, final Filter filter, final TimeRange timeRange, final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { - return buildMultiRequest(regionName, rowMutations, ProtobufUtil.toCondition(row, family, - qualifier, op, value, filter, timeRange), nonceGroup, nonce); + return buildMultiRequest(regionName, rowMutations, + ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange), nonceGroup, + nonce); } /** * Create a protocol buffer MultiRequest for row mutations - * * @return a multi request */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, @@ -251,7 +245,7 @@ private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionNa boolean hasNonce = false; ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); MutationProto.Builder mutationBuilder = MutationProto.newBuilder(); - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -278,104 +272,74 @@ private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionNa } /** - * Create a protocol buffer MutateRequest for a put - * - * @param regionName - * @param put - * @return a mutate request - * @throws IOException + * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n */ - public static MutateRequest 
buildMutateRequest( - final byte[] regionName, final Put put) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, put, MutationProto.newBuilder())); return builder.build(); } /** - * Create a protocol buffer MutateRequest for an append - * - * @param regionName - * @param append - * @return a mutate request - * @throws IOException + * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Append append, long nonceGroup, long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append, + long nonceGroup, long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } - builder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, append, - MutationProto.newBuilder(), nonce)); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.APPEND, append, MutationProto.newBuilder(), nonce)); return builder.build(); } /** - * Create a protocol buffer MutateRequest for a client increment - * - * @param regionName - * @param increment - * @return a mutate request + * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Increment increment, final long nonceGroup, final long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment, + final long nonceGroup, final long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } builder.setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, increment, - MutationProto.newBuilder(), nonce)); + MutationProto.newBuilder(), nonce)); return builder.build(); } /** - * Create a protocol buffer MutateRequest for a delete - * - * @param regionName - * @param delete - * @return a mutate request - * @throws IOException + * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Delete delete) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - 
RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); - builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, - MutationProto.newBuilder())); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, delete, MutationProto.newBuilder())); return builder.build(); } public static RegionAction.Builder getRegionActionBuilderWithRegion( - final RegionAction.Builder regionActionBuilder, final byte [] regionName) { + final RegionAction.Builder regionActionBuilder, final byte[] regionName) { RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); regionActionBuilder.setRegion(region); return regionActionBuilder; } /** - * Create a protocol buffer ScanRequest for a client Scan - * - * @param regionName - * @param scan - * @param numberOfRows - * @param closeScanner - * @return a scan request - * @throws IOException + * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n */ public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows, - boolean closeScanner) throws IOException { + boolean closeScanner) throws IOException { ScanRequest.Builder builder = ScanRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setNumberOfRows(numberOfRows); @@ -392,14 +356,10 @@ public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int num } /** - * Create a protocol buffer ScanRequest for a scanner id - * @param scannerId - * @param numberOfRows - * @param closeScanner - * @return a scan request + * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request */ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner, - boolean trackMetrics) { + boolean trackMetrics) { ScanRequest.Builder builder = ScanRequest.newBuilder(); builder.setNumberOfRows(numberOfRows); builder.setCloseScanner(closeScanner); @@ -411,15 +371,10 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo } /** - * Create a protocol buffer ScanRequest for a scanner id - * @param scannerId - * @param numberOfRows - * @param closeScanner - * @param nextCallSeq - * @return a scan request + * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request */ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner, - long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) { + long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) { ScanRequest.Builder builder = ScanRequest.newBuilder(); builder.setNumberOfRows(numberOfRows); builder.setCloseScanner(closeScanner); @@ -436,50 +391,38 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo } /** - * Create a protocol buffer bulk load request - * - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @param userToken - * @param bulkToken - * @param copyFiles - * @return a bulk load request + * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request */ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( - final List> familyPaths, final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken, boolean copyFiles, - List clusterIds, boolean replicate) { - RegionSpecifier region = 
RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, + final Token userToken, final String bulkToken, boolean copyFiles, List clusterIds, + boolean replicate) { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); ClientProtos.DelegationToken protoDT = null; if (userToken != null) { - protoDT = - ClientProtos.DelegationToken.newBuilder() - .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) - .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + protoDT = ClientProtos.DelegationToken.newBuilder() + .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) + .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } - List protoFamilyPaths = new ArrayList<>(familyPaths.size()); + List protoFamilyPaths = + new ArrayList<>(familyPaths.size()); if (!familyPaths.isEmpty()) { - ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder - = ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); - for(Pair el: familyPaths) { - protoFamilyPaths.add(pathBuilder - .setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) + ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder = + ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); + for (Pair el : familyPaths) { + protoFamilyPaths.add(pathBuilder.setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) .setPath(el.getSecond()).build()); } pathBuilder.clear(); } - BulkLoadHFileRequest.Builder request = - ClientProtos.BulkLoadHFileRequest.newBuilder() - .setRegion(region) - .setAssignSeqNum(assignSeqNum) - .addAllFamilyPath(protoFamilyPaths); + BulkLoadHFileRequest.Builder request = ClientProtos.BulkLoadHFileRequest.newBuilder() + .setRegion(region).setAssignSeqNum(assignSeqNum).addAllFamilyPath(protoFamilyPaths); if (userToken != null) { request.setFsToken(protoDT); } @@ -496,47 +439,46 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( /** * Create a protocol buffer multirequest with NO data for a list of actions (data is carried - * otherwise than via protobuf). This means it just notes attributes, whether to write the - * WAL, etc., and the presence in protobuf serves as place holder for the data which is - * coming along otherwise. Note that Get is different. It does not contain 'data' and is always - * carried by protobuf. We return references to the data by adding them to the passed in - * data param. - *

-   * <p> Propagates Actions original index.
-   * <p> The passed in multiRequestBuilder will be populated with region actions.
-   * @param regionName The region name of the actions.
-   * @param actions The actions that are grouped by the same region name.
-   * @param cells Place to stuff references to actual data.
+   * otherwise than via protobuf). This means it just notes attributes, whether to write the WAL,
+   * etc., and the presence in protobuf serves as place holder for the data which is coming along
+   * otherwise. Note that Get is different. It does not contain 'data' and is always carried by
+   * protobuf. We return references to the data by adding them to the passed in data
+   * param.
+   * <p>
+   * Propagates Actions original index.
+   * <p>

    + * The passed in multiRequestBuilder will be populated with region actions. + * @param regionName The region name of the actions. + * @param actions The actions that are grouped by the same region name. + * @param cells Place to stuff references to actual data. * @param multiRequestBuilder The multiRequestBuilder to be populated with region actions. * @param regionActionBuilder regionActionBuilder to be used to build region action. - * @param actionBuilder actionBuilder to be used to build action. - * @param mutationBuilder mutationBuilder to be used to build mutation. - * @param nonceGroup nonceGroup to be applied. - * @param indexMap Map of created RegionAction to the original index for a - * RowMutations/CheckAndMutate within the original list of actions - * @throws IOException + * @param actionBuilder actionBuilder to be used to build action. + * @param mutationBuilder mutationBuilder to be used to build mutation. + * @param nonceGroup nonceGroup to be applied. + * @param indexMap Map of created RegionAction to the original index for a + * RowMutations/CheckAndMutate within the original list of actions n */ public static void buildNoDataRegionActions(final byte[] regionName, - final Iterable actions, final List cells, - final MultiRequest.Builder multiRequestBuilder, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder, - long nonceGroup, final Map indexMap) throws IOException { + final Iterable actions, final List cells, + final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, + long nonceGroup, final Map indexMap) throws IOException { regionActionBuilder.clear(); - RegionAction.Builder builder = getRegionActionBuilderWithRegion( - regionActionBuilder, regionName); + RegionAction.Builder builder = + getRegionActionBuilderWithRegion(regionActionBuilder, regionName); ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null; boolean hasNonce = false; List rowMutationsList = new ArrayList<>(); List checkAndMutates = new ArrayList<>(); - for (Action action: actions) { + for (Action action : actions) { Row row = action.getAction(); actionBuilder.clear(); actionBuilder.setIndex(action.getOriginalIndex()); mutationBuilder.clear(); if (row instanceof Get) { - Get g = (Get)row; + Get g = (Get) row; builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g))); } else if (row instanceof Put) { buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder); @@ -554,18 +496,17 @@ public static void buildNoDataRegionActions(final byte[] regionName, RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row; // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString. 
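To make the javadoc above concrete: the "actions that are grouped by the same region name" ultimately come from an ordinary client batch, in which Gets travel fully inside the protobuf while Put and Delete cells ride out-of-band. A minimal, hypothetical client-side sketch follows (table and row names are made up; the per-region grouping and the call into buildNoDataRegionActions happen inside the client internals, not in user code):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class MixedBatchSketch {
  static void run(Connection connection) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("demo"))) { // "demo" is made up
      byte[] family = Bytes.toBytes("f");
      List<Row> actions = Arrays.<Row>asList(
        new Get(Bytes.toBytes("row-1")),                               // carried fully in protobuf
        new Put(Bytes.toBytes("row-2"))
          .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")),  // cells go along the side
        new Delete(Bytes.toBytes("row-3")));                           // no cells yet; size is zero
      Object[] results = new Object[actions.size()];
      table.batch(actions, results); // per-action results, in the same order as 'actions'
    }
  }
}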
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value = - org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap( - exec.getRequest().toByteArray()); + org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(exec.getRequest().toByteArray()); if (cpBuilder == null) { cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder(); } else { cpBuilder.clear(); } - builder.addAction(actionBuilder.setServiceCall( - cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) - .setServiceName(exec.getMethod().getService().getFullName()) - .setMethodName(exec.getMethod().getName()) - .setRequest(value))); + builder.addAction(actionBuilder + .setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) + .setServiceName(exec.getMethod().getService().getFullName()) + .setMethodName(exec.getMethod().getName()).setRequest(value))); } else if (row instanceof RowMutations) { rowMutationsList.add(action); } else if (row instanceof CheckAndMutate) { @@ -609,9 +550,9 @@ public static void buildNoDataRegionActions(final byte[] regionName, getRegionActionBuilderWithRegion(builder, regionName); CheckAndMutate cam = (CheckAndMutate) action.getAction(); - builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), - cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(), - cam.getTimeRange())); + builder + .setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(), + cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange())); if (cam.getAction() instanceof Put) { actionBuilder.clear(); @@ -643,8 +584,8 @@ public static void buildNoDataRegionActions(final byte[] regionName, } builder.setAtomic(true); } else { - throw new DoNotRetryIOException("CheckAndMutate doesn't support " + - cam.getAction().getClass().getName()); + throw new DoNotRetryIOException( + "CheckAndMutate doesn't support " + cam.getAction().getClass().getName()); } multiRequestBuilder.addRegionAction(builder.build()); @@ -660,50 +601,48 @@ public static void buildNoDataRegionActions(final byte[] regionName, } private static void buildNoDataRegionAction(final Put put, final List cells, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException { cells.add(put); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); } - private static void buildNoDataRegionAction(final Delete delete, - final List cells, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + private static void buildNoDataRegionAction(final Delete delete, final List cells, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, + final MutationProto.Builder mutationBuilder) throws IOException { int size = delete.size(); // Note that a legitimate Delete may have a size of zero; i.e. a Delete that has nothing - // in it but the row to delete. In this case, the current implementation does not make + // in it but the row to delete. 
In this case, the current implementation does not make // a KeyValue to represent a delete-of-all-the-row until we serialize... For such cases // where the size returned is zero, we will send the Delete fully pb'd rather than have // metadata only in the pb and then send the kv along the side in cells. if (size > 0) { cells.add(delete); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); } else { - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); } } private static void buildNoDataRegionAction(final Increment increment, final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(increment); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.INCREMENT, increment, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.INCREMENT, increment, mutationBuilder, nonce))); } - private static void buildNoDataRegionAction(final Append append, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + private static void buildNoDataRegionAction(final Append append, final List cells, + long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(append); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.APPEND, append, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.APPEND, append, mutationBuilder, nonce))); } /** @@ -714,7 +653,7 @@ private static boolean buildNoDataRegionAction(final RowMutations rowMutations, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException { boolean ret = false; - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -743,45 +682,39 @@ private static MutationType getMutationType(Mutation mutation) { } } -// End utilities for Client -//Start utilities for Admin + // End utilities for Client + // Start utilities for Admin /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * * @param regionName the name of the region to get info * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName) { return buildGetRegionInfoRequest(regionName, 
false); } /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * - * @param regionName the name of the region to get info + * @param regionName the name of the region to get info * @param includeCompactionState indicate if the compaction state is requested * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, + final boolean includeCompactionState) { return buildGetRegionInfoRequest(regionName, includeCompactionState, false); } /** - * - * @param regionName the name of the region to get info - * @param includeCompactionState indicate if the compaction state is requested - * @param includeBestSplitRow indicate if the bestSplitRow is requested + * @param regionName the name of the region to get info + * @param includeCompactionState indicate if the compaction state is requested + * @param includeBestSplitRow indicate if the bestSplitRow is requested * @return protocol buffer GetRegionInfoRequest */ public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState, boolean includeBestSplitRow) { + final boolean includeCompactionState, boolean includeBestSplitRow) { GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (includeCompactionState) { builder.setCompactionState(includeCompactionState); @@ -824,7 +757,7 @@ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName /** * Create a protocol buffer FlushRegionRequest for a given region name - * @param regionName the name of the region to get info + * @param regionName the name of the region to get info * @param columnFamily column family within a region * @return a protocol buffer FlushRegionRequest */ @@ -842,13 +775,13 @@ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName /** * Create a protocol buffer OpenRegionRequest for a given region - * @param server the serverName for the RPC - * @param region the region to open + * @param server the serverName for the RPC + * @param region the region to open * @param favoredNodes a list of favored nodes * @return a protocol buffer OpenRegionRequest */ - public static OpenRegionRequest buildOpenRegionRequest(ServerName server, - final RegionInfo region, List favoredNodes) { + public static OpenRegionRequest buildOpenRegionRequest(ServerName server, final RegionInfo region, + List favoredNodes) { OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes, -1L)); if (server != null) { @@ -864,7 +797,7 @@ public static OpenRegionRequest buildOpenRegionRequest(ServerName server, * @return a protocol buffer UpdateFavoredNodesRequest */ public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest( - final List>> updateRegionInfos) { + final List>> updateRegionInfos) { UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder(); if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) { RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder(); @@ -893,12 +826,10 @@ public static WarmupRegionRequest 
buildWarmupRegionRequest(final RegionInfo regi /** * Create a CompactRegionRequest for a given region name * @param regionName the name of the region to get info - * @param major indicator if it is a major compaction - * @param columnFamily - * @return a CompactRegionRequest + * @param major indicator if it is a major compaction n * @return a CompactRegionRequest */ public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major, - byte[] columnFamily) { + byte[] columnFamily) { CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); @@ -912,8 +843,8 @@ public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, /** * @see #buildRollWALWriterRequest() */ - private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder() - .build(); + private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = + RollWALWriterRequest.newBuilder().build(); /** * Create a new RollWALWriterRequest @@ -926,8 +857,8 @@ public static RollWALWriterRequest buildRollWALWriterRequest() { /** * @see #buildGetServerInfoRequest() */ - private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder() - .build(); + private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = + GetServerInfoRequest.newBuilder().build(); /** * Create a new GetServerInfoRequest @@ -948,17 +879,16 @@ public static StopServerRequest buildStopServerRequest(final String reason) { return builder.build(); } -//End utilities for Admin + // End utilities for Admin /** * Convert a byte array to a protocol buffer RegionSpecifier - * - * @param type the region specifier type + * @param type the region specifier type * @param value the region specifier byte array value * @return a protocol buffer RegionSpecifier */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { + public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType type, + final byte[] value) { RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); regionBuilder.setValue(UnsafeByteOperations.unsafeWrap(value)); regionBuilder.setType(type); @@ -966,17 +896,10 @@ public static RegionSpecifier buildRegionSpecifier( } /** - * Create a protocol buffer AddColumnRequest - * - * @param tableName - * @param column - * @return an AddColumnRequest + * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest */ - public static AddColumnRequest buildAddColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static AddColumnRequest buildAddColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -986,17 +909,10 @@ public static AddColumnRequest buildAddColumnRequest( } /** - * Create a protocol buffer DeleteColumnRequest - * - * @param tableName - * @param columnName - * @return a DeleteColumnRequest + * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest */ - public static DeleteColumnRequest buildDeleteColumnRequest( - final TableName tableName, - final byte [] columnName, - 
final long nonceGroup, - final long nonce) { + public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName, + final byte[] columnName, final long nonceGroup, final long nonce) { DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnName(UnsafeByteOperations.unsafeWrap(columnName)); @@ -1006,17 +922,10 @@ public static DeleteColumnRequest buildDeleteColumnRequest( } /** - * Create a protocol buffer ModifyColumnRequest - * - * @param tableName - * @param column - * @return an ModifyColumnRequest + * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest */ - public static ModifyColumnRequest buildModifyColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -1025,9 +934,9 @@ public static ModifyColumnRequest buildModifyColumnRequest( return builder.build(); } - public static ModifyColumnStoreFileTrackerRequest - buildModifyColumnStoreFileTrackerRequest(final TableName tableName, final byte[] family, - final String dstSFT, final long nonceGroup, final long nonce) { + public static ModifyColumnStoreFileTrackerRequest buildModifyColumnStoreFileTrackerRequest( + final TableName tableName, final byte[] family, final String dstSFT, final long nonceGroup, + final long nonce) { ModifyColumnStoreFileTrackerRequest.Builder builder = ModifyColumnStoreFileTrackerRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); @@ -1039,16 +948,13 @@ public static ModifyColumnRequest buildModifyColumnRequest( } /** - * Create a protocol buffer MoveRegionRequest - * @param encodedRegionName - * @param destServerName - * @return A MoveRegionRequest + * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest */ public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, - ServerName destServerName) { + ServerName destServerName) { MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, - encodedRegionName)); + builder + .setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName)); if (destServerName != null) { builder.setDestServerName(ProtobufUtil.toServerName(destServerName)); } @@ -1056,14 +962,12 @@ public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, } public static MergeTableRegionsRequest buildMergeTableRegionsRequest( - final byte[][] encodedNameOfdaughaterRegions, - final boolean forcible, - final long nonceGroup, - final long nonce) throws DeserializationException { + final byte[][] encodedNameOfdaughaterRegions, final boolean forcible, final long nonceGroup, + final long nonce) throws DeserializationException { MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder(); - for (int i = 0; i< encodedNameOfdaughaterRegions.length; i++) { - builder.addRegion(buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfdaughaterRegions[i])); + for (int i 
= 0; i < encodedNameOfdaughaterRegions.length; i++) { + builder.addRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, + encodedNameOfdaughaterRegions[i])); } builder.setForcible(forcible); builder.setNonceGroup(nonceGroup); @@ -1072,8 +976,8 @@ public static MergeTableRegionsRequest buildMergeTableRegionsRequest( } public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionInfo regionInfo, - final byte[] splitRow, final long nonceGroup, final long nonce) - throws DeserializationException { + final byte[] splitRow, final long nonceGroup, final long nonce) + throws DeserializationException { SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder(); builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo)); if (splitRow != null) { @@ -1085,52 +989,37 @@ public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionI } /** - * Create a protocol buffer AssignRegionRequest - * - * @param regionName - * @return an AssignRegionRequest + * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest */ - public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) { + public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) { AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** - * Creates a protocol buffer UnassignRegionRequest - * - * @param regionName - * @return an UnassignRegionRequest + * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest */ - public static UnassignRegionRequest buildUnassignRegionRequest( - final byte [] regionName) { + public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) { UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** - * Creates a protocol buffer OfflineRegionRequest - * - * @param regionName - * @return an OfflineRegionRequest + * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest */ - public static OfflineRegionRequest buildOfflineRegionRequest(final byte [] regionName) { + public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) { OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** - * Creates a protocol buffer DeleteTableRequest - * - * @param tableName - * @return a DeleteTableRequest + * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest */ - public static DeleteTableRequest buildDeleteTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1140,16 +1029,12 @@ 
public static DeleteTableRequest buildDeleteTableRequest( /** * Creates a protocol buffer TruncateTableRequest - * - * @param tableName name of table to truncate + * @param tableName name of table to truncate * @param preserveSplits True if the splits should be preserved * @return a TruncateTableRequest */ - public static TruncateTableRequest buildTruncateTableRequest( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) { + public static TruncateTableRequest buildTruncateTableRequest(final TableName tableName, + final boolean preserveSplits, final long nonceGroup, final long nonce) { TruncateTableRequest.Builder builder = TruncateTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setPreserveSplits(preserveSplits); @@ -1159,15 +1044,10 @@ public static TruncateTableRequest buildTruncateTableRequest( } /** - * Creates a protocol buffer EnableTableRequest - * - * @param tableName - * @return an EnableTableRequest + * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest */ - public static EnableTableRequest buildEnableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static EnableTableRequest buildEnableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { EnableTableRequest.Builder builder = EnableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1176,15 +1056,10 @@ public static EnableTableRequest buildEnableTableRequest( } /** - * Creates a protocol buffer DisableTableRequest - * - * @param tableName - * @return a DisableTableRequest + * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest */ - public static DisableTableRequest buildDisableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DisableTableRequest buildDisableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setNonceGroup(nonceGroup); @@ -1193,21 +1068,14 @@ public static DisableTableRequest buildDisableTableRequest( } /** - * Creates a protocol buffer CreateTableRequest - * - * @param tableDescriptor - * @param splitKeys - * @return a CreateTableRequest + * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest */ - public static CreateTableRequest buildCreateTableRequest( - final TableDescriptor tableDescriptor, - final byte [][] splitKeys, - final long nonceGroup, - final long nonce) { + public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor, + final byte[][] splitKeys, final long nonceGroup, final long nonce) { CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); if (splitKeys != null) { - for(byte[] key : splitKeys) { + for (byte[] key : splitKeys) { builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key)); } } @@ -1217,17 +1085,10 @@ public static CreateTableRequest buildCreateTableRequest( } /** - * Creates a protocol buffer ModifyTableRequest - * - * @param tableName - * @param tableDesc - * @return a ModifyTableRequest + * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest */ - public static 
ModifyTableRequest buildModifyTableRequest( - final TableName tableName, - final TableDescriptor tableDesc, - final long nonceGroup, - final long nonce) { + public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, + final TableDescriptor tableDesc, final long nonceGroup, final long nonce) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); @@ -1248,13 +1109,10 @@ public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTracke } /** - * Creates a protocol buffer GetTableDescriptorsRequest - * - * @param tableNames - * @return a GetTableDescriptorsRequest + * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final List tableNames) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final List tableNames) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (tableNames != null) { for (TableName tableName : tableNames) { @@ -1266,13 +1124,12 @@ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( /** * Creates a protocol buffer GetTableDescriptorsRequest - * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableDescriptorsRequest */ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (pattern != null) { builder.setRegex(pattern.toString()); @@ -1283,13 +1140,12 @@ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final P /** * Creates a protocol buffer GetTableNamesRequest - * - * @param pattern The compiled regular expression to match against + * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableNamesRequest */ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern pattern, - boolean includeSysTables) { + boolean includeSysTables) { GetTableNamesRequest.Builder builder = GetTableNamesRequest.newBuilder(); if (pattern != null) { builder.setRegex(pattern.toString()); @@ -1305,7 +1161,7 @@ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern patte */ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final TableState state) { return SetTableStateInMetaRequest.newBuilder().setTableState(state.convert()) - .setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build(); + .setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build(); } /** @@ -1332,20 +1188,17 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table - * * @param tableName the table name * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final TableName tableName) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final TableName tableName) { return 
GetTableDescriptorsRequest.newBuilder() - .addTableNames(ProtobufUtil.toProtoTableName(tableName)) - .build(); + .addTableNames(ProtobufUtil.toProtoTableName(tableName)).build(); } /** * Creates a protocol buffer IsMasterRunningRequest - * * @return a IsMasterRunningRequest */ public static IsMasterRunningRequest buildIsMasterRunningRequest() { @@ -1353,21 +1206,15 @@ public static IsMasterRunningRequest buildIsMasterRunningRequest() { } /** - * Creates a protocol buffer SetBalancerRunningRequest - * - * @param on - * @param synchronous - * @return a SetBalancerRunningRequest + * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest */ - public static SetBalancerRunningRequest buildSetBalancerRunningRequest( - boolean on, - boolean synchronous) { + public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, + boolean synchronous) { return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); } /** * Creates a protocol buffer IsBalancerEnabledRequest - * * @return a IsBalancerEnabledRequest */ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { @@ -1376,28 +1223,23 @@ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { /** * Creates a protocol buffer ClearRegionBlockCacheRequest - * * @return a ClearRegionBlockCacheRequest */ public static ClearRegionBlockCacheRequest - buildClearRegionBlockCacheRequest(List hris) { + buildClearRegionBlockCacheRequest(List hris) { ClearRegionBlockCacheRequest.Builder builder = ClearRegionBlockCacheRequest.newBuilder(); - hris.forEach( - hri -> builder.addRegion( - buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()) - )); + hris.forEach(hri -> builder + .addRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()))); return builder.build(); } /** * Creates a protocol buffer GetClusterStatusRequest - * * @return A GetClusterStatusRequest */ public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet

    servers, - String targetGroup) { + String targetGroup) { Set hostPorts = Sets.newHashSet(); for (Address el : servers) { - hostPorts.add( - HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()) - .build()); + hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } return MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts) - .build(); + .build(); } public static RemoveServersRequest buildRemoveServersRequest(Set
    servers) { Set hostPorts = Sets.newHashSet(); - for(Address el: servers) { - hostPorts.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); + for (Address el : servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } - return RemoveServersRequest.newBuilder() - .addAllServers(hostPorts) - .build(); + return RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index d62f0ac74e22..440891382e7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; /** - * Helper utility to build protocol buffer responses, - * or retrieve data from protocol buffer responses. + * Helper utility to build protocol buffer responses, or retrieve data from protocol buffer + * responses. */ @InterfaceAudience.Private public final class ResponseConverter { @@ -76,9 +76,7 @@ private ResponseConverter() { // Start utilities for Client public static SingleResponse getResult(final ClientProtos.MutateRequest request, - final ClientProtos.MutateResponse response, - final CellScanner cells) - throws IOException { + final ClientProtos.MutateResponse response, final CellScanner cells) throws IOException { SingleResponse singleResponse = new SingleResponse(); SingleResponse.Entry entry = new SingleResponse.Entry(); entry.setResult(ProtobufUtil.toResult(response.getResult(), cells)); @@ -89,37 +87,32 @@ public static SingleResponse getResult(final ClientProtos.MutateRequest request, /** * Get the results from a protocol buffer MultiResponse - * - * @param request the original protocol buffer MultiRequest + * @param request the original protocol buffer MultiRequest * @param response the protocol buffer MultiResponse to convert - * @param cells Cells to go with the passed in proto. Can be null. - * @return the results that were in the MultiResponse (a Result or an Exception). - * @throws IOException + * @param cells Cells to go with the passed in proto. Can be null. + * @return the results that were in the MultiResponse (a Result or an Exception). n */ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request, - final MultiResponse response, final CellScanner cells) - throws IOException { + final MultiResponse response, final CellScanner cells) throws IOException { return getResults(request, null, response, cells); } /** * Get the results from a protocol buffer MultiResponse - * - * @param request the original protocol buffer MultiRequest + * @param request the original protocol buffer MultiRequest * @param indexMap Used to support RowMutations/CheckAndMutate in batch * @param response the protocol buffer MultiResponse to convert - * @param cells Cells to go with the passed in proto. Can be null. 
- * @return the results that were in the MultiResponse (a Result or an Exception). - * @throws IOException + * @param cells Cells to go with the passed in proto. Can be null. + * @return the results that were in the MultiResponse (a Result or an Exception). n */ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request, - final Map indexMap, final MultiResponse response, - final CellScanner cells) throws IOException { + final Map indexMap, final MultiResponse response, final CellScanner cells) + throws IOException { int requestRegionActionCount = request.getRegionActionCount(); int responseRegionActionResultCount = response.getRegionActionResultCount(); if (requestRegionActionCount != responseRegionActionResultCount) { - throw new IllegalStateException("Request mutation count=" + requestRegionActionCount + - " does not match response mutation result count=" + responseRegionActionResultCount); + throw new IllegalStateException("Request mutation count=" + requestRegionActionCount + + " does not match response mutation result count=" + responseRegionActionResultCount); } org.apache.hadoop.hbase.client.MultiResponse results = @@ -129,23 +122,25 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult RegionAction actions = request.getRegionAction(i); RegionActionResult actionResult = response.getRegionActionResult(i); HBaseProtos.RegionSpecifier rs = actions.getRegion(); - if (rs.hasType() && - (rs.getType() != HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)){ + if ( + rs.hasType() + && (rs.getType() != HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + ) { throw new IllegalArgumentException( - "We support only encoded types for protobuf multi response."); + "We support only encoded types for protobuf multi response."); } byte[] regionName = rs.getValue().toByteArray(); if (actionResult.hasException()) { - Throwable regionException = ProtobufUtil.toException(actionResult.getException()); + Throwable regionException = ProtobufUtil.toException(actionResult.getException()); results.addException(regionName, regionException); continue; } if (actions.getActionCount() != actionResult.getResultOrExceptionCount()) { - throw new IllegalStateException("actions.getActionCount=" + actions.getActionCount() + - ", actionResult.getResultOrExceptionCount=" + - actionResult.getResultOrExceptionCount() + " for region " + actions.getRegion()); + throw new IllegalStateException("actions.getActionCount=" + actions.getActionCount() + + ", actionResult.getResultOrExceptionCount=" + actionResult.getResultOrExceptionCount() + + " for region " + actions.getRegion()); } // For RowMutations/CheckAndMutate action, if there is an exception, the exception is set @@ -239,7 +234,6 @@ private static Result getMutateRowResult(RegionActionResult actionResult, CellSc /** * Create a CheckAndMutateResult object from a protocol buffer MutateResponse - * * @return a CheckAndMutateResult object */ public static CheckAndMutateResult getCheckAndMutateResult( @@ -253,10 +247,7 @@ public static CheckAndMutateResult getCheckAndMutateResult( } /** - * Wrap a throwable to an action result. - * - * @param t - * @return an action result builder + * Wrap a throwable to an action result. 
n * @return an action result builder */ public static ResultOrException.Builder buildActionResult(final Throwable t) { ResultOrException.Builder builder = ResultOrException.newBuilder(); @@ -265,10 +256,7 @@ public static ResultOrException.Builder buildActionResult(final Throwable t) { } /** - * Wrap a throwable to an action result. - * - * @param r - * @return an action result builder + * Wrap a throwable to an action result. n * @return an action result builder */ public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) { ResultOrException.Builder builder = ResultOrException.newBuilder(); @@ -277,14 +265,12 @@ public static ResultOrException.Builder buildActionResult(final ClientProtos.Res } /** - * @param t - * @return NameValuePair of the exception name to stringified version os exception. + * n * @return NameValuePair of the exception name to stringified version os exception. */ public static NameBytesPair buildException(final Throwable t) { NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); parameterBuilder.setName(t.getClass().getName()); - parameterBuilder.setValue( - ByteString.copyFromUtf8(StringUtils.stringifyException(t))); + parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t))); return parameterBuilder.build(); } @@ -297,12 +283,11 @@ public static HasPermissionResponse buildHasPermissionResponse(boolean hasPermis return builder.build(); } -// End utilities for Client -// Start utilities for Admin + // End utilities for Client + // Start utilities for Admin /** * Get the list of region info from a GetOnlineRegionResponse - * * @param proto the GetOnlineRegionResponse * @return the list of region info */ @@ -313,25 +298,19 @@ public static List getRegionInfos(final GetOnlineRegionResponse prot /** * Check if the region is closed from a CloseRegionResponse - * * @param proto the CloseRegionResponse * @return the region close state */ - public static boolean isClosed - (final CloseRegionResponse proto) { + public static boolean isClosed(final CloseRegionResponse proto) { if (proto == null || !proto.hasClosed()) return false; return proto.getClosed(); } /** - * A utility to build a GetServerInfoResponse. - * - * @param serverName - * @param webuiPort - * @return the response + * A utility to build a GetServerInfoResponse. nn * @return the response */ - public static GetServerInfoResponse buildGetServerInfoResponse( - final ServerName serverName, final int webuiPort) { + public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName, + final int webuiPort) { GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder(); ServerInfo.Builder serverInfoBuilder = ServerInfo.newBuilder(); serverInfoBuilder.setServerName(ProtobufUtil.toServerName(serverName)); @@ -343,15 +322,12 @@ public static GetServerInfoResponse buildGetServerInfoResponse( } /** - * A utility to build a GetOnlineRegionResponse. - * - * @param regions - * @return the response + * A utility to build a GetOnlineRegionResponse. 
n * @return the response */ - public static GetOnlineRegionResponse buildGetOnlineRegionResponse( - final List regions) { + public static GetOnlineRegionResponse + buildGetOnlineRegionResponse(final List regions) { GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder(); - for (RegionInfo region: regions) { + for (RegionInfo region : regions) { builder.addRegionInfo(ProtobufUtil.toRegionInfo(region)); } return builder.build(); @@ -381,30 +357,29 @@ public static RunCleanerChoreResponse buildRunCleanerChoreResponse(boolean ran) return RunCleanerChoreResponse.newBuilder().setCleanerChoreRan(ran).build(); } -// End utilities for Admin + // End utilities for Admin /** * Creates a response for the last flushed sequence Id request * @return A GetLastFlushedSequenceIdResponse */ - public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse( - RegionStoreSequenceIds ids) { + public static GetLastFlushedSequenceIdResponse + buildGetLastFlushedSequenceIdResponse(RegionStoreSequenceIds ids) { return GetLastFlushedSequenceIdResponse.newBuilder() - .setLastFlushedSequenceId(ids.getLastFlushedSequenceId()) - .addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build(); + .setLastFlushedSequenceId(ids.getLastFlushedSequenceId()) + .addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build(); } /** - * Stores an exception encountered during RPC invocation so it can be passed back - * through to the client. + * Stores an exception encountered during RPC invocation so it can be passed back through to the + * client. * @param controller the controller instance provided by the client when calling the service - * @param ioe the exception encountered + * @param ioe the exception encountered */ - public static void setControllerException(RpcController controller, - IOException ioe) { + public static void setControllerException(RpcController controller, IOException ioe) { if (controller != null) { if (controller instanceof ServerRpcController) { - ((ServerRpcController)controller).setFailedOn(ioe); + ((ServerRpcController) controller).setFailedOn(ioe); } else { controller.setFailed(StringUtils.stringifyException(ioe)); } @@ -415,13 +390,13 @@ public static void setControllerException(RpcController controller, * Retreivies exception stored during RPC invocation. * @param controller the controller instance provided by the client when calling the service * @return exception if any, or null; Will return DoNotRetryIOException for string represented - * failure causes in controller. + * failure causes in controller. */ @Nullable public static IOException getControllerException(RpcController controller) throws IOException { if (controller != null && controller.failed()) { if (controller instanceof ServerRpcController) { - return ((ServerRpcController)controller).getFailedOn(); + return ((ServerRpcController) controller).getFailedOn(); } else { return new DoNotRetryIOException(controller.errorText()); } @@ -429,35 +404,31 @@ public static IOException getControllerException(RpcController controller) throw return null; } - /** - * Create Results from the cells using the cells meta data. - * @param cellScanner - * @param response - * @return results + * Create Results from the cells using the cells meta data. 
nnn */ public static Result[] getResults(CellScanner cellScanner, ScanResponse response) - throws IOException { + throws IOException { if (response == null) return null; // If cellscanner, then the number of Results to return is the count of elements in the - // cellsPerResult list. Otherwise, it is how many results are embedded inside the response. - int noOfResults = cellScanner != null? - response.getCellsPerResultCount(): response.getResultsCount(); + // cellsPerResult list. Otherwise, it is how many results are embedded inside the response. + int noOfResults = + cellScanner != null ? response.getCellsPerResultCount() : response.getResultsCount(); Result[] results = new Result[noOfResults]; for (int i = 0; i < noOfResults; i++) { if (cellScanner != null) { - // Cells are out in cellblocks. Group them up again as Results. How many to read at a + // Cells are out in cellblocks. Group them up again as Results. How many to read at a // time will be found in getCellsLength -- length here is how many Cells in the i'th Result int noOfCells = response.getCellsPerResult(i); boolean isPartial = - response.getPartialFlagPerResultCount() > i ? - response.getPartialFlagPerResult(i) : false; + response.getPartialFlagPerResultCount() > i ? response.getPartialFlagPerResult(i) : false; List cells = new ArrayList<>(noOfCells); for (int j = 0; j < noOfCells; j++) { try { if (cellScanner.advance() == false) { // We are not able to retrieve the exact number of cells which ResultCellMeta says us. - // We have to scan for the same results again. Throwing DNRIOE as a client retry on the + // We have to scan for the same results again. Throwing DNRIOE as a client retry on + // the // same scanner will result in OutOfOrderScannerNextException String msg = "Results sent from server=" + noOfResults + ". But only got " + i + " results completely at client. Resetting the scanner to scan again."; @@ -468,8 +439,9 @@ public static Result[] getResults(CellScanner cellScanner, ScanResponse response // We are getting IOE while retrieving the cells for Results. // We have to scan for the same results again. Throwing DNRIOE as a client retry on the // same scanner will result in OutOfOrderScannerNextException - LOG.error("Exception while reading cells from result." - + "Resetting the scanner to scan again.", ioe); + LOG.error( + "Exception while reading cells from result." 
+ "Resetting the scanner to scan again.", + ioe); throw new DoNotRetryIOException("Resetting the scanner.", ioe); } cells.add(cellScanner.current()); @@ -507,11 +479,11 @@ public static Map getScanMetrics(ScanResponse response) { /** * Creates a protocol buffer ClearRegionBlockCacheResponse - * * @return a ClearRegionBlockCacheResponse */ - public static AdminProtos.ClearRegionBlockCacheResponse buildClearRegionBlockCacheResponse(final HBaseProtos.CacheEvictionStats - cacheEvictionStats) { - return AdminProtos.ClearRegionBlockCacheResponse.newBuilder().setStats(cacheEvictionStats).build(); + public static AdminProtos.ClearRegionBlockCacheResponse + buildClearRegionBlockCacheResponse(final HBaseProtos.CacheEvictionStats cacheEvictionStats) { + return AdminProtos.ClearRegionBlockCacheResponse.newBuilder().setStats(cacheEvictionStats) + .build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java index 771ee8cffbec..5ea6144d0376 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.slowlog; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -33,17 +30,18 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog; + /** * Slowlog Accessor to record slow/large RPC log identified at each RegionServer RpcServer level. - * This can be done only optionally to record the entire history of slow/large rpc calls - * since RingBuffer can handle only limited latest records. + * This can be done only optionally to record the entire history of slow/large rpc calls since + * RingBuffer can handle only limited latest records. 
*/ @InterfaceAudience.Private public class SlowLogTableAccessor { @@ -53,14 +51,13 @@ public class SlowLogTableAccessor { private static Connection connection; /** - * hbase:slowlog table name - can be enabled - * with config - hbase.regionserver.slowlog.systable.enabled + * hbase:slowlog table name - can be enabled with config - + * hbase.regionserver.slowlog.systable.enabled */ public static final TableName SLOW_LOG_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "slowlog"); - private static void doPut(final Connection connection, final List puts) - throws IOException { + private static void doPut(final Connection connection, final List puts) throws IOException { try (Table table = connection.getTable(SLOW_LOG_TABLE_NAME)) { table.put(puts); } @@ -69,10 +66,10 @@ private static void doPut(final Connection connection, final List puts) /** * Add slow/large log records to hbase:slowlog table * @param slowLogPayloads List of SlowLogPayload to process - * @param configuration Configuration to use for connection + * @param configuration Configuration to use for connection */ public static void addSlowLogRecords(final List slowLogPayloads, - final Configuration configuration) { + final Configuration configuration) { List puts = new ArrayList<>(slowLogPayloads.size()); for (TooSlowLog.SlowLogPayload slowLogPayload : slowLogPayloads) { final byte[] rowKey = getRowKey(slowLogPayload); @@ -115,7 +112,7 @@ public static void addSlowLogRecords(final List slowL } private static synchronized void createConnection(Configuration configuration) - throws IOException { + throws IOException { Configuration conf = new Configuration(configuration); // rpc timeout: 20s conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 20000); @@ -126,17 +123,15 @@ private static synchronized void createConnection(Configuration configuration) } /** - * Create rowKey: currentTime APPEND slowLogPayload.hashcode - * Scan on slowlog table should keep records with sorted order of time, however records - * added at the very same time could be in random order. - * + * Create rowKey: currentTime APPEND slowLogPayload.hashcode Scan on slowlog table should keep + * records with sorted order of time, however records added at the very same time could be in + * random order. * @param slowLogPayload SlowLogPayload to process * @return rowKey byte[] */ private static byte[] getRowKey(final TooSlowLog.SlowLogPayload slowLogPayload) { String hashcode = String.valueOf(slowLogPayload.hashCode()); - String lastFiveDig = - hashcode.substring((hashcode.length() > 5) ? (hashcode.length() - 5) : 0); + String lastFiveDig = hashcode.substring((hashcode.length() > 5) ? (hashcode.length() - 5) : 0); if (lastFiveDig.startsWith("-")) { lastFiveDig = String.valueOf(ThreadLocalRandom.current().nextInt(99999)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index f2f917e011a7..c5e10f11070a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,8 +24,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; /** - * Class to help with dealing with a snapshot description on the client side. - * There is a corresponding class on the server side. + * Class to help with dealing with a snapshot description on the client side. There is a + * corresponding class on the server side. */ @InterfaceAudience.Private public final class ClientSnapshotDescriptionUtils { @@ -37,10 +36,10 @@ private ClientSnapshotDescriptionUtils() { * Check to make sure that the description of the snapshot requested is valid * @param snapshot description of the snapshot * @throws IllegalArgumentException if the name of the snapshot or the name of the table to - * snapshot are not valid names + * snapshot are not valid names */ public static void assertSnapshotRequestIsValid(SnapshotProtos.SnapshotDescription snapshot) - throws IllegalArgumentException { + throws IllegalArgumentException { // make sure the snapshot name is valid TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true); if (snapshot.hasTable()) { @@ -68,15 +67,8 @@ public static String toString(SnapshotProtos.SnapshotDescription snapshot) { return null; } - return new StringBuilder("{ ss=") - .append(snapshot.getName()) - .append(" table=") - .append(snapshot.hasTable() ? TableName.valueOf(snapshot.getTable()) : "") - .append(" type=") - .append(snapshot.getType()) - .append(" ttl=") - .append(snapshot.getTtl()) - .append(" }") - .toString(); + return new StringBuilder("{ ss=").append(snapshot.getName()).append(" table=") + .append(snapshot.hasTable() ? TableName.valueOf(snapshot.getTable()) : "").append(" type=") + .append(snapshot.getType()).append(" ttl=").append(snapshot.getTtl()).append(" }").toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java index c8ba848d28f1..1d56c9bd42f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,9 +28,8 @@ public class CorruptedSnapshotException extends HBaseSnapshotException { /** * Snapshot was corrupt for some reason. - * * @param message message describing the exception - * @param e the actual cause of the exception + * @param e the actual cause of the exception */ public CorruptedSnapshotException(String message, Exception e) { super(message, e); @@ -38,8 +37,7 @@ public CorruptedSnapshotException(String message, Exception e) { /** * Snapshot was corrupt for some reason. - * - * @param message full description of the failure + * @param message full description of the failure * @param snapshotDescription snapshot that was expected */ public CorruptedSnapshotException(String message, SnapshotDescription snapshotDescription) { @@ -48,10 +46,9 @@ public CorruptedSnapshotException(String message, SnapshotDescription snapshotDe /** * Snapshot was corrupt for some reason. 
- * * @param message message describing the exception */ public CorruptedSnapshotException(String message) { - super(message, (SnapshotDescription)null); + super(message, (SnapshotDescription) null); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java index df5f9255e4da..4395c1d9f262 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public ExportSnapshotException(String message) { /** * @param message message describing the exception - * @param e the actual cause of the exception + * @param e the actual cause of the exception */ public ExportSnapshotException(String message, Exception e) { super(message, e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java index 1f50b5ce53fa..5fd62a86008c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,6 @@ public class HBaseSnapshotException extends DoNotRetryIOException { /** * Some exception happened for a snapshot and don't even know the snapshot that it was about. - * * @param message the full description of the failure */ public HBaseSnapshotException(String message) { @@ -40,8 +39,7 @@ public HBaseSnapshotException(String message) { /** * Exception for the given snapshot that has no previous root cause. - * - * @param message the reason why the snapshot failed + * @param message the reason why the snapshot failed * @param snapshotDescription the description of the snapshot that is failing */ public HBaseSnapshotException(String message, SnapshotDescription snapshotDescription) { @@ -51,13 +49,12 @@ public HBaseSnapshotException(String message, SnapshotDescription snapshotDescri /** * Exception for the given snapshot due to another exception. - * - * @param message the reason why the snapshot failed - * @param cause the root cause of the failure + * @param message the reason why the snapshot failed + * @param cause the root cause of the failure * @param snapshotDescription the description of the snapshot that is being failed */ public HBaseSnapshotException(String message, Throwable cause, - SnapshotDescription snapshotDescription) { + SnapshotDescription snapshotDescription) { super(message, cause); this.description = snapshotDescription; } @@ -65,9 +62,8 @@ public HBaseSnapshotException(String message, Throwable cause, /** * Exception when the description of the snapshot cannot be determined, due to some root other * root cause. 
- * * @param message description of what caused the failure - * @param cause the root cause + * @param cause the root cause */ public HBaseSnapshotException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java index 029450994e0b..1148e046a9c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ @InterfaceAudience.Public public class RestoreSnapshotException extends HBaseSnapshotException { /** - * @param message reason why restoring the snapshot fails + * @param message reason why restoring the snapshot fails * @param snapshotDescription description of the snapshot attempted */ public RestoreSnapshotException(String message, SnapshotDescription snapshotDescription) { @@ -35,12 +35,12 @@ public RestoreSnapshotException(String message, SnapshotDescription snapshotDesc } /** - * @param message reason why restoring the snapshot fails - * @param cause the root cause of the failure + * @param message reason why restoring the snapshot fails + * @param cause the root cause of the failure * @param snapshotDescription description of the snapshot attempted */ public RestoreSnapshotException(String message, Throwable cause, - SnapshotDescription snapshotDescription) { + SnapshotDescription snapshotDescription) { super(message, cause, snapshotDescription); } @@ -53,7 +53,7 @@ public RestoreSnapshotException(String message) { /** * @param message reason why restoring the snapshot fails - * @param cause the root cause of the failure + * @param cause the root cause of the failure */ public RestoreSnapshotException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java index d4f672b1e0d5..e72836b95ad5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +21,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when a snapshot could not be created due to a server-side error when - * taking the snapshot. + * Thrown when a snapshot could not be created due to a server-side error when taking the snapshot. */ @SuppressWarnings("serial") @InterfaceAudience.Public public class SnapshotCreationException extends HBaseSnapshotException { /** * Used internally by the RPC engine to pass the exception back to the client. - * * @param message error message to pass back */ public SnapshotCreationException(String message) { @@ -38,8 +36,7 @@ public SnapshotCreationException(String message) { /** * Failure to create the specified snapshot. 
- * - * @param message reason why the snapshot couldn't be completed + * @param message reason why the snapshot couldn't be completed * @param snapshotDescription description of the snapshot attempted */ public SnapshotCreationException(String message, SnapshotDescription snapshotDescription) { @@ -48,13 +45,12 @@ public SnapshotCreationException(String message, SnapshotDescription snapshotDes /** * Failure to create the specified snapshot due to an external cause. - * - * @param message reason why the snapshot couldn't be completed - * @param cause the root cause of the failure + * @param message reason why the snapshot couldn't be completed + * @param cause the root cause of the failure * @param snapshotDescription description of the snapshot attempted */ public SnapshotCreationException(String message, Throwable cause, - SnapshotDescription snapshotDescription) { + SnapshotDescription snapshotDescription) { super(message, cause, snapshotDescription); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java index 9c0e51c39e5b..617de5a76c8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,6 +38,6 @@ public SnapshotDoesNotExistException(String message) { */ public SnapshotDoesNotExistException(SnapshotDescription snapshotDescription) { super("Snapshot '" + snapshotDescription.getName() + "' doesn't exist on the filesystem", - snapshotDescription); + snapshotDescription); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java index 6942b691939b..6d798934e394 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ public class SnapshotExistsException extends HBaseSnapshotException { /** * Failure due to the snapshot already existing. - * * @param message the full description of the failure */ public SnapshotExistsException(String message) { @@ -37,8 +36,7 @@ public SnapshotExistsException(String message) { /** * Failure due to the snapshot already existing. 
- * - * @param message the full description of the failure + * @param message the full description of the failure * @param snapshotDescription snapshot that was attempted */ public SnapshotExistsException(String message, SnapshotDescription snapshotDescription) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java index 5de352108fa3..d6e36552105b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.snapshot; import java.io.IOException; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java index 7951eafb4104..ac11c635e37e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public UnknownSnapshotException(String message) { /** * @param message full information about the failure - * @param e the actual cause of the exception + * @param e the actual cause of the exception */ public UnknownSnapshotException(String message, Exception e) { super(message, e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java index 4d8d38c44ded..a4d5fccc1be7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** Thrown when the file system needs to be upgraded */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java index 77cbf387148c..0ff131f23bf2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index 057cb7e37555..7b4d660097fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,31 +25,23 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** - * - * The PoolMap maps a key to a collection of values, the elements - * of which are managed by a pool. In effect, that collection acts as a shared - * pool of resources, access to which is closely controlled as per the semantics - * of the pool. - * + * The PoolMap maps a key to a collection of values, the elements of which are managed + * by a pool. In effect, that collection acts as a shared pool of resources, access to which is + * closely controlled as per the semantics of the pool. *

    - * In case the size of the pool is set to a non-zero positive number, that is - * used to cap the number of resources that a pool may contain for any given - * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool. + * In case the size of the pool is set to a non-zero positive number, that is used to cap the number + * of resources that a pool may contain for any given key. A size of {@link Integer#MAX_VALUE} is + * interpreted as an unbounded pool. *

    - * *

    - * PoolMap is thread-safe. It does not remove elements automatically. Unused resources - * must be closed and removed explicitly. + * PoolMap is thread-safe. It does not remove elements automatically. Unused resources must be + * closed and removed explicitly. *

    - * - * @param - * the type of the key to the resource - * @param - * the type of the resource being pooled + * @param the type of the key to the resource + * @param the type of the resource being pooled */ @InterfaceAudience.Private public class PoolMap { @@ -58,32 +49,33 @@ public class PoolMap { private final PoolType poolType; private final int poolMaxSize; - public PoolMap(PoolType poolType, int poolMaxSize) { - pools = new HashMap<>(); - this.poolType = poolType; - this.poolMaxSize = poolMaxSize; + public PoolMap(PoolType poolType, int poolMaxSize) { + pools = new HashMap<>(); + this.poolType = poolType; + this.poolMaxSize = poolMaxSize; } public V getOrCreate(K key, PoolResourceSupplier supplier) throws IOException { - synchronized (pools) { - Pool pool = pools.get(key); - - if (pool == null) { - pool = createPool(); - pools.put(key, pool); - } - - try { - return pool.getOrCreate(supplier); - } catch (IOException | RuntimeException | Error e) { - if (pool.size() == 0) { - pools.remove(key); - } - - throw e; - } - } + synchronized (pools) { + Pool pool = pools.get(key); + + if (pool == null) { + pool = createPool(); + pools.put(key, pool); + } + + try { + return pool.getOrCreate(supplier); + } catch (IOException | RuntimeException | Error e) { + if (pool.size() == 0) { + pools.remove(key); + } + + throw e; + } + } } + public boolean remove(K key, V value) { synchronized (pools) { Pool pool = pools.get(key); @@ -128,7 +120,7 @@ public void clear() { } public interface PoolResourceSupplier { - R get() throws IOException; + R get() throws IOException; } protected static V createResource(PoolResourceSupplier supplier) throws IOException { @@ -149,7 +141,8 @@ protected interface Pool { } public enum PoolType { - ThreadLocal, RoundRobin; + ThreadLocal, + RoundRobin; public static PoolType valueOf(String poolTypeName, PoolType defaultPoolType) { PoolType poolType = PoolType.fuzzyMatch(poolTypeName); @@ -172,30 +165,25 @@ public static PoolType fuzzyMatch(String name) { protected Pool createPool() { switch (poolType) { - case RoundRobin: - return new RoundRobinPool<>(poolMaxSize); - case ThreadLocal: - return new ThreadLocalPool<>(); - default: - return new RoundRobinPool<>(poolMaxSize); + case RoundRobin: + return new RoundRobinPool<>(poolMaxSize); + case ThreadLocal: + return new ThreadLocalPool<>(); + default: + return new RoundRobinPool<>(poolMaxSize); } } /** - * The RoundRobinPool represents a {@link PoolMap.Pool}, which - * stores its resources in an {@link ArrayList}. It load-balances access to - * its resources by returning a different resource every time a given key is - * looked up. - * + * The RoundRobinPool represents a {@link PoolMap.Pool}, which stores its resources + * in an {@link ArrayList}. It load-balances access to its resources by returning a different + * resource every time a given key is looked up. *

    - * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of - * the pool is unbounded. Otherwise, it caps the number of resources in this - * pool to the (non-zero positive) value specified in {@link #maxSize}. + * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of the pool is + * unbounded. Otherwise, it caps the number of resources in this pool to the (non-zero positive) + * value specified in {@link #maxSize}. *

    - * - * @param - * the type of the resource - * + * @param the type of the resource */ @SuppressWarnings("serial") static class RoundRobinPool implements Pool { @@ -254,18 +242,15 @@ public int size() { } /** - * The ThreadLocalPool represents a {@link PoolMap.Pool} that - * works similarly to {@link ThreadLocal} class. It essentially binds the resource - * to the thread from which it is accessed. It doesn't remove resources when a thread exits, - * those resources must be closed manually. - * + * The ThreadLocalPool represents a {@link PoolMap.Pool} that works similarly to + * {@link ThreadLocal} class. It essentially binds the resource to the thread from which it is + * accessed. It doesn't remove resources when a thread exits, those resources must be closed + * manually. *

    - * Note that the size of the pool is essentially bounded by the number of threads - * that add resources to this pool. + * Note that the size of the pool is essentially bounded by the number of threads that add + * resources to this pool. *

    - * - * @param - * the type of the resource + * @param the type of the resource */ static class ThreadLocalPool implements Pool { private final Map resources; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 698330acc921..7ebbbf44cebd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,10 +24,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class with methods for manipulating Writable objects @@ -38,11 +36,11 @@ public class Writables { /** * @param w writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e * @see #getWritable(byte[], Writable) */ - public static byte [] getBytes(final Writable w) throws IOException { + public static byte[] getBytes(final Writable w) throws IOException { if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); } @@ -64,20 +62,20 @@ public class Writables { * Put a bunch of Writables as bytes all into the one byte array. * @param ws writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e */ - public static byte [] getBytes(final Writable... ws) throws IOException { - List bytes = new ArrayList<>(ws.length); + public static byte[] getBytes(final Writable... ws) throws IOException { + List bytes = new ArrayList<>(ws.length); int size = 0; - for (Writable w: ws) { - byte [] b = getBytes(w); + for (Writable w : ws) { + byte[] b = getBytes(w); size += b.length; bytes.add(b); } - byte [] result = new byte[size]; + byte[] result = new byte[size]; int offset = 0; - for (byte [] b: bytes) { + for (byte[] b : bytes) { System.arraycopy(b, 0, result, offset, b.length); offset += b.length; } @@ -88,39 +86,32 @@ public class Writables { * Set bytes into the passed Writable by calling its * {@link Writable#readFields(java.io.DataInput)}. * @param bytes serialized bytes - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. - * @throws IOException e - * @throws IllegalArgumentException + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. 
+ * @throws IOException e n */ - public static Writable getWritable(final byte [] bytes, final Writable w) - throws IOException { + public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException { return getWritable(bytes, 0, bytes.length, w); } /** * Set bytes into the passed Writable by calling its * {@link Writable#readFields(java.io.DataInput)}. - * @param bytes serialized bytes + * @param bytes serialized bytes * @param offset offset into array * @param length length of data - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. - * @throws IOException e - * @throws IllegalArgumentException + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. + * @throws IOException e n */ - public static Writable getWritable(final byte [] bytes, final int offset, - final int length, final Writable w) - throws IOException { - if (bytes == null || length <=0) { - throw new IllegalArgumentException("Can't build a writable with empty " + - "bytes array"); + public static Writable getWritable(final byte[] bytes, final int offset, final int length, + final Writable w) throws IOException { + if (bytes == null || length <= 0) { + throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array"); } if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); @@ -136,26 +127,24 @@ public static Writable getWritable(final byte [] bytes, final int offset, } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param src Source Writable * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final Writable src, final Writable tgt) - throws IOException { + public static Writable copyWritable(final Writable src, final Writable tgt) throws IOException { return copyWritable(getBytes(src), tgt); } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param bytes Source Writable - * @param tgt Target Writable + * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final byte [] bytes, final Writable tgt) - throws IOException { + public static Writable copyWritable(final byte[] bytes, final Writable tgt) throws IOException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes)); try { tgt.readFields(dis); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 0447e31fdd09..96170736208b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public final class ReadOnlyZKClient implements Closeable { private static final int DEFAULT_RECOVERY_RETRY = 30; public static final String RECOVERY_RETRY_INTERVAL_MILLIS = - "zookeeper.recovery.retry.intervalmill"; + "zookeeper.recovery.retry.intervalmill"; private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000; @@ -134,11 +134,11 @@ public ReadOnlyZKClient(Configuration conf) { this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT); this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY); this.retryIntervalMs = - conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); + conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); LOG.debug( - "Connect {} to {} with session timeout={}ms, retries {}, " + - "retry interval {}ms, keepAlive={}ms", + "Connect {} to {} with session timeout={}ms, retries {}, " + + "retry interval {}ms, keepAlive={}ms", getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs); Threads.setDaemonThreadRunning(new Thread(this::run), "ReadOnlyZKClient-" + connectString + "@" + getId()); @@ -260,8 +260,8 @@ public CompletableFuture get(String path) { @Override protected void doExec(ZooKeeper zk) { - zk.getData(path, false, - (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), null); + zk.getData(path, false, (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), + null); } }); return future; @@ -311,7 +311,8 @@ private void closeZk() { private ZooKeeper getZk() throws IOException { // may be closed when session expired if (zookeeper == null || !zookeeper.getState().isAlive()) { - zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); } return zookeeper; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java index 5072706cb5ae..f0fae958a66a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 4d3e7b3c50ba..5d73504164ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -43,9 +43,8 @@ public class ZNodePaths { public final String baseZNode; /** - * The prefix of meta znode. Does not include baseZNode. - * Its a 'prefix' because meta replica id integer can be tagged on the end (if - * no number present, it is 'default' replica). + * The prefix of meta znode. Does not include baseZNode. 
Its a 'prefix' because meta replica id + * integer can be tagged on the end (if no number present, it is 'default' replica). */ private final String metaZNodePrefix; @@ -96,49 +95,42 @@ public ZNodePaths(Configuration conf) { drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); backupMasterAddressesZNode = - joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters")); + joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters")); clusterStateZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.state", "running")); tableZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.tableEnableDisable", "table")); clusterIdZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid")); splitLogZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.splitlog", SPLIT_LOGDIR_NAME)); balancerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer")); regionNormalizerZNode = - joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); + joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); masterMaintZNode = - joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); + joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); replicationZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); peersZNode = - joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers")); + joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers")); queuesZNode = joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.rs", "rs")); - hfileRefsZNode = joinZNode(replicationZNode, - conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); + hfileRefsZNode = + joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); snapshotCleanupZNode = joinZNode(baseZNode, - conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); + conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); } @Override public String toString() { - return new StringBuilder() - .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", rsZNode=").append(rsZNode) - .append(", drainingZNode=").append(drainingZNode) - .append(", masterAddressZNode=").append(masterAddressZNode) - .append(", backupMasterAddressesZNode=").append(backupMasterAddressesZNode) - .append(", clusterStateZNode=").append(clusterStateZNode) - .append(", tableZNode=").append(tableZNode) - .append(", clusterIdZNode=").append(clusterIdZNode) - .append(", splitLogZNode=").append(splitLogZNode) - .append(", balancerZNode=").append(balancerZNode) - .append(", regionNormalizerZNode=").append(regionNormalizerZNode) - .append(", switchZNode=").append(switchZNode) - .append(", masterMaintZNode=").append(masterMaintZNode) - .append(", replicationZNode=").append(replicationZNode) - .append(", peersZNode=").append(peersZNode) - .append(", queuesZNode=").append(queuesZNode) - .append(", hfileRefsZNode=").append(hfileRefsZNode) - .append(", snapshotCleanupZNode=").append(snapshotCleanupZNode) - .append("]").toString(); + return new StringBuilder().append("ZNodePaths [baseZNode=").append(baseZNode) + .append(", 
rsZNode=").append(rsZNode).append(", drainingZNode=").append(drainingZNode) + .append(", masterAddressZNode=").append(masterAddressZNode) + .append(", backupMasterAddressesZNode=").append(backupMasterAddressesZNode) + .append(", clusterStateZNode=").append(clusterStateZNode).append(", tableZNode=") + .append(tableZNode).append(", clusterIdZNode=").append(clusterIdZNode) + .append(", splitLogZNode=").append(splitLogZNode).append(", balancerZNode=") + .append(balancerZNode).append(", regionNormalizerZNode=").append(regionNormalizerZNode) + .append(", switchZNode=").append(switchZNode).append(", masterMaintZNode=") + .append(masterMaintZNode).append(", replicationZNode=").append(replicationZNode) + .append(", peersZNode=").append(peersZNode).append(", queuesZNode=").append(queuesZNode) + .append(", hfileRefsZNode=").append(hfileRefsZNode).append(", snapshotCleanupZNode=") + .append(snapshotCleanupZNode).append("]").toString(); } /** @@ -154,8 +146,7 @@ public String getZNodeForReplica(int replicaId) { /** * Parses the meta replicaId from the passed path. - * @param path the name of the full path which includes baseZNode. - * @return replicaId + * @param path the name of the full path which includes baseZNode. n */ public int getMetaReplicaIdFromPath(String path) { // Extract the znode from path. The prefix is of the following format. @@ -166,13 +157,12 @@ public int getMetaReplicaIdFromPath(String path) { /** * Parse the meta replicaId from the passed znode - * @param znode the name of the znode, does not include baseZNode - * @return replicaId + * @param znode the name of the znode, does not include baseZNode n */ public int getMetaReplicaIdFromZNode(String znode) { - return znode.equals(metaZNodePrefix)? - RegionInfo.DEFAULT_REPLICA_ID: - Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); + return znode.equals(metaZNodePrefix) + ? RegionInfo.DEFAULT_REPLICA_ID + : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); } /** @@ -198,8 +188,8 @@ public boolean isClientReadable(String path) { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. 
- return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || - path.equals(clusterIdZNode) || path.equals(rsZNode) || + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) + || path.equals(clusterIdZNode) || path.equals(rsZNode) || // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java index dd26ed5f2091..e6c3da4d0f95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.ZooKeeper; +import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; /** * Methods that help working with ZooKeeper @@ -39,11 +38,12 @@ private ZooKeeperHelper() { /** * Get a ZooKeeper instance and wait until it connected before returning. * @param sessionTimeoutMs Used as session timeout passed to the created ZooKeeper AND as the - * timeout to wait on connection establishment. + * timeout to wait on connection establishment. */ public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs) - throws IOException { - ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + throws IOException { + ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); return ensureConnectedZooKeeper(zookeeper, sessionTimeoutMs); } @@ -52,18 +52,17 @@ public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionT * @param timeout Time to wait on established Connection */ public static ZooKeeper ensureConnectedZooKeeper(ZooKeeper zookeeper, int timeout) - throws ZooKeeperConnectionException { + throws ZooKeeperConnectionException { if (zookeeper.getState().isConnected()) { return zookeeper; } Stopwatch stopWatch = Stopwatch.createStarted(); // Make sure we are connected before we hand it back. - while(!zookeeper.getState().isConnected()) { + while (!zookeeper.getState().isConnected()) { Threads.sleep(1); if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) { - throw new ZooKeeperConnectionException("Failed connect after waiting " + - stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + - zookeeper); + throw new ZooKeeperConnectionException("Failed connect after waiting " + + stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + zookeeper); } } return zookeeper; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java index 628655a083c2..1d33307b9ca9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,8 +69,8 @@ public void testMetaReaderGetColumnMethods() { assertArrayEquals(HConstants.STARTCODE_QUALIFIER, CatalogFamilyFormat.getStartCodeColumn(0)); assertArrayEquals( - Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + - CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), + Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), CatalogFamilyFormat.getStartCodeColumn(42)); assertArrayEquals(HConstants.SEQNUM_QUALIFIER, CatalogFamilyFormat.getSeqNumColumn(0)); @@ -84,19 +84,18 @@ public void testMetaReaderGetColumnMethods() { * The info we can get from the regionName is: table name, start key, regionId, replicaId. */ @Test - public void testParseRegionInfoFromRegionName() throws IOException { - RegionInfo originalRegionInfo = RegionInfoBuilder.newBuilder( - TableName.valueOf(name.getMethodName())).setRegionId(999999L) - .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")) - .setReplicaId(1).build(); - RegionInfo newParsedRegionInfo = CatalogFamilyFormat - .parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); + public void testParseRegionInfoFromRegionName() throws IOException { + RegionInfo originalRegionInfo = + RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setRegionId(999999L) + .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")).setReplicaId(1).build(); + RegionInfo newParsedRegionInfo = + CatalogFamilyFormat.parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); assertEquals("Parse TableName error", originalRegionInfo.getTable(), newParsedRegionInfo.getTable()); assertEquals("Parse regionId error", originalRegionInfo.getRegionId(), newParsedRegionInfo.getRegionId()); - assertTrue("Parse startKey error", Bytes.equals(originalRegionInfo.getStartKey(), - newParsedRegionInfo.getStartKey())); + assertTrue("Parse startKey error", + Bytes.equals(originalRegionInfo.getStartKey(), newParsedRegionInfo.getStartKey())); assertEquals("Parse replicaId error", originalRegionInfo.getReplicaId(), newParsedRegionInfo.getReplicaId()); assertTrue("We can't parse endKey from regionName only", diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java index db46768be315..b60fb727b697 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index fd0183ee32b1..4bf425ed562e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java index ec79c5f815f5..022cab91267f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java index b344ff5febc5..ff4a92ae394d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,8 @@ public void tearDown() throws IOException { private void assertTrace(String methodName, ServerName serverName) { Waiter.waitFor(CONF, 1000, () -> traceRule.getSpans().stream() - .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) && - span.getKind() == SpanKind.INTERNAL && span.hasEnded())); + .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) + && span.getKind() == SpanKind.INTERNAL && span.hasEnded())); SpanData data = traceRule.getSpans().stream() .filter(s -> s.getName().equals("AsyncConnection." 
+ methodName)).findFirst().get(); assertEquals(StatusCode.OK, data.getStatus().getStatusCode()); @@ -101,8 +101,8 @@ public void testHbck() { @Test public void testHbckWithServerName() throws IOException { - ServerName serverName = ServerName.valueOf("localhost", 23456, - EnvironmentEdgeManager.currentTime()); + ServerName serverName = + ServerName.valueOf("localhost", 23456, EnvironmentEdgeManager.currentTime()); conn.getHbck(serverName); assertTrace("getHbck", serverName); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index 1a3feb735da8..eefb62b64315 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -30,6 +30,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasItem; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -61,6 +62,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, MediumTests.class }) @@ -111,18 +113,14 @@ private SpanData waitSpan(String name) { private SpanData waitSpan(Matcher matcher) { Matcher spanLocator = allOf(matcher, hasEnded()); try { - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span", + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span", () -> traceRule.getSpans(), hasItem(spanLocator))); } catch (AssertionError e) { LOG.error("AssertionError while waiting for matching span. 
Span reservoir contains: {}", traceRule.getSpans()); throw e; } - return traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .findFirst() + return traceRule.getSpans().stream().filter(spanLocator::matches).findFirst() .orElseThrow(AssertionError::new); } @@ -130,34 +128,29 @@ private SpanData waitSpan(Matcher matcher) { public void testClearCache() { conn.getLocator().clearCache(); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn))); } @Test public void testClearCacheServerName() { - ServerName sn = ServerName.valueOf("127.0.0.1", 12345, - EnvironmentEdgeManager.currentTime()); + ServerName sn = ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime()); conn.getLocator().clearCache(sn); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); } @Test public void testClearCacheTableName() { conn.getLocator().clearCache(TableName.META_TABLE_NAME); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME))); } @Test @@ -165,13 +158,11 @@ public void testGetRegionLocation() { conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( - containsEntryWithStringValuesOf("db.hbase.regions", + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME), + hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @@ -180,16 +171,12 @@ public void testGetRegionLocations() { conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations"); - String[] expectedRegions = Arrays.stream(locs.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(locs.getRegionLocations()).map(HRegionLocation::getRegion) + 
.map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index bc5ebf4e9fff..db3ccebe89d2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; @@ -68,7 +67,9 @@ import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -170,7 +171,7 @@ AsyncRegionLocator getLocator() { @Override public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { + throws Throwable { TableName tableName = invocation.getArgument(0); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); ServerName serverName = ServerName.valueOf("rs", 16010, 12345); @@ -485,28 +486,24 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { - done.run(ScanResponse.newBuilder() - .setScannerId(scannerId).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true) - .build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasRenew() && req.getRenew()) { future.complete(null); } assertFalse("close scanner should not come in with scan priority " + scanPriority, - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")).build(); + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) + .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); - done.run( - ScanResponse.newBuilder() - .setScannerId(scannerId).setTtl(800).setMoreResultsInRegion(true) - 
.setMoreResults(true).addResults(ProtobufUtil.toResult(result)) - .build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true) + .addResults(ProtobufUtil.toResult(result)).build()); } }); return null; @@ -518,13 +515,13 @@ public Void answer(InvocationOnMock invocation) throws Throwable { @SuppressWarnings("FutureReturnValueIgnored") @Override public Void answer(InvocationOnMock invocation) throws Throwable { - threadPool.submit(() ->{ + threadPool.submit(() -> { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); assertTrue("close request should have scannerId", req.hasScannerId()); assertEquals("close request's scannerId should match", scannerId, req.getScannerId()); assertTrue("close request should have closerScanner set", - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); done.run(ScanResponse.getDefaultInstance()); }); @@ -549,8 +546,8 @@ public void testScanNormalTable() throws Exception { @Test public void testScanSystemTable() throws Exception { CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS); - testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), - renewFuture, Optional.empty()); + testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), renewFuture, + Optional.empty()); } @Test @@ -560,7 +557,7 @@ public void testScanMetaTable() throws Exception { } private void testForTable(TableName tableName, CompletableFuture renewFuture, - Optional priority) throws Exception { + Optional priority) throws Exception { Scan scan = new Scan().setCaching(1).setMaxResultSize(1); priority.ifPresent(scan::setPriority); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index 69cd77668dc7..99e52361109c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -38,6 +38,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -80,8 +81,10 @@ import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -206,37 +209,37 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); final User user = UserProvider.instantiate(CONF).getCurrent(); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, - user) { - - @Override - AsyncRegionLocator getLocator() { - AsyncRegionLocator locator = mock(AsyncRegionLocator.class); - Answer> answer = - new Answer>() { - - @Override - public CompletableFuture 
answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; - doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), - any(RegionLocateType.class), anyLong()); - doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), - anyInt(), any(RegionLocateType.class), anyLong()); - return locator; - } + conn = + new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, user) { + + @Override + AsyncRegionLocator getLocator() { + AsyncRegionLocator locator = mock(AsyncRegionLocator.class); + Answer> answer = + new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; + doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), + any(RegionLocateType.class), anyLong()); + doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), + anyInt(), any(RegionLocateType.class), anyLong()); + return locator; + } - @Override - ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { - return stub; - } - }; + @Override + ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { + return stub; + } + }; table = conn.getTable(TableName.valueOf("table"), ForkJoinPool.commonPool()); } @@ -251,26 +254,19 @@ private void assertTrace(String tableOperation) { private void assertTrace(String tableOperation, Matcher matcher) { final TableName tableName = table.getName(); - final Matcher spanLocator = allOf( - hasName(containsString(tableOperation)), hasEnded()); + final Matcher spanLocator = + allOf(hasName(containsString(tableOperation)), hasEnded()); final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString(); - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span to emit", + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span to emit", () -> traceRule.getSpans(), hasItem(spanLocator))); - List candidateSpans = traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .collect(Collectors.toList()); + List candidateSpans = + traceRule.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); assertThat(candidateSpans, hasSize(1)); SpanData data = candidateSpans.iterator().next(); - assertThat(data, allOf( - hasName(expectedName), - hasKind(SpanKind.CLIENT), - hasStatusWithCode(StatusCode.OK), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(tableName), - matcher)); + assertThat(data, + allOf(hasName(expectedName), hasKind(SpanKind.CLIENT), hasStatusWithCode(StatusCode.OK), + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(tableName), matcher)); } @Test @@ -343,9 +339,9 @@ public void testCheckAndMutateList() { .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) .build(new 
Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test @@ -353,15 +349,14 @@ public void testCheckAndMutateAll() { table.checkAndMutateAll(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) .build(new Delete(Bytes.toBytes(0))))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } private void testCheckAndMutateBuilder(Row op) { AsyncTable.CheckAndMutateBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")) + table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) .ifEquals(Bytes.toBytes("v")); if (op instanceof Put) { Put put = (Put) op; @@ -380,8 +375,8 @@ private void testCheckAndMutateBuilder(Row op) { @Test public void testCheckAndMutateBuilderThenPut() { - Put put = new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")); + Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")); testCheckAndMutateBuilder(put); } @@ -392,10 +387,9 @@ public void testCheckAndMutateBuilderThenDelete() { @Test public void testCheckAndMutateBuilderThenMutations() throws IOException { - RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add(new Delete(Bytes.toBytes(0))); + RowMutations mutations = + new RowMutations(Bytes.toBytes(0)).add(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))).add(new Delete(Bytes.toBytes(0))); testCheckAndMutateBuilder(mutations); } @@ -420,8 +414,8 @@ private void testCheckAndMutateWithFilterBuilder(Row op) { @Test public void testCheckAndMutateWithFilterBuilderThenPut() { - Put put = new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")); + Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")); testCheckAndMutateWithFilterBuilder(put); } @@ -432,18 +426,16 @@ public void testCheckAndMutateWithFilterBuilderThenDelete() { @Test public void testCheckAndMutateWithFilterBuilderThenMutations() throws IOException { - RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add(new Delete(Bytes.toBytes(0))); + RowMutations mutations = + new RowMutations(Bytes.toBytes(0)).add(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))).add(new Delete(Bytes.toBytes(0))); testCheckAndMutateWithFilterBuilder(mutations); } @Test public void testMutateRow() throws IOException { - final RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) + final RowMutations 
mutations = new RowMutations(Bytes.toBytes(0)).add(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) .add(new Delete(Bytes.toBytes(0))); table.mutateRow(mutations).join(); assertTrace("BATCH", hasAttributes( @@ -463,19 +455,22 @@ public void testScan() throws Throwable { final AtomicReference throwable = new AtomicReference<>(); final Scan scan = new Scan().setCaching(1).setMaxResultSize(1).setLimit(1); table.scan(scan, new ScanResultConsumer() { - @Override public boolean onNext(Result result) { + @Override + public boolean onNext(Result result) { if (result.getRow() != null) { count.incrementAndGet(); } return true; } - @Override public void onError(Throwable error) { + @Override + public void onError(Throwable error) { throwable.set(error); doneSignal.countDown(); } - @Override public void onComplete() { + @Override + public void onComplete() { doneSignal.countDown(); } }); @@ -509,15 +504,15 @@ public void testExistsList() { .allOf( table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testExistsAll() { table.existsAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test @@ -525,15 +520,15 @@ public void testGetList() { CompletableFuture .allOf(table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetAll() { table.getAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test @@ -542,16 +537,16 @@ public void testPutList() { .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testPutAll() { table.putAll(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test @@ -560,15 +555,15 @@ public void testDeleteList() { .allOf( table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + 
assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testDeleteAll() { table.deleteAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test @@ -577,14 +572,14 @@ public void testBatch() { .allOf( table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatchAll() { table.batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java index 15d5104730a4..efb993561a5e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,13 +27,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestAttributes { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAttributes.class); + HBaseClassTestRule.forClass(TestAttributes.class); + + private static final byte[] ROW = new byte[] { 'r' }; - private static final byte [] ROW = new byte [] {'r'}; @Test public void testPutAttributes() { Put put = new Put(ROW); @@ -48,22 +49,22 @@ public void testPutAttributes() { put.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - put.getAttributesMap().get("attribute1"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1"))); // overriding attribute value put.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - put.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1"))); // adding another attribute put.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2"))); Assert.assertEquals(2, put.getAttributesMap().size()); - 
Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - put.getAttributesMap().get("attribute2"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2"))); // removing attribute put.setAttribute("attribute2", null); @@ -86,7 +87,7 @@ public void testPutAttributes() { @Test public void testDeleteAttributes() { - Delete del = new Delete(new byte [] {'r'}); + Delete del = new Delete(new byte[] { 'r' }); Assert.assertTrue(del.getAttributesMap().isEmpty()); Assert.assertNull(del.getAttribute("absent")); @@ -98,22 +99,22 @@ public void testDeleteAttributes() { del.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - del.getAttributesMap().get("attribute1"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1"))); // overriding attribute value del.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - del.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1"))); // adding another attribute del.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2"))); Assert.assertEquals(2, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - del.getAttributesMap().get("attribute2"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2"))); // removing attribute del.setAttribute("attribute2", null); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java index 73953d0db75e..ba23d1053938 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestBufferedMutatorParams { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBufferedMutatorParams.class); + HBaseClassTestRule.forClass(TestBufferedMutatorParams.class); @Rule public TestName name = new TestName(); @@ -98,28 +98,26 @@ public Future submit(Runnable task) { } @Override - public List> invokeAll( - Collection> tasks) throws InterruptedException { + public List> invokeAll(Collection> tasks) + throws InterruptedException { return null; } @Override - public List> invokeAll( - Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException { + public List> invokeAll(Collection> tasks, long timeout, + TimeUnit unit) throws InterruptedException { return null; } @Override public T invokeAny(Collection> tasks) - throws InterruptedException, ExecutionException { + throws InterruptedException, ExecutionException { return null; } @Override - public T invokeAny(Collection> tasks, - long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { + public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { return null; } } @@ -129,8 +127,8 @@ public T invokeAny(Collection> tasks, */ private static class MockExceptionListener implements BufferedMutator.ExceptionListener { @Override - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException { + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException { } } @@ -141,13 +139,9 @@ public void testClone() { BufferedMutatorParams bmp = new BufferedMutatorParams(TableName.valueOf(tableName)); BufferedMutator.ExceptionListener listener = new MockExceptionListener(); - bmp - .writeBufferSize(17) - .setWriteBufferPeriodicFlushTimeoutMs(123) - .setWriteBufferPeriodicFlushTimerTickMs(456) - .maxKeyValueSize(13) - .pool(pool) - .listener(listener); + bmp.writeBufferSize(17).setWriteBufferPeriodicFlushTimeoutMs(123) + .setWriteBufferPeriodicFlushTimerTickMs(456).maxKeyValueSize(13).pool(pool) + .listener(listener); bmp.implementationClassName("someClassName"); BufferedMutatorParams clone = bmp.clone(); @@ -172,19 +166,17 @@ public void testClone() { /** * Confirm all fields are equal. - * @param some some instance + * @param some some instance * @param clone a clone of that instance, but not the same instance. 
*/ - private void cloneTest(BufferedMutatorParams some, - BufferedMutatorParams clone) { + private void cloneTest(BufferedMutatorParams some, BufferedMutatorParams clone) { assertFalse(some == clone); - assertEquals(some.getTableName().toString(), - clone.getTableName().toString()); + assertEquals(some.getTableName().toString(), clone.getTableName().toString()); assertEquals(some.getWriteBufferSize(), clone.getWriteBufferSize()); assertEquals(some.getWriteBufferPeriodicFlushTimeoutMs(), - clone.getWriteBufferPeriodicFlushTimeoutMs()); + clone.getWriteBufferPeriodicFlushTimeoutMs()); assertEquals(some.getWriteBufferPeriodicFlushTimerTickMs(), - clone.getWriteBufferPeriodicFlushTimerTickMs()); + clone.getWriteBufferPeriodicFlushTimerTickMs()); assertEquals(some.getMaxKeyValueSize(), clone.getMaxKeyValueSize()); assertTrue(some.getListener() == clone.getListener()); assertTrue(some.getPool() == clone.getPool()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java index 0df04b8043f8..6ae602aaadb5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,11 +36,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestClientExponentialBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientExponentialBackoff.class); + HBaseClassTestRule.forClass(TestClientExponentialBackoff.class); ServerName server = Mockito.mock(ServerName.class); byte[] regionname = Bytes.toBytes("region"); @@ -67,8 +67,8 @@ public void testMaxLoad() { ServerStatistics stats = new ServerStatistics(); update(stats, 100); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); // another policy with a different max timeout long max = 100; @@ -78,20 +78,20 @@ public void testMaxLoad() { // test beyond 100 still doesn't exceed the max update(stats, 101); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); assertEquals(max, backoffShortTimeout.getBackoffTime(server, regionname, stats)); // and that when we are below 100, its less than the max timeout update(stats, 99); - assertTrue(backoff.getBackoffTime(server, - regionname, stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); + assertTrue(backoff.getBackoffTime(server, regionname, stats) + < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); assertTrue(backoffShortTimeout.getBackoffTime(server, regionname, stats) < max); } /** - * Make sure that we get results in the order that we expect - backoff for a load of 1 should - * less than backoff for 10, 
which should be less than that for 50. + * Make sure that we get results in the order that we expect - backoff for a load of 1 should less + * than backoff for 10, which should be less than that for 50. */ @Test public void testResultOrdering() { @@ -105,9 +105,8 @@ public void testResultOrdering() { for (int i = 1; i <= 100; i++) { update(stats, i); long next = backoff.getBackoffTime(server, regionname, stats); - assertTrue( - "Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + - "load " + i, previous < next); + assertTrue("Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + + "load " + i, previous < next); previous = next; } } @@ -151,8 +150,7 @@ public void testCompactionPressurePolicy() { long previous = backoffTime; update(stats, 0, 0, 50); backoffTime = backoff.getBackoffTime(server, regionname, stats); - assertTrue("Compaction pressure should be bigger", - backoffTime > previous); + assertTrue("Compaction pressure should be bigger", backoffTime > previous); update(stats, 0, 0, 100); backoffTime = backoff.getBackoffTime(server, regionname, stats); @@ -161,18 +159,16 @@ public void testCompactionPressurePolicy() { } private void update(ServerStatistics stats, int load) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(load).build(); + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(load).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy, - int compactionPressure) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(memstoreLoad) - .setHeapOccupancy(heapOccupancy) - .setCompactionPressure(compactionPressure) - .build(); + int compactionPressure) { + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(memstoreLoad) + .setHeapOccupancy(heapOccupancy).setCompactionPressure(compactionPressure).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index ea9a36171d49..a0869c4bdfe3 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; @@ -40,7 +41,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.Map; @Category({ MiscTests.class, SmallTests.class }) public class TestColumnFamilyDescriptorBuilder { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java index ac8aed866e68..900f4093d1b0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ private boolean contains(Collection> enumConsts, String value) { @Test public void test() - throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { + throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { Map> getMethod2Value = new HashMap<>(); ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java index 561b1f5715fd..a2df7e932395 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,7 @@ public void close() { @BeforeClass public static void setUp() { CONF.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ConnectionRegistryForTest.class, ConnectionRegistry.class); + ConnectionRegistryForTest.class, ConnectionRegistry.class); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java index b288f98f1f92..f1a8e000136d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,9 +55,8 @@ public void testBuild() { int priority = 100; String propertyKey = "propertyKey"; String propertyValue = "propertyValue"; - CoprocessorDescriptor cp = - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build(); + CoprocessorDescriptor cp = CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build(); assertEquals(className, cp.getClassName()); assertEquals(path, cp.getJarPath().get()); assertEquals(priority, cp.getPriority()); @@ -73,13 +72,11 @@ public void testSetCoprocessor() throws IOException { String path = "path"; int priority = Math.abs(className.hashCode()); String propertyValue = "propertyValue"; - cps.add( - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build()); + cps.add(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build()); } - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setCoprocessors(cps).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessors(cps).build(); for (CoprocessorDescriptor cp : cps) { boolean match = false; for (CoprocessorDescriptor that : tableDescriptor.getCoprocessorDescriptors()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java index e855055fd889..cc329cd3d03a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,20 +30,19 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestDeleteTimeStamp { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDeleteTimeStamp.class); + HBaseClassTestRule.forClass(TestDeleteTimeStamp.class); private static final byte[] ROW = Bytes.toBytes("testRow"); private static final byte[] FAMILY = Bytes.toBytes("testFamily"); private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier"); /* - * Test for verifying that the timestamp in delete object is being honored. - * @throws Exception + * Test for verifying that the timestamp in delete object is being honored. n */ @Test public void testTimeStamp() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index b52ad3e8d2a4..44a1c577b10d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,43 +52,42 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestGet { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGet.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGet.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; private static final String PB_GET = "CgNyb3ciEwoPdGVzdC5Nb2NrRmlsdGVyEgAwATgB"; private static final String PB_GET_WITH_FILTER_LIST = - "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + - "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + - "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; + "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + + "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + + "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; private static final String MOCK_FILTER_JAR = - "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + - "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + - "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + - "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + - "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + - "0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + - "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + - "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + - "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + - "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + - "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + - "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + - "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + - "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + - "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + - "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + - "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + - "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + - "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + - "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + - "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + - "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; + "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + + "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + + "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + + "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + + "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + + 
"0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + + "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + + "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + + "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + + "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + + "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + + "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + + "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + + "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + + "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + + "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + + "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + + "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + + "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + + "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + + "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + + "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; @Test public void testAttributesSerialization() throws IOException { @@ -121,22 +120,22 @@ public void testGetAttributes() { get.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - get.getAttributesMap().get("attribute1"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1"))); // overriding attribute value get.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - get.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1"))); // adding another attribute get.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2"))); Assert.assertEquals(2, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - get.getAttributesMap().get("attribute2"))); + Assert + .assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2"))); // removing attribute get.setAttribute("attribute2", null); @@ -209,14 +208,12 @@ public void TestGetRowFromGetCopyConstructor() throws Exception { @Test public void testDynamicFilter() throws Exception { Configuration conf = HBaseConfiguration.create(); - String localPath = conf.get("hbase.local.dir") - + File.separator + "jars" + File.separator; + String localPath = conf.get("hbase.local.dir") + File.separator + "jars" + File.separator; File jarFile = new File(localPath, "MockFilter.jar"); jarFile.delete(); assertFalse("Should be deleted: " + jarFile.getPath(), jarFile.exists()); - ClientProtos.Get getProto1 = - ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); + ClientProtos.Get getProto1 = 
ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); ClientProtos.Get getProto2 = ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET_WITH_FILTER_LIST)); try { @@ -230,9 +227,8 @@ public void testDynamicFilter() throws Exception { fail("Should not be able to load the filter class"); } catch (IOException ioe) { assertTrue(ioe.getCause() instanceof InvocationTargetException); - InvocationTargetException ite = (InvocationTargetException)ioe.getCause(); - assertTrue(ite.getTargetException() - instanceof DeserializationException); + InvocationTargetException ite = (InvocationTargetException) ioe.getCause(); + assertTrue(ite.getTargetException() instanceof DeserializationException); } FileOutputStream fos = new FileOutputStream(jarFile); fos.write(Base64.getDecoder().decode(MOCK_FILTER_JAR)); @@ -243,7 +239,7 @@ public void testDynamicFilter() throws Exception { Get get2 = ProtobufUtil.toGet(getProto2); assertTrue(get2.getFilter() instanceof FilterList); - List<Filter> filters = ((FilterList)get2.getFilter()).getFilters(); + List<Filter> filters = ((FilterList) get2.getFilter()).getFilters(); assertEquals(3, filters.size()); assertEquals("test.MockFilter", filters.get(0).getClass().getName()); assertEquals("my.MockFilter", filters.get(1).getClass().getName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java index 7a36696d1544..2c8f38ed1575 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertArrayEquals; @@ -60,36 +58,17 @@ public void testScanCopyConstructor() throws Exception { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user2", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setReadType(Scan.ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) - .setTimeRange(0, 13); + .setACL("test_user2", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED).setLimit(100) + .setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100).setMaxResultsPerColumnFamily(1000) + .readVersions(9999).setMvccReadPoint(5).setNeedCursorResult(true).setPriority(1).setRaw(true) + .setReplicaId(3).setReversed(true).setRowOffsetPerColumnFamily(5) + .setStartStopRowForPrefixScan(Bytes.toBytes("row_")).setScanMetricsEnabled(true) + .setReadType(Scan.ReadType.STREAM).withStartRow(Bytes.toBytes("row_1")) + .withStopRow(Bytes.toBytes("row_2")).setTimeRange(0, 13); // create a copy of existing scan object Scan scanCopy = new ImmutableScan(scan); @@ -210,8 +189,7 @@ private void testUnmodifiableSetters(Scan scanCopy) throws IOException { scanCopy.setCaching(1); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setCaching", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setCaching", e.getMessage()); } try { scanCopy.setLoadColumnFamiliesOnDemand(true); @@ -302,8 +280,7 @@ private void testUnmodifiableSetters(Scan scanCopy) throws IOException { scanCopy.setAllowPartialResults(true); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setAllowPartialResults", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setAllowPartialResults", e.getMessage()); } try { scanCopy.setId("id"); @@ -382,12 +359,13 @@ private void compareGetters(Scan scan, Scan scanCopy) { } private static boolean isGetter(Method method) { - if ("hashCode".equals(method.getName()) || "equals".equals(method.getName()) - || method.getName().startsWith("set")) { + if ( + "hashCode".equals(method.getName()) || "equals".equals(method.getName()) + || method.getName().startsWith("set") + ) { return false; } - return 
!void.class.equals(method.getReturnType()) - && !Scan.class.equals(method.getReturnType()); + return !void.class.equals(method.getReturnType()) && !Scan.class.equals(method.getReturnType()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java index 75bad5ea416f..309c1007ae09 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,27 +29,27 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestIncrement { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrement.class); + HBaseClassTestRule.forClass(TestIncrement.class); @Test public void testIncrementInstance() { final long expected = 13; - Increment inc = new Increment(new byte [] {'r'}); + Increment inc = new Increment(new byte[] { 'r' }); int total = 0; for (int i = 0; i < 2; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); inc.addColumn(bytes, bytes, expected); total++; } - Map<byte [], NavigableMap<byte [], Long>> familyMapOfLongs = inc.getFamilyMapOfLongs(); + Map<byte[], NavigableMap<byte[], Long>> familyMapOfLongs = inc.getFamilyMapOfLongs(); int found = 0; - for (Map.Entry<byte [], NavigableMap<byte [], Long>> entry: familyMapOfLongs.entrySet()) { - for (Map.Entry<byte [], Long> e: entry.getValue().entrySet()) { + for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry : familyMapOfLongs.entrySet()) { + for (Map.Entry<byte[], Long> e : entry.getValue().entrySet()) { assertEquals(expected, e.getValue().longValue()); found++; } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java index 3c8b04dde174..a3f170bfcd74 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index 7abbbd0d72bc..c4b04c40523b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -51,15 +51,16 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -@Category({ClientTests.class, MetricsTests.class, SmallTests.class}) +@Category({ ClientTests.class, MetricsTests.class, SmallTests.class }) public class TestMetricsConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsConnection.class); + HBaseClassTestRule.forClass(TestMetricsConnection.class); private static MetricsConnection METRICS; private static final ThreadPoolExecutor BATCH_POOL = (ThreadPoolExecutor) Executors.newFixedThreadPool(2); + @BeforeClass public static void beforeClass() { METRICS = new MetricsConnection("mocked-connection", () -> BATCH_POOL, () -> null); @@ -92,71 +93,54 @@ public void testMetricsConnectionScope() throws IOException { @Test public void testStaticMetrics() throws IOException { final byte[] foo = Bytes.toBytes("foo"); - final RegionSpecifier region = RegionSpecifier.newBuilder() - .setValue(ByteString.EMPTY) - .setType(RegionSpecifierType.REGION_NAME) - .build(); + final RegionSpecifier region = RegionSpecifier.newBuilder().setValue(ByteString.EMPTY) + .setType(RegionSpecifierType.REGION_NAME).build(); final int loop = 5; for (int i = 0; i < loop; i++) { - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Get"), - GetRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Scan"), - ScanRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Multi"), - MultiRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Get"), + GetRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Scan"), + ScanRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Multi"), + MultiRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, 
new Append(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))).setRegion(region) + .build(), + MetricsConnection.newCallStats()); } - for (String method: new String[]{"Get", "Scan", "Mutate"}) { + for (String method : new String[] { "Get", "Scan", "Mutate" }) { final String metricKey = "rpcCount_" + ClientService.getDescriptor().getName() + "_" + method; final long metricVal = METRICS.rpcCounters.get(metricKey).getCount(); assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop); } - for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { - METRICS.getTracker, METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, - METRICS.deleteTracker, METRICS.incrementTracker, METRICS.putTracker - }) { + for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getTracker, + METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, METRICS.deleteTracker, + METRICS.incrementTracker, METRICS.putTracker }) { assertEquals("Failed to invoke callTimer on " + t, loop, t.callTimer.getCount()); assertEquals("Failed to invoke reqHist on " + t, loop, t.reqHist.getCount()); assertEquals("Failed to invoke respHist on " + t, loop, t.respHist.getCount()); } - RatioGauge executorMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getExecutorPoolName()); - RatioGauge metaMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getMetaPoolName()); + RatioGauge executorMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getExecutorPoolName()); + RatioGauge metaMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getMetaPoolName()); assertEquals(Ratio.of(0, 3).getValue(), executorMetrics.getValue(), 0); assertEquals(Double.NaN, metaMetrics.getValue(), 0); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java index 99699a4fea6f..dcb4d6eb88ad 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestMutation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMutation.class); + HBaseClassTestRule.forClass(TestMutation.class); @Test public void testAppendCopyConstructor() throws IOException { @@ -50,20 +50,16 @@ public void testAppendCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("value")); origin.setTimeRange(100, 1000); Append clone = new Append(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("value")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -73,20 +69,16 @@ public void testIncrementCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), 4); origin.setTimeRange(100, 1000); Increment clone = new Increment(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), 3); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -96,12 +88,8 @@ public void testDeleteCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Delete) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Delete).build()); origin.addColumn(family, Bytes.toBytes("q0")); origin.addColumns(family, Bytes.toBytes("q1")); origin.addFamily(family); @@ -111,7 +99,7 @@ public void testDeleteCopyConstructor() throws IOException { assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q3")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -121,20 +109,16 @@ public void testPutCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - 
.setValue(Bytes.toBytes("value")) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes("value")).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("V-01")); origin.addColumn(family, Bytes.toBytes("q1"), 100, Bytes.toBytes("V-01")); Put clone = new Put(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q2"), Bytes.toBytes("V-02")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -160,10 +144,10 @@ private void assertEquals(Mutation origin, Mutation clone) { Assert.assertEquals(origin.getTimestamp(), clone.getTimestamp()); Assert.assertEquals(origin.getPriority(), clone.getPriority()); if (origin instanceof Append) { - assertEquals(((Append)origin).getTimeRange(), ((Append)clone).getTimeRange()); + assertEquals(((Append) origin).getTimeRange(), ((Append) clone).getTimeRange()); } if (origin instanceof Increment) { - assertEquals(((Increment)origin).getTimeRange(), ((Increment)clone).getTimeRange()); + assertEquals(((Increment) origin).getTimeRange(), ((Increment) clone).getTimeRange()); } } @@ -179,65 +163,55 @@ public void testRowIsImmutableOrNot() { // Test when row key is immutable Put putRowIsImmutable = new Put(rowKey, true); - assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made + assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made // Test when row key is not immutable Put putRowIsNotImmutable = new Put(rowKey, 1000L, false); - assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made + assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made } // HBASE-14882 @Test public void testAddImmutableToPut() throws IOException { - byte[] row = Bytes.toBytes("immutable-row"); - byte[] family = Bytes.toBytes("immutable-family"); + byte[] row = Bytes.toBytes("immutable-row"); + byte[] family = Bytes.toBytes("immutable-family"); byte[] qualifier0 = Bytes.toBytes("immutable-qualifier-0"); - byte[] value0 = Bytes.toBytes("immutable-value-0"); + byte[] value0 = Bytes.toBytes("immutable-value-0"); byte[] qualifier1 = Bytes.toBytes("immutable-qualifier-1"); - byte[] value1 = Bytes.toBytes("immutable-value-1"); - long ts1 = 5000L; + byte[] value1 = Bytes.toBytes("immutable-value-1"); + long ts1 = 5000L; // "true" indicates that the input row is immutable Put put = new Put(row, true); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier0) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(value0) - .build()) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier1) - .setTimestamp(ts1) - .setType(Type.Put) - .setValue(value1) - .build()); + put + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier0).setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(value0).build()) + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier1).setTimestamp(ts1).setType(Type.Put).setValue(value1).build()); // Verify the cell of family:qualifier0 Cell cell0 = put.get(family, qualifier0).get(0); // Verify no local copy is made for family, 
qualifier or value - assertTrue(cell0.getFamilyArray() == family); + assertTrue(cell0.getFamilyArray() == family); assertTrue(cell0.getQualifierArray() == qualifier0); - assertTrue(cell0.getValueArray() == value0); + assertTrue(cell0.getValueArray() == value0); // Verify timestamp - assertTrue(cell0.getTimestamp() == put.getTimestamp()); + assertTrue(cell0.getTimestamp() == put.getTimestamp()); // Verify the cell of family:qualifier1 Cell cell1 = put.get(family, qualifier1).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell1.getFamilyArray() == family); + assertTrue(cell1.getFamilyArray() == family); assertTrue(cell1.getQualifierArray() == qualifier1); - assertTrue(cell1.getValueArray() == value1); + assertTrue(cell1.getValueArray() == value1); // Verify timestamp - assertTrue(cell1.getTimestamp() == ts1); + assertTrue(cell1.getTimestamp() == ts1); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 600c444ad463..4e0152a59a3d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,19 +71,19 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Run tests that use the functionality of the Operation superclass for - * Puts, Gets, Deletes, Scans, and MultiPuts. + * Run tests that use the functionality of the Operation superclass for Puts, Gets, Deletes, Scans, + * and MultiPuts. 
*/ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestOperation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOperation.class); + HBaseClassTestRule.forClass(TestOperation.class); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] VALUE = Bytes.toBytes("testValue"); private static Gson GSON = GsonUtil.createGson().create(); @@ -94,19 +94,19 @@ public class TestOperation { private static List L_TS_LIST = Arrays.asList(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L); private static TimestampsFilter L_TS_FILTER = new TimestampsFilter(L_TS_LIST); private static String STR_L_TS_FILTER = - L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]"; + L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]"; private static String COL_NAME_1 = "col1"; private static ColumnPrefixFilter COL_PRE_FILTER = - new ColumnPrefixFilter(Bytes.toBytes(COL_NAME_1)); + new ColumnPrefixFilter(Bytes.toBytes(COL_NAME_1)); private static String STR_COL_PRE_FILTER = - COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1; + COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1; private static String COL_NAME_2 = "col2"; private static ColumnRangeFilter CR_FILTER = - new ColumnRangeFilter(Bytes.toBytes(COL_NAME_1), true, Bytes.toBytes(COL_NAME_2), false); - private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName() - + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; + new ColumnRangeFilter(Bytes.toBytes(COL_NAME_1), true, Bytes.toBytes(COL_NAME_2), false); + private static String STR_CR_FILTER = + CR_FILTER.getClass().getSimpleName() + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; private static int COL_COUNT = 9; private static ColumnCountGetFilter CCG_FILTER = new ColumnCountGetFilter(COL_COUNT); @@ -115,14 +115,13 @@ public class TestOperation { private static int LIMIT = 3; private static int OFFSET = 4; private static ColumnPaginationFilter CP_FILTER = new ColumnPaginationFilter(LIMIT, OFFSET); - private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName() - + " (" + LIMIT + ", " + OFFSET + ")"; + private static String STR_CP_FILTER = + CP_FILTER.getClass().getSimpleName() + " (" + LIMIT + ", " + OFFSET + ")"; private static String STOP_ROW_KEY = "stop"; private static InclusiveStopFilter IS_FILTER = - new InclusiveStopFilter(Bytes.toBytes(STOP_ROW_KEY)); - private static String STR_IS_FILTER = - IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; + new InclusiveStopFilter(Bytes.toBytes(STOP_ROW_KEY)); + private static String STR_IS_FILTER = IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; private static String PREFIX = "prefix"; private static PrefixFilter PREFIX_FILTER = new PrefixFilter(Bytes.toBytes(PREFIX)); @@ -131,15 +130,15 @@ public class TestOperation { private static byte[][] PREFIXES = { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2") }; private static MultipleColumnPrefixFilter MCP_FILTER = new MultipleColumnPrefixFilter(PREFIXES); private static String STR_MCP_FILTER = - 
MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; + MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; - private static byte[][] L_PREFIXES = { - Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), - Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; + private static byte[][] L_PREFIXES = + { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), + Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; private static MultipleColumnPrefixFilter L_MCP_FILTER = - new MultipleColumnPrefixFilter(L_PREFIXES); + new MultipleColumnPrefixFilter(L_PREFIXES); private static String STR_L_MCP_FILTER = - L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]"; + L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]"; private static int PAGE_SIZE = 9; private static PageFilter PAGE_FILTER = new PageFilter(PAGE_SIZE); @@ -147,143 +146,133 @@ public class TestOperation { private static SkipFilter SKIP_FILTER = new SkipFilter(L_TS_FILTER); private static String STR_SKIP_FILTER = - SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; + SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; private static WhileMatchFilter WHILE_FILTER = new WhileMatchFilter(L_TS_FILTER); private static String STR_WHILE_FILTER = - WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; + WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; private static KeyOnlyFilter KEY_ONLY_FILTER = new KeyOnlyFilter(); private static String STR_KEY_ONLY_FILTER = KEY_ONLY_FILTER.getClass().getSimpleName(); private static FirstKeyOnlyFilter FIRST_KEY_ONLY_FILTER = new FirstKeyOnlyFilter(); private static String STR_FIRST_KEY_ONLY_FILTER = - FIRST_KEY_ONLY_FILTER.getClass().getSimpleName(); + FIRST_KEY_ONLY_FILTER.getClass().getSimpleName(); private static CompareOperator CMP_OP = CompareOperator.EQUAL; private static byte[] CMP_VALUE = Bytes.toBytes("value"); private static BinaryComparator BC = new BinaryComparator(CMP_VALUE); private static DependentColumnFilter DC_FILTER = - new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); - private static String STR_DC_FILTER = String.format( - "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(), - Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true, - CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); + new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); + private static String STR_DC_FILTER = String.format("%s (%s, %s, %s, %s, %s)", + DC_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), true, CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC); private static String STR_FAMILY_FILTER = - FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static QualifierFilter QUALIFIER_FILTER = new QualifierFilter(CMP_OP, BC); private static String STR_QUALIFIER_FILTER = - QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static RowFilter ROW_FILTER = new RowFilter(CMP_OP, BC); private static String STR_ROW_FILTER = ROW_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static ValueFilter VALUE_FILTER = new ValueFilter(CMP_OP, BC); private static String 
STR_VALUE_FILTER = - VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; private static SingleColumnValueFilter SCV_FILTER = - new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); + new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)", - SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), - Bytes.toStringBinary(CMP_VALUE)); + SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); private static SingleColumnValueExcludeFilter SCVE_FILTER = - new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); + new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)", - SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); - - private static FilterList AND_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_AND_FILTER_LIST = String.format( - "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList OR_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_OR_FILTER_LIST = String.format( - "%s OR (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList L_FILTER_LIST = new FilterList( - Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, - CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); - private static String STR_L_FILTER_LIST = String.format( - "%s AND (5/8): [%s, %s, %s, %s, %s, %s]", - L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, - STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); - - private static Filter[] FILTERS = { - TS_FILTER, // TimestampsFilter - L_TS_FILTER, // TimestampsFilter - COL_PRE_FILTER, // ColumnPrefixFilter - CP_FILTER, // ColumnPaginationFilter - CR_FILTER, // ColumnRangeFilter - CCG_FILTER, // ColumnCountGetFilter - IS_FILTER, // InclusiveStopFilter - PREFIX_FILTER, // PrefixFilter - PAGE_FILTER, // PageFilter - SKIP_FILTER, // SkipFilter - WHILE_FILTER, // WhileMatchFilter - KEY_ONLY_FILTER, // KeyOnlyFilter + SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); + + private static FilterList AND_FILTER_LIST = new FilterList(Operator.MUST_PASS_ALL, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_AND_FILTER_LIST = String.format("%s AND (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList OR_FILTER_LIST = new FilterList(Operator.MUST_PASS_ONE, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_OR_FILTER_LIST = String.format("%s OR (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, 
STR_CR_FILTER); + + private static FilterList L_FILTER_LIST = new FilterList(Arrays.asList((Filter) TS_FILTER, + L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); + private static String STR_L_FILTER_LIST = String.format("%s AND (5/8): [%s, %s, %s, %s, %s, %s]", + L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER, + STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); + + private static Filter[] FILTERS = { TS_FILTER, // TimestampsFilter + L_TS_FILTER, // TimestampsFilter + COL_PRE_FILTER, // ColumnPrefixFilter + CP_FILTER, // ColumnPaginationFilter + CR_FILTER, // ColumnRangeFilter + CCG_FILTER, // ColumnCountGetFilter + IS_FILTER, // InclusiveStopFilter + PREFIX_FILTER, // PrefixFilter + PAGE_FILTER, // PageFilter + SKIP_FILTER, // SkipFilter + WHILE_FILTER, // WhileMatchFilter + KEY_ONLY_FILTER, // KeyOnlyFilter FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - MCP_FILTER, // MultipleColumnPrefixFilter - L_MCP_FILTER, // MultipleColumnPrefixFilter - DC_FILTER, // DependentColumnFilter - FAMILY_FILTER, // FamilyFilter - QUALIFIER_FILTER, // QualifierFilter - ROW_FILTER, // RowFilter - VALUE_FILTER, // ValueFilter - SCV_FILTER, // SingleColumnValueFilter - SCVE_FILTER, // SingleColumnValueExcludeFilter - AND_FILTER_LIST, // FilterList - OR_FILTER_LIST, // FilterList - L_FILTER_LIST, // FilterList + MCP_FILTER, // MultipleColumnPrefixFilter + L_MCP_FILTER, // MultipleColumnPrefixFilter + DC_FILTER, // DependentColumnFilter + FAMILY_FILTER, // FamilyFilter + QUALIFIER_FILTER, // QualifierFilter + ROW_FILTER, // RowFilter + VALUE_FILTER, // ValueFilter + SCV_FILTER, // SingleColumnValueFilter + SCVE_FILTER, // SingleColumnValueExcludeFilter + AND_FILTER_LIST, // FilterList + OR_FILTER_LIST, // FilterList + L_FILTER_LIST, // FilterList }; - private static String[] FILTERS_INFO = { - STR_TS_FILTER, // TimestampsFilter - STR_L_TS_FILTER, // TimestampsFilter - STR_COL_PRE_FILTER, // ColumnPrefixFilter - STR_CP_FILTER, // ColumnPaginationFilter - STR_CR_FILTER, // ColumnRangeFilter - STR_CCG_FILTER, // ColumnCountGetFilter - STR_IS_FILTER, // InclusiveStopFilter - STR_PREFIX_FILTER, // PrefixFilter - STR_PAGE_FILTER, // PageFilter - STR_SKIP_FILTER, // SkipFilter - STR_WHILE_FILTER, // WhileMatchFilter - STR_KEY_ONLY_FILTER, // KeyOnlyFilter + private static String[] FILTERS_INFO = { STR_TS_FILTER, // TimestampsFilter + STR_L_TS_FILTER, // TimestampsFilter + STR_COL_PRE_FILTER, // ColumnPrefixFilter + STR_CP_FILTER, // ColumnPaginationFilter + STR_CR_FILTER, // ColumnRangeFilter + STR_CCG_FILTER, // ColumnCountGetFilter + STR_IS_FILTER, // InclusiveStopFilter + STR_PREFIX_FILTER, // PrefixFilter + STR_PAGE_FILTER, // PageFilter + STR_SKIP_FILTER, // SkipFilter + STR_WHILE_FILTER, // WhileMatchFilter + STR_KEY_ONLY_FILTER, // KeyOnlyFilter STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - STR_MCP_FILTER, // MultipleColumnPrefixFilter - STR_L_MCP_FILTER, // MultipleColumnPrefixFilter - STR_DC_FILTER, // DependentColumnFilter - STR_FAMILY_FILTER, // FamilyFilter - STR_QUALIFIER_FILTER, // QualifierFilter - STR_ROW_FILTER, // RowFilter - STR_VALUE_FILTER, // ValueFilter - STR_SCV_FILTER, // SingleColumnValueFilter - STR_SCVE_FILTER, // SingleColumnValueExcludeFilter - STR_AND_FILTER_LIST, // FilterList - STR_OR_FILTER_LIST, // FilterList - STR_L_FILTER_LIST, // FilterList + STR_MCP_FILTER, // MultipleColumnPrefixFilter + STR_L_MCP_FILTER, // MultipleColumnPrefixFilter + STR_DC_FILTER, // DependentColumnFilter + 
STR_FAMILY_FILTER, // FamilyFilter + STR_QUALIFIER_FILTER, // QualifierFilter + STR_ROW_FILTER, // RowFilter + STR_VALUE_FILTER, // ValueFilter + STR_SCV_FILTER, // SingleColumnValueFilter + STR_SCVE_FILTER, // SingleColumnValueExcludeFilter + STR_AND_FILTER_LIST, // FilterList + STR_OR_FILTER_LIST, // FilterList + STR_L_FILTER_LIST, // FilterList }; static { - assertEquals("The sizes of static arrays do not match: " - + "[FILTERS: %d <=> FILTERS_INFO: %d]", - FILTERS.length, FILTERS_INFO.length); + assertEquals("The sizes of static arrays do not match: " + "[FILTERS: %d <=> FILTERS_INFO: %d]", + FILTERS.length, FILTERS_INFO.length); } /** - * Test the client Operations' JSON encoding to ensure that produced JSON is - * parseable and that the details are present and not corrupted. - * + * Test the client Operations' JSON encoding to ensure that produced JSON is parseable and that + * the details are present and not corrupted. * @throws IOException if the JSON conversion fails */ @Test @@ -297,16 +286,14 @@ public void testOperationJSON() throws IOException { }.getType(); Map parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("startRow incorrect in Scan.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); + assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), + parsedJSON.get("startRow")); // check for the family and the qualifier. - List familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + List familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Scan.toJSON()", familyInfo); assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Scan.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Scan.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Get Operation Get get = new Get(ROW); @@ -315,16 +302,13 @@ public void testOperationJSON() throws IOException { json = get.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row incorrect in Get.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Get.toJSON()", familyInfo); assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Get.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Get.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Put operation Put put = new Put(ROW); @@ -333,17 +317,14 @@ public void testOperationJSON() throws IOException { json = put.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Put.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Put.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. 
- familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Put.toJSON()", familyInfo); assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); Map kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Put.toJSON()", - Bytes.toStringBinary(QUALIFIER), - kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Put.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()", VALUE.length, ((Number) kvMap.get("vlen")).intValue()); @@ -354,16 +335,14 @@ public void testOperationJSON() throws IOException { json = delete.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Delete.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Delete.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Delete.toJSON()", familyInfo); assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Delete.toJSON()", - Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Delete.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); } @Test @@ -386,7 +365,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2013L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -395,7 +374,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -405,7 +384,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(1970L, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -414,29 +393,16 @@ public void testPutCreationWithByteBuffer() { @Test @SuppressWarnings("rawtypes") public void testOperationSubClassMethodsAreBuilderStyle() { - /* All Operation subclasses should have a builder style setup where setXXX/addXXX methods - * can 
be chainable together: - * . For example: - * Scan scan = new Scan() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * All Operation subclasses should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: Scan scan = new Scan() .setFoo(foo) .setBar(bar) + * .setBuz(buz) This test ensures that all methods starting with "set" returns the declaring + * object */ // TODO: We should ensure all subclasses of Operation is checked. - Class[] classes = new Class[] { - Operation.class, - OperationWithAttributes.class, - Mutation.class, - Query.class, - Delete.class, - Increment.class, - Append.class, - Put.class, - Get.class, - Scan.class}; + Class[] classes = new Class[] { Operation.class, OperationWithAttributes.class, Mutation.class, + Query.class, Delete.class, Increment.class, Append.class, Put.class, Get.class, Scan.class }; BuilderStyleTest.assertClassesAreBuilderStyle(classes); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java index ef9d4c96d282..1e533633c767 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,16 +27,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) /** - * Addresses HBASE-6047 - * We test put.has call with all of its polymorphic magic + * Addresses HBASE-6047 We test put.has call with all of its polymorphic magic */ public class TestPutDotHas { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPutDotHas.class); + HBaseClassTestRule.forClass(TestPutDotHas.class); public static final byte[] ROW_01 = Bytes.toBytes("row-01"); public static final byte[] QUALIFIER_01 = Bytes.toBytes("qualifier-01"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java index 8572c0b47a1a..f57145cdb1b2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java index 3b66f7eb2e60..f74b79a0672e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -141,7 +141,7 @@ public void testContainsRange() { @Test public void testContainsRangeForMetaTable() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build(); byte[] startRow = HConstants.EMPTY_START_ROW; byte[] row1 = Bytes.toBytes("a,a,0"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java index 5a211719227b..0403ca647355 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,40 +34,38 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionInfoDisplay { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInfoDisplay.class); + HBaseClassTestRule.forClass(TestRegionInfoDisplay.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @Test public void testRegionDetailsForDisplay() throws IOException { - byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03}; - byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04}; + byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 }; + byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 }; Configuration conf = new Configuration(); conf.setBoolean("hbase.display.keys", false); RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(startKey).setEndKey(endKey).build(); checkEquality(ri, conf); // check HRIs with non-default replicaId - ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(EnvironmentEdgeManager.currentTime()) - .setReplicaId(1).build(); + ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setStartKey(startKey) + .setEndKey(endKey).setSplit(false).setRegionId(EnvironmentEdgeManager.currentTime()) + .setReplicaId(1).build(); checkEquality(ri, conf); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY, - RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); + RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); 
Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_START_KEY, - RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); + RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); RegionState state = RegionState.createForTesting(ri, RegionState.State.OPEN); String descriptiveNameForDisplay = - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); String originalDescriptive = state.toDescriptiveString(); checkDescriptiveNameEquality(descriptiveNameForDisplay, originalDescriptive, startKey); @@ -75,25 +73,22 @@ public void testRegionDetailsForDisplay() throws IOException { Assert.assertArrayEquals(endKey, RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(startKey, RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); Assert.assertEquals(originalDescriptive, - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); } private void checkDescriptiveNameEquality(String descriptiveNameForDisplay, String origDesc, - byte[] startKey) { + byte[] startKey) { // except for the "hidden-start-key" substring everything else should exactly match - String firstPart = descriptiveNameForDisplay.substring(0, - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); - String secondPart = descriptiveNameForDisplay.substring( - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + - RegionInfoDisplay.HIDDEN_START_KEY.length); + String firstPart = descriptiveNameForDisplay.substring(0, descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); + String secondPart = descriptiveNameForDisplay.substring(descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + + RegionInfoDisplay.HIDDEN_START_KEY.length); String firstPartOrig = origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey))); String secondPartOrig = origDesc.substring( - origDesc.indexOf(Bytes.toStringBinary(startKey)) + - Bytes.toStringBinary(startKey).length()); - assert(firstPart.equals(firstPartOrig)); - assert(secondPart.equals(secondPartOrig)); + origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length()); + assert (firstPart.equals(firstPartOrig)); + assert (secondPart.equals(secondPartOrig)); } private void checkEquality(RegionInfo ri, Configuration conf) throws IOException { @@ -102,18 +97,18 @@ private void checkEquality(RegionInfo ri, Configuration conf) throws IOException byte[][] modifiedRegionNameParts = RegionInfo.parseRegionName(modifiedRegionName); byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); - //same number of parts - assert(modifiedRegionNameParts.length == regionNameParts.length); + // same number of parts + assert (modifiedRegionNameParts.length == regionNameParts.length); for (int i = 0; i < regionNameParts.length; i++) { // all parts should match except for [1] where in the modified one, // we should have "hidden_start_key" if (i != 1) { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertArrayEquals(regionNameParts[i], 
modifiedRegionNameParts[i]); } else { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertNotEquals(regionNameParts[i], modifiedRegionNameParts[i]); Assert.assertArrayEquals(modifiedRegionNameParts[1], RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java index 5b591030c966..7ee074b84b9f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestResultStatsUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultStatsUtil.class); + HBaseClassTestRule.forClass(TestResultStatsUtil.class); - private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, - 10,90); - private static final byte[] regionName = {80}; + private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, 10, 90); + private static final byte[] regionName = { 80 }; private static final ServerName server = ServerName.parseServerName("3.1.yg.n,50,1"); @Test @@ -51,12 +50,12 @@ public void testUpdateStats() { // Check that the tracker was updated as expected ServerStatistics stats = serverStatisticTracker.getStats(server); - assertEquals(regionLoadStats.memstoreLoad, stats.getStatsForRegion(regionName) - .getMemStoreLoadPercent()); - assertEquals(regionLoadStats.compactionPressure, stats.getStatsForRegion(regionName) - .getCompactionPressure()); - assertEquals(regionLoadStats.heapOccupancy, stats.getStatsForRegion(regionName) - .getHeapOccupancyPercent()); + assertEquals(regionLoadStats.memstoreLoad, + stats.getStatsForRegion(regionName).getMemStoreLoadPercent()); + assertEquals(regionLoadStats.compactionPressure, + stats.getStatsForRegion(regionName).getCompactionPressure()); + assertEquals(regionLoadStats.heapOccupancy, + stats.getStatsForRegion(regionName).getHeapOccupancyPercent()); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java index 7b584e948610..f380be0048f3 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,15 @@ import org.junit.rules.TestName; import org.mockito.Mockito; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRetriesExhaustedWithDetailsException { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRetriesExhaustedWithDetailsException.class); + HBaseClassTestRule.forClass(TestRetriesExhaustedWithDetailsException.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Assert that a RetriesExhaustedException that has RegionTooBusyException outputs region name. @@ -53,7 +54,7 @@ public void testRegionTooBusyException() { List hostAndPorts = new ArrayList<>(1); hostAndPorts.add("example.com:1234"); RetriesExhaustedException ree = - new RetriesExhaustedWithDetailsException(ts, rows, hostAndPorts); + new RetriesExhaustedWithDetailsException(ts, rows, hostAndPorts); assertTrue(ree.toString().contains(regionName)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java index 64983089ae06..9b2c598eaaae 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRowComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowComparator.class); + HBaseClassTestRule.forClass(TestRowComparator.class); - private static final List DEFAULT_ROWS = IntStream.range(1, 9) - .mapToObj(String::valueOf).map(Bytes::toBytes).collect(Collectors.toList()); + private static final List DEFAULT_ROWS = IntStream.range(1, 9).mapToObj(String::valueOf) + .map(Bytes::toBytes).collect(Collectors.toList()); @Test public void testPut() { @@ -71,8 +71,7 @@ public void testGet() { } private static void test(Function f) { - List rows = new ArrayList(DEFAULT_ROWS.stream() - .map(f).collect(Collectors.toList())); + List rows = new ArrayList(DEFAULT_ROWS.stream().map(f).collect(Collectors.toList())); do { Collections.shuffle(rows); } while (needShuffle(rows)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java index 146895aca166..6c97c19f96cc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -168,7 +168,8 @@ protected CompletableFuture> fetchEndpoints() { return CompletableFuture.completedFuture(BOOTSTRAP_NODES); } - @Override public String getConnectionString() { + @Override + public String getConnectionString() { return "unimplemented"; } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java index 0fbf4bb07962..4023d745c065 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestScan { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScan.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScan.class); @Test public void testAttributesSerialization() throws IOException { @@ -70,22 +69,14 @@ public void testAttributesSerialization() throws IOException { @Test public void testGetToScan() throws Exception { Get get = new Get(Bytes.toBytes(1)); - get.setCacheBlocks(true) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("get") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setRowOffsetPerColumnFamily(5) - .setTimeRange(0, 13) - .setAttribute("att_v0", Bytes.toBytes("att_v0")) - .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123) - .setReplicaId(3) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAuthorizations(new Authorizations("test_label")) - .setPriority(3); + get.setCacheBlocks(true).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("get").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLoadColumnFamiliesOnDemand(false).setMaxResultsPerColumnFamily(1000).readVersions(9999) + .setRowOffsetPerColumnFamily(5).setTimeRange(0, 13) + .setAttribute("att_v0", Bytes.toBytes("att_v0")) + .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123).setReplicaId(3) + .setACL("test_user", new Permission(Permission.Action.READ)) + .setAuthorizations(new Authorizations("test_label")).setPriority(3); Scan scan = new Scan(get); assertEquals(get.getCacheBlocks(), scan.getCacheBlocks()); @@ -94,7 +85,7 @@ public void testGetToScan() throws Exception { assertEquals(get.getId(), scan.getId()); assertEquals(get.getIsolationLevel(), scan.getIsolationLevel()); assertEquals(get.getLoadColumnFamiliesOnDemandValue(), - scan.getLoadColumnFamiliesOnDemandValue()); + scan.getLoadColumnFamiliesOnDemandValue()); assertEquals(get.getMaxResultsPerColumnFamily(), scan.getMaxResultsPerColumnFamily()); assertEquals(get.getMaxVersions(), scan.getMaxVersions()); assertEquals(get.getRowOffsetPerColumnFamily(), scan.getRowOffsetPerColumnFamily()); @@ -102,9 +93,9 @@ public void testGetToScan() throws Exception { assertEquals(get.getTimeRange().getMax(), scan.getTimeRange().getMax()); 
assertTrue(Bytes.equals(get.getAttribute("att_v0"), scan.getAttribute("att_v0"))); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); assertEquals(get.getReplicaId(), scan.getReplicaId()); assertEquals(get.getACL(), scan.getACL()); assertEquals(get.getAuthorizations().getLabels(), scan.getAuthorizations().getLabels()); @@ -125,22 +116,22 @@ public void testScanAttributes() { scan.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1"))); // overriding attribute value scan.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1"))); // adding another attribute scan.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2"))); Assert.assertEquals(2, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - scan.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2"))); // removing attribute scan.setAttribute("attribute2", null); @@ -198,7 +189,7 @@ public void testSetStartRowAndSetStopRow() { scan.withStartRow(new byte[1]); scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -209,7 +200,7 @@ public void testSetStartRowAndSetStopRow() { scan.withStopRow(new byte[1]); scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -222,36 +213,17 @@ public void testScanCopyConstructor() throws Exception { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - 
.setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setReadType(ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) - .setTimeRange(0, 13); + .setACL("test_user", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED).setLimit(100) + .setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100).setMaxResultsPerColumnFamily(1000) + .readVersions(9999).setMvccReadPoint(5).setNeedCursorResult(true).setPriority(1).setRaw(true) + .setReplicaId(3).setReversed(true).setRowOffsetPerColumnFamily(5) + .setStartStopRowForPrefixScan(Bytes.toBytes("row_")).setScanMetricsEnabled(true) + .setReadType(ReadType.STREAM).withStartRow(Bytes.toBytes("row_1")) + .withStopRow(Bytes.toBytes("row_2")).setTimeRange(0, 13); // create a copy of existing scan object Scan scanCopy = new Scan(scan); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java index 4b63f5b2168d..a7fcac95ee95 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,15 +47,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestSimpleRequestController { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSimpleRequestController.class); + HBaseClassTestRule.forClass(TestSimpleRequestController.class); - private static final TableName DUMMY_TABLE - = TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3"); @@ -124,17 +123,15 @@ public void testTaskCheckerHost() throws IOException { final Map taskCounterPerServer = new HashMap<>(); final Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker countChecker = - new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + new SimpleRequestController.TaskCountChecker(maxTotalConcurrentTasks, + maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, tasksInProgress, + taskCounterPerServer, taskCounterPerRegion); final long maxHeapSizePerRequest = 2 * 1024 * 1024; // unlimiited SimpleRequestController.RequestHeapSizeChecker sizeChecker = - new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); RequestController.Checker checker = - SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker)); + SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker)); ReturnCode loc1Code = checker.canTakeRow(LOC1, createPut(maxHeapSizePerRequest)); assertEquals(ReturnCode.INCLUDE, loc1Code); @@ -165,8 +162,8 @@ public void testTaskCheckerHost() throws IOException { @Test public void testRequestHeapSizeChecker() throws IOException { final long maxHeapSizePerRequest = 2 * 1024 * 1024; - SimpleRequestController.RequestHeapSizeChecker checker - = new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + SimpleRequestController.RequestHeapSizeChecker checker = + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); // inner state is unchanged. for (int i = 0; i != 10; ++i) { @@ -207,10 +204,10 @@ public void testRequestHeapSizeChecker() throws IOException { @Test public void testRequestRowsChecker() throws IOException { final long maxRowCount = 100; - SimpleRequestController.RequestRowsChecker checker - = new SimpleRequestController.RequestRowsChecker(maxRowCount); + SimpleRequestController.RequestRowsChecker checker = + new SimpleRequestController.RequestRowsChecker(maxRowCount); - final long heapSizeOfRow = 100; //unused + final long heapSizeOfRow = 100; // unused // inner state is unchanged. 
for (int i = 0; i != 10; ++i) { ReturnCode code = checker.canTakeOperation(LOC1, heapSizeOfRow); @@ -252,8 +249,8 @@ public void testRequestRowsChecker() throws IOException { @Test public void testSubmittedSizeChecker() { final long maxHeapSizeSubmit = 2 * 1024 * 1024; - SimpleRequestController.SubmittedSizeChecker checker - = new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); + SimpleRequestController.SubmittedSizeChecker checker = + new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); for (int i = 0; i != 10; ++i) { ReturnCode include = checker.canTakeOperation(LOC1, 100000); @@ -289,10 +286,8 @@ public void testTaskCountChecker() throws InterruptedIOException { Map taskCounterPerServer = new HashMap<>(); Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker checker = new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + maxTotalConcurrentTasks, maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, + tasksInProgress, taskCounterPerServer, taskCounterPerRegion); // inner state is unchanged. for (int i = 0; i != 10; ++i) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index a9b7cd99fe37..81db193a3042 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -155,9 +155,8 @@ public void testSetListRemoveCP() throws Exception { @Test(expected = IllegalArgumentException.class) public void testRemoveNonExistingCoprocessor() throws Exception { String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertFalse(desc.hasCoprocessor(className)); TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className).build(); } @@ -354,23 +353,18 @@ public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(131072).build(); - TableDescriptor htd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setColumnFamily(hcd).setDurability(Durability.ASYNC_WAL).build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); - htd = TableDescriptorBuilder.newBuilder(htd) - .setMaxFileSize("10737942528") - .setMemStoreFlushSize("256MB") - .build(); + htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB").build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + "MAX_FILESIZE => 
'10737942528 B (10GB 512KB)', " + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", @@ -381,7 +375,7 @@ public void testStringCustomizedValues() throws HBaseException { public void testGetSetRegionServerGroup() { String groupName = name.getMethodName(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setRegionServerGroup(groupName).build(); + .setRegionServerGroup(groupName).build(); assertEquals(htd.getValue(RSGroupInfo.TABLE_DESC_PROP_GROUP), groupName); htd = TableDescriptorBuilder.newBuilder(htd).setRegionServerGroup(null).build(); assertNull(htd.getValue(RSGroupInfo.TABLE_DESC_PROP_GROUP)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java index 44d199764c5b..8ba350b03b10 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptorUtils.TableDescriptorDelta; @@ -35,7 +33,7 @@ public class TestTableDescriptorUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableDescriptorUtils.class); + HBaseClassTestRule.forClass(TestTableDescriptorUtils.class); @Test public void testDelta() { @@ -43,32 +41,25 @@ public void testDelta() { ColumnFamilyDescriptor cf2 = ColumnFamilyDescriptorBuilder.of("cf2"); ColumnFamilyDescriptor cf3 = ColumnFamilyDescriptorBuilder.of("cf3"); ColumnFamilyDescriptor cf4 = ColumnFamilyDescriptorBuilder.of("cf4"); - TableDescriptor td = TableDescriptorBuilder - .newBuilder(TableName.valueOf("test")) - .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)) - .build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test")) + .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)).build(); TableDescriptorDelta selfCompare = TableDescriptorUtils.computeDelta(td, td); assertEquals(0, selfCompare.getColumnsAdded().size()); assertEquals(0, selfCompare.getColumnsDeleted().size()); assertEquals(0, selfCompare.getColumnsModified().size()); - ColumnFamilyDescriptor modCf2 = ColumnFamilyDescriptorBuilder - .newBuilder(cf2).setMaxVersions(5).build(); - ColumnFamilyDescriptor modCf3 = ColumnFamilyDescriptorBuilder - .newBuilder(cf3).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf2 = + ColumnFamilyDescriptorBuilder.newBuilder(cf2).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf3 = + ColumnFamilyDescriptorBuilder.newBuilder(cf3).setMaxVersions(5).build(); ColumnFamilyDescriptor cf5 = ColumnFamilyDescriptorBuilder.of("cf5"); ColumnFamilyDescriptor cf6 = ColumnFamilyDescriptorBuilder.of("cf6"); ColumnFamilyDescriptor cf7 = ColumnFamilyDescriptorBuilder.of("cf7"); - TableDescriptor newTd = TableDescriptorBuilder - .newBuilder(td) - .removeColumnFamily(Bytes.toBytes("cf1")) - .modifyColumnFamily(modCf2) - .modifyColumnFamily(modCf3) - .setColumnFamily(cf5) - .setColumnFamily(cf6) - 
.setColumnFamily(cf7) - .build(); + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(td).removeColumnFamily(Bytes.toBytes("cf1")) + .modifyColumnFamily(modCf2).modifyColumnFamily(modCf3).setColumnFamily(cf5) + .setColumnFamily(cf6).setColumnFamily(cf7).build(); TableDescriptorDelta delta = TableDescriptorUtils.computeDelta(td, newTd); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java index 2c7061259f90..57f62148004d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java @@ -75,56 +75,43 @@ private static void populateChildren(final Map spansById) { } private static List findRoots(final Map spansById) { - return spansById.values() - .stream() + return spansById.values().stream() .filter(node -> Objects.equals(node.spanData.getParentSpanId(), SpanId.getInvalid())) .collect(Collectors.toList()); } public void render(final Consumer writer) { - for (ListIterator iter = graphs.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = graphs.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node node = iter.next(); render(writer, node, 0, idx == 0); } } - private static void render( - final Consumer writer, - final Node node, - final int indent, - final boolean isFirst - ) { + private static void render(final Consumer writer, final Node node, final int indent, + final boolean isFirst) { writer.accept(render(node.spanData, indent, isFirst)); final List children = new ArrayList<>(node.children.values()); - for (ListIterator iter = children.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = children.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node child = iter.next(); render(writer, child, indent + 2, idx == 0); } } - private static String render( - final SpanData spanData, - final int indent, - final boolean isFirst - ) { + private static String render(final SpanData spanData, final int indent, final boolean isFirst) { final StringBuilder sb = new StringBuilder(); for (int i = 0; i < indent; i++) { sb.append(' '); } - return sb.append(isFirst ? "└─ " : "├─ ") - .append(render(spanData)) - .toString(); + return sb.append(isFirst ? 
"└─ " : "├─ ").append(render(spanData)).toString(); } private static String render(final SpanData spanData) { return new ToStringBuilder(spanData, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("spanId", spanData.getSpanId()) - .append("name", spanData.getName()) - .append("hasEnded", spanData.hasEnded()) - .toString(); + .append("spanId", spanData.getSpanId()).append("name", spanData.getName()) + .append("hasEnded", spanData.hasEnded()).toString(); } private static class Node { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java index c7bb205076cd..d73abba69078 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java @@ -20,6 +20,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasProperty; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import java.util.Arrays; @@ -32,12 +33,11 @@ */ public final class AttributesMatchers { - private AttributesMatchers() { } + private AttributesMatchers() { + } - public static Matcher containsEntry( - Matcher> keyMatcher, - Matcher valueMatcher - ) { + public static Matcher containsEntry(Matcher> keyMatcher, + Matcher valueMatcher) { return new IsAttributesContaining<>(keyMatcher, valueMatcher); } @@ -53,10 +53,8 @@ public static Matcher containsEntryWithStringValuesOf(String key, St return containsEntry(AttributeKey.stringArrayKey(key), Arrays.asList(values)); } - public static Matcher containsEntryWithStringValuesOf( - String key, - Matcher> matcher - ) { + public static Matcher containsEntryWithStringValuesOf(String key, + Matcher> matcher) { return new IsAttributesContaining<>(equalTo(AttributeKey.stringArrayKey(key)), matcher); } @@ -64,37 +62,28 @@ private static final class IsAttributesContaining extends TypeSafeMatcher> keyMatcher; private final Matcher valueMatcher; - private IsAttributesContaining( - final Matcher> keyMatcher, - final Matcher valueMatcher - ) { + private IsAttributesContaining(final Matcher> keyMatcher, + final Matcher valueMatcher) { this.keyMatcher = keyMatcher; this.valueMatcher = valueMatcher; } @Override protected boolean matchesSafely(Attributes item) { - return item.asMap().entrySet().stream().anyMatch(e -> allOf( - hasProperty("key", keyMatcher), - hasProperty("value", valueMatcher)) - .matches(e)); + return item.asMap().entrySet().stream().anyMatch( + e -> allOf(hasProperty("key", keyMatcher), hasProperty("value", valueMatcher)).matches(e)); } @Override public void describeMismatchSafely(Attributes item, Description mismatchDescription) { - mismatchDescription - .appendText("Attributes was ") - .appendValueList("[", ", ", "]", item.asMap().entrySet()); + mismatchDescription.appendText("Attributes was ").appendValueList("[", ", ", "]", + item.asMap().entrySet()); } @Override public void describeTo(Description description) { - description - .appendText("Attributes containing [") - .appendDescriptionOf(keyMatcher) - .appendText("->") - .appendDescriptionOf(valueMatcher) - .appendText("]"); + description.appendText("Attributes containing [").appendDescriptionOf(keyMatcher) + .appendText("->").appendDescriptionOf(valueMatcher).appendText("]"); } } } diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java index e24245fb4c62..ec2110b9a345 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.hamcrest.Matchers.equalTo; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.sdk.trace.data.EventData; import org.hamcrest.FeatureMatcher; @@ -28,12 +29,14 @@ */ public final class EventMatchers { - private EventMatchers() { } + private EventMatchers() { + } public static Matcher hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "EventData having attributes that ", "attributes") { - @Override protected Attributes featureValueOf(EventData actual) { + return new FeatureMatcher(matcher, "EventData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(EventData actual) { return actual.getAttributes(); } }; @@ -45,7 +48,8 @@ public static Matcher hasName(String name) { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "EventData with a name that ", "name") { - @Override protected String featureValueOf(EventData actual) { + @Override + protected String featureValueOf(EventData actual) { return actual.getName(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java index 026deb0afe45..6d0468c32ed5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; @@ -39,21 +40,22 @@ */ public final class SpanDataMatchers { - private SpanDataMatchers() { } + private SpanDataMatchers() { + } public static Matcher hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData having attributes that ", "attributes" - ) { - @Override protected Attributes featureValueOf(SpanData item) { + return new FeatureMatcher(matcher, "SpanData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(SpanData item) { return item.getAttributes(); } }; } public static Matcher hasDuration(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData having duration that ", "duration") { + return new FeatureMatcher(matcher, "SpanData having duration that ", + "duration") { @Override protected Duration featureValueOf(SpanData item) { return Duration.ofNanos(item.getEndEpochNanos() - item.getStartEpochNanos()); @@ -63,19 +65,23 @@ protected Duration featureValueOf(SpanData item) { public static Matcher hasEnded() { return new TypeSafeMatcher() { - @Override protected boolean matchesSafely(SpanData item) { + @Override + protected boolean matchesSafely(SpanData item) { return item.hasEnded(); } - @Override public 
void describeTo(Description description) { + + @Override + public void describeTo(Description description) { description.appendText("SpanData that hasEnded"); } }; } public static Matcher hasEvents(Matcher> matcher) { - return new FeatureMatcher>( - matcher, "SpanData having events that", "events") { - @Override protected Iterable featureValueOf(SpanData item) { + return new FeatureMatcher>(matcher, + "SpanData having events that", "events") { + @Override + protected Iterable featureValueOf(SpanData item) { return item.getEvents(); } }; @@ -88,21 +94,20 @@ public static Matcher hasExceptionWithType(Matcher mat public static Matcher hasException(Matcher matcher) { return new FeatureMatcher(matcher, "SpanData having Exception with Attributes that", "exception attributes") { - @Override protected Attributes featureValueOf(SpanData actual) { - return actual.getEvents() - .stream() + @Override + protected Attributes featureValueOf(SpanData actual) { + return actual.getEvents().stream() .filter(e -> Objects.equals(SemanticAttributes.EXCEPTION_EVENT_NAME, e.getName())) - .map(EventData::getAttributes) - .findFirst() - .orElse(null); + .map(EventData::getAttributes).findFirst().orElse(null); } }; } public static Matcher hasKind(SpanKind kind) { - return new FeatureMatcher( - equalTo(kind), "SpanData with kind that", "SpanKind") { - @Override protected SpanKind featureValueOf(SpanData item) { + return new FeatureMatcher(equalTo(kind), "SpanData with kind that", + "SpanKind") { + @Override + protected SpanKind featureValueOf(SpanData item) { return item.getKind(); } }; @@ -114,7 +119,8 @@ public static Matcher hasName(String name) { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a name that", "name") { - @Override protected String featureValueOf(SpanData item) { + @Override + protected String featureValueOf(SpanData item) { return item.getName(); } }; @@ -130,9 +136,9 @@ public static Matcher hasParentSpanId(SpanData parent) { public static Matcher hasParentSpanId(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a parentSpanId that", - "parentSpanId" - ) { - @Override protected String featureValueOf(SpanData item) { + "parentSpanId") { + @Override + protected String featureValueOf(SpanData item) { return item.getParentSpanId(); } }; @@ -141,13 +147,15 @@ public static Matcher hasParentSpanId(Matcher matcher) { public static Matcher hasStatusWithCode(StatusCode statusCode) { final Matcher matcher = is(equalTo(statusCode)); return new TypeSafeMatcher() { - @Override protected boolean matchesSafely(SpanData item) { + @Override + protected boolean matchesSafely(SpanData item) { final StatusData statusData = item.getStatus(); - return statusData != null - && statusData.getStatusCode() != null + return statusData != null && statusData.getStatusCode() != null && matcher.matches(statusData.getStatusCode()); } - @Override public void describeTo(Description description) { + + @Override + public void describeTo(Description description) { description.appendText("SpanData with StatusCode that ").appendDescriptionOf(matcher); } }; @@ -158,9 +166,10 @@ public static Matcher hasTraceId(String traceId) { } public static Matcher hasTraceId(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData with a traceId that ", "traceId") { - @Override protected String featureValueOf(SpanData item) { + return new FeatureMatcher(matcher, "SpanData with a traceId that ", + "traceId") { + @Override + protected String featureValueOf(SpanData item) 
{ return item.getTraceId(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java index 98f1ffd9c913..3b5e453856ec 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry; import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasAttributes; import static org.hamcrest.Matchers.allOf; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.sdk.trace.data.SpanData; import org.apache.hadoop.hbase.TableName; @@ -29,24 +29,23 @@ public final class TraceTestUtil { - private TraceTestUtil() { } + private TraceTestUtil() { + } /** * All {@link Span}s involving {@code conn} should include these attributes. */ public static Matcher buildConnectionAttributesMatcher(AsyncConnectionImpl conn) { - return hasAttributes(allOf( - containsEntry("db.system", "hbase"), - containsEntry("db.connection_string", "nothing"), - containsEntry("db.user", conn.getUser().toString()))); + return hasAttributes( + allOf(containsEntry("db.system", "hbase"), containsEntry("db.connection_string", "nothing"), + containsEntry("db.user", conn.getUser().toString()))); } /** * All {@link Span}s involving {@code tableName} should include these attributes. */ public static Matcher buildTableAttributesMatcher(TableName tableName) { - return hasAttributes(allOf( - containsEntry("db.name", tableName.getNamespaceAsString()), + return hasAttributes(allOf(containsEntry("db.name", tableName.getNamespaceAsString()), containsEntry("db.hbase.table", tableName.getNameAsString()))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java index 275fb0931aec..401d38d66cd0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestClientExceptionsUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientExceptionsUtil.class); + HBaseClassTestRule.forClass(TestClientExceptionsUtil.class); @Test public void testFindException() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java index 868f3b7fda43..721c258fb303 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestComparators { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestComparators.class); + HBaseClassTestRule.forClass(TestComparators.class); @Test public void testCellFieldsCompare() throws Exception { @@ -105,55 +105,55 @@ public void testCellFieldsCompare() throws Exception { assertFalse(PrivateCellUtil.qualifierStartsWith(kv, q2)); assertFalse(PrivateCellUtil.qualifierStartsWith(kv, Bytes.toBytes("longerthanthequalifier"))); - //Binary component comparisons + // Binary component comparisons byte[] val = Bytes.toBytes("abcd"); kv = new KeyValue(r0, f, q1, val); buffer = ByteBuffer.wrap(kv.getBuffer()); bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); - //equality check - //row comparison - //row is "row0"(set by variable r0) - //and we are checking for equality to 'o' at position 1 - //'r' is at position 0. + // equality check + // row comparison + // row is "row0"(set by variable r0) + // and we are checking for equality to 'o' at position 1 + // 'r' is at position 0. byte[] component = Bytes.toBytes("o"); comparable = new BinaryComponentComparator(component, 1); assertEquals(0, PrivateCellUtil.compareRow(bbCell, comparable)); assertEquals(0, PrivateCellUtil.compareRow(kv, comparable)); - //value comparison - //value is "abcd"(set by variable val). - //and we are checking for equality to 'c' at position 2. - //'a' is at position 0. + // value comparison + // value is "abcd"(set by variable val). + // and we are checking for equality to 'c' at position 2. + // 'a' is at position 0. component = Bytes.toBytes("c"); comparable = new BinaryComponentComparator(component, 2); - assertEquals(0,PrivateCellUtil.compareValue(bbCell, comparable)); - assertEquals(0,PrivateCellUtil.compareValue(kv, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(bbCell, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(kv, comparable)); - //greater than + // greater than component = Bytes.toBytes("z"); - //checking for greater than at position 1. - //for both row("row0") and value("abcd") - //'z' > 'r' + // checking for greater than at position 1. 
+ // for both row("row0") and value("abcd") + // 'z' > 'r' comparable = new BinaryComponentComparator(component, 1); - //row comparison + // row comparison assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) > 0); - //value comparison - //'z' > 'a' + // value comparison + // 'z' > 'a' assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) > 0); - //less than + // less than component = Bytes.toBytes("a"); - //checking for less than at position 1 for row ("row0") + // checking for less than at position 1 for row ("row0") comparable = new BinaryComponentComparator(component, 1); - //row comparison - //'a' < 'r' + // row comparison + // 'a' < 'r' assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) < 0); - //value comparison - //checking for less than at position 2 for value("abcd") - //'a' < 'c' + // value comparison + // checking for less than at position 2 for value("abcd") + // 'a' < 'c' comparable = new BinaryComponentComparator(component, 2); assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) < 0); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java index df63523b24cd..860544ba1350 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestKeyOnlyFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestKeyOnlyFilter.class); + HBaseClassTestRule.forClass(TestKeyOnlyFilter.class); @Parameterized.Parameter public boolean lenAsVal; @@ -64,12 +64,10 @@ public void testKeyOnly() throws Exception { byte[] q = Bytes.toBytes("qual1"); byte[] v = Bytes.toBytes("val1"); byte[] tags = Bytes.toBytes("tag1"); - KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, - v.length, tags); + KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); - ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, - buffer.remaining()); + ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); // KV format: // Rebuild as: <0:4> @@ -86,41 +84,34 @@ public void testKeyOnly() throws Exception { KeyValue KeyOnlyKeyValue = new KeyValue(newBuffer); KeyOnlyCell keyOnlyCell = new KeyOnlyCell(kv, lenAsVal); - KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = new KeyOnlyByteBufferExtendedCell( - bbCell, lenAsVal); + KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = + new KeyOnlyByteBufferExtendedCell(bbCell, lenAsVal); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyCell)); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil - .matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell - .getValueLength()); + assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell.getValueLength()); assertEquals(8 + keyLen + (lenAsVal ? 4 : 0), KeyOnlyKeyValue.getSerializedSize()); assertEquals(8 + keyLen + (lenAsVal ? 
4 : 0), keyOnlyCell.getSerializedSize()); if (keyOnlyByteBufferedCell.getValueLength() > 0) { - assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); } assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyCell.getTimestamp()); - assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell - .getTimestamp()); + assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell.getTimestamp()); assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyCell.getTypeByte()); - assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell - .getTypeByte()); + assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell.getTypeByte()); assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyCell.getTagsLength()); - assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell - .getTagsLength()); + assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell.getTagsLength()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java index 60c8cd084997..ae5fb23161a7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,14 @@ public class TestLongComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLongComparator.class); + HBaseClassTestRule.forClass(TestLongComparator.class); - private long[] values = { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, - Long.MAX_VALUE }; + private long[] values = + { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, Long.MAX_VALUE }; @Test public void testSimple() { - for (int i = 1; i < values.length ; i++) { + for (int i = 1; i < values.length; i++) { for (int j = 0; j < i; j++) { LongComparator cp = new LongComparator(values[i]); assertEquals(1, cp.compareTo(Bytes.toBytes(values[j]))); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java index 62eba1ecea5c..f9c93811b4eb 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ public class TestCellBlockBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellBlockBuilder.class); + HBaseClassTestRule.forClass(TestCellBlockBuilder.class); private static final Logger LOG = LoggerFactory.getLogger(TestCellBlockBuilder.class); @@ -71,19 +71,20 @@ public void testBuildCellBlock() throws IOException { } static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final Codec codec, - final CompressionCodec compressor) throws IOException { + final CompressionCodec compressor) throws IOException { doBuildCellBlockUndoCellBlock(builder, codec, compressor, 10, 1, false); } static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final Codec codec, - final CompressionCodec compressor, final int count, final int size, final boolean sized) - throws IOException { + final CompressionCodec compressor, final int count, final int size, final boolean sized) + throws IOException { Cell[] cells = getCells(count, size); - CellScanner cellScanner = sized ? getSizedCellScanner(cells) - : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); + CellScanner cellScanner = sized + ? getSizedCellScanner(cells) + : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); ByteBuffer bb = builder.buildCellBlock(codec, compressor, cellScanner); - cellScanner = builder.createCellScannerReusingBuffers(codec, compressor, - new SingleByteBuff(bb)); + cellScanner = + builder.createCellScannerReusingBuffers(codec, compressor, new SingleByteBuff(bb)); int i = 0; while (cellScanner.advance()) { i++; @@ -148,7 +149,7 @@ private static void usage(final int errorCode) { } private static void timerTests(final CellBlockBuilder builder, final int count, final int size, - final Codec codec, final CompressionCodec compressor) throws IOException { + final Codec codec, final CompressionCodec compressor) throws IOException { final int cycles = 1000; StopWatch timer = new StopWatch(); timer.start(); @@ -157,7 +158,7 @@ private static void timerTests(final CellBlockBuilder builder, final int count, } timer.stop(); LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count=" - + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); + + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); timer.reset(); timer.start(); for (int i = 0; i < cycles; i++) { @@ -165,18 +166,17 @@ private static void timerTests(final CellBlockBuilder builder, final int count, } timer.stop(); LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count=" - + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); + + count + ", size=" + size + ", + took=" + timer.getTime() + "ms"); } private static void timerTest(final CellBlockBuilder builder, final StopWatch timer, - final int count, final int size, final Codec codec, final CompressionCodec compressor, - final boolean sized) throws IOException { + final int count, final int size, final Codec codec, final CompressionCodec compressor, + final boolean sized) throws IOException { doBuildCellBlockUndoCellBlock(builder, codec, compressor, count, size, sized); } /** * For running a few tests of methods herein. 
- * * @param args the arguments to use for the timer test * @throws IOException if creating the build fails */ diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java index 48a079d3e75b..da962cac0d3c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,15 +33,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestConnectionId { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionId.class); + HBaseClassTestRule.forClass(TestConnectionId.class); private Configuration testConfig = HBaseConfiguration.create(); - private User testUser1 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); - private User testUser2 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); + private User testUser1 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); + private User testUser2 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); private String serviceName = "test"; private Address address = Address.fromParts("localhost", 999); private ConnectionId connectionId1 = new ConnectionId(testUser1, serviceName, address); @@ -71,9 +73,8 @@ public void testToString() { } /** - * Test if the over-ridden equals method satisfies all the properties - * (reflexive, symmetry, transitive and null) - * along with their hashcode + * Test if the over-ridden equals method satisfies all the properties (reflexive, symmetry, + * transitive and null) along with their hashcode */ @Test public void testEqualsWithHashCode() { @@ -87,8 +88,8 @@ public void testEqualsWithHashCode() { // Test the Transitive Property ConnectionId connectionId3 = new ConnectionId(testUser1, serviceName, address); - assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) && - connectionId1.equals(connectionId3)); + assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) + && connectionId1.equals(connectionId3)); assertEquals(connectionId.hashCode(), connectionId3.hashCode()); // Test For null @@ -99,8 +100,8 @@ public void testEqualsWithHashCode() { } /** - * Test the hashcode for same object and different object with both hashcode - * function and static hashcode function + * Test the hashcode for same object and different object with both hashcode function and static + * hashcode function */ @Test public void testHashCode() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index dc94e91f4fde..0dafef0b7649 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -92,7 +92,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { verify(mockAppender, times(1)).append(any(org.apache.logging.log4j.core.LogEvent.class)); assertEquals(org.apache.logging.log4j.Level.DEBUG, level.get()); - assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + - nullException.toString(), msg.get()); + assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + + nullException.toString(), msg.get()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java index d829b4bfd654..a0b68646b145 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestHBaseRpcControllerImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseRpcControllerImpl.class); + HBaseClassTestRule.forClass(TestHBaseRpcControllerImpl.class); @Test public void testListOfCellScannerables() throws IOException { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 45da1e8560df..c327896f72ab 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,15 +104,16 @@ public void testWrapConnectionException() throws Exception { Address addr = Address.fromParts("127.0.0.1", 12345); for (Throwable exception : exceptions) { if (exception instanceof TimeoutException) { - assertThat(IPCUtil.wrapException(addr, null, exception), instanceOf(TimeoutIOException.class)); + assertThat(IPCUtil.wrapException(addr, null, exception), + instanceOf(TimeoutIOException.class)); } else { - IOException ioe = IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, - exception); + IOException ioe = + IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, exception); // Assert that the exception contains the Region name if supplied. HBASE-25735. // Not all exceptions get the region stuffed into it. if (ioe.getMessage() != null) { - assertTrue(ioe.getMessage(). 
- contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); + assertTrue(ioe.getMessage() + .contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); } assertThat(ioe, instanceOf(exception.getClass())); } @@ -135,8 +136,8 @@ public void run() { if (depth <= IPCUtil.MAX_DEPTH) { if (numElements <= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call run directly but stack trace decreased from " + - numStackTraceElements.intValue() + " to " + numElements)); + new AssertionError("should call run directly but stack trace decreased from " + + numStackTraceElements.intValue() + " to " + numElements)); return; } numStackTraceElements.setValue(numElements); @@ -144,9 +145,9 @@ public void run() { } else { if (numElements >= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call eventLoop.execute to prevent stack overflow but" + - " stack trace increased from " + numStackTraceElements.intValue() + " to " + - numElements)); + new AssertionError("should call eventLoop.execute to prevent stack overflow but" + + " stack trace increased from " + numStackTraceElements.intValue() + " to " + + numElements)); } else { future.complete(null); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java index 8782fe116b07..a9c40fd3bb79 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java index df6e6f2045ac..dfa3450b74a6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRemoteWithExtrasException.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseServerException; @@ -37,18 +38,17 @@ public class TestRemoteWithExtrasException { HBaseClassTestRule.forClass(TestRemoteWithExtrasException.class); /** - * test verifies that we honor the inherent value of an exception for isServerOverloaded. - * We don't want a false value passed into RemoteWithExtrasExceptions to override the - * inherent value of an exception if it's already true. This could be due to an out of date - * server not sending the proto field we expect. + * test verifies that we honor the inherent value of an exception for isServerOverloaded. We don't + * want a false value passed into RemoteWithExtrasExceptions to override the inherent value of an + * exception if it's already true. This could be due to an out of date server not sending the + * proto field we expect. 
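Aside: the javadoc above says the wrapper's boolean hint must not downgrade an exception type that is inherently "server overloaded"; the test enforcing this follows immediately after this sketch. The sketch uses a hypothetical MyOverloadedException in place of the test's ServerOverloadedException, and the constructor argument order is assumed to be (class name, message, doNotRetry, serverOverloaded), matching the call and comment in the test.

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException;

// Hypothetical remote exception type standing in for the test's ServerOverloadedException.
// A single-String constructor is what the unwrap machinery needs to re-instantiate it locally.
class MyOverloadedException extends IOException {
  public MyOverloadedException(String msg) {
    super(msg);
  }
}

class OverloadedUnwrapSketch {
  static void demo() {
    // Even with serverOverloaded=false reported on the wire, unwrapping yields the concrete
    // remote type, so type-based handling of "overloaded" exceptions still applies.
    RemoteWithExtrasException remote = new RemoteWithExtrasException(
      MyOverloadedException.class.getName(), "server is overloaded", false, false);
    IOException unwrapped = remote.unwrapRemoteException();
    assert unwrapped instanceof MyOverloadedException;
  }
}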
*/ @Test public void itUsesExceptionDefaultValueForServerOverloaded() { // pass false for server overloaded, we still expect the exception to be true due to // the exception type - RemoteWithExtrasException ex = - new RemoteWithExtrasException(ServerOverloadedException.class.getName(), - "server is overloaded", false, false); + RemoteWithExtrasException ex = new RemoteWithExtrasException( + ServerOverloadedException.class.getName(), "server is overloaded", false, false); IOException result = ex.unwrapRemoteException(); assertEquals(result.getClass(), ServerOverloadedException.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ba1e27258d2d..48bd5498cd41 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestRpcClientDeprecatedNameMapping { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcClientDeprecatedNameMapping.class); + HBaseClassTestRule.forClass(TestRpcClientDeprecatedNameMapping.class); @Test public void test() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java index 62e204a65a2c..25010d190f65 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,24 +25,19 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestQuotaFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestQuotaFilter.class); + HBaseClassTestRule.forClass(TestQuotaFilter.class); @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * QuotaFilter qf - * = new QuotaFilter() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . 
For example: QuotaFilter qf = new QuotaFilter() .setFoo(foo) + * .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" returns the + * declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(QuotaFilter.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java index 37a21dc2b18b..e2843180938e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +22,6 @@ import static org.junit.Assert.fail; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.QuotaGlobalsSettingsBypass; @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestQuotaGlobalsSettingsBypass { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestQuotaGlobalsSettingsBypass.class); + HBaseClassTestRule.forClass(TestQuotaGlobalsSettingsBypass.class); @Test public void testMerge() throws IOException { @@ -47,19 +47,19 @@ public void testMerge() throws IOException { @Test public void testInvalidMerges() throws IOException { QuotaGlobalsSettingsBypass userBypass = - new QuotaGlobalsSettingsBypass("joe", null, null, null, true); + new QuotaGlobalsSettingsBypass("joe", null, null, null, true); QuotaGlobalsSettingsBypass tableBypass = - new QuotaGlobalsSettingsBypass(null, TableName.valueOf("table"), null, null, true); + new QuotaGlobalsSettingsBypass(null, TableName.valueOf("table"), null, null, true); QuotaGlobalsSettingsBypass namespaceBypass = - new QuotaGlobalsSettingsBypass(null, null, "ns", null, true); + new QuotaGlobalsSettingsBypass(null, null, "ns", null, true); QuotaGlobalsSettingsBypass regionServerBypass = - new QuotaGlobalsSettingsBypass(null, null, null, "all", true); + new QuotaGlobalsSettingsBypass(null, null, null, "all", true); QuotaGlobalsSettingsBypass userOnTableBypass = - new QuotaGlobalsSettingsBypass("joe", TableName.valueOf("table"), null, null, true); + new QuotaGlobalsSettingsBypass("joe", 
TableName.valueOf("table"), null, null, true); QuotaGlobalsSettingsBypass userOnNamespaceBypass = - new QuotaGlobalsSettingsBypass("joe", null, "ns", null, true); + new QuotaGlobalsSettingsBypass("joe", null, "ns", null, true); QuotaGlobalsSettingsBypass userOnRegionServerBypass = - new QuotaGlobalsSettingsBypass("joe", null, null, "all", true); + new QuotaGlobalsSettingsBypass("joe", null, null, "all", true); assertTrue(userBypass.merge(userBypass).getBypass()); expectFailure(userBypass, new QuotaGlobalsSettingsBypass("frank", null, null, null, false)); @@ -142,6 +142,7 @@ void expectFailure(QuotaSettings one, QuotaSettings two) throws IOException { try { one.merge(two); fail("Expected to see an Exception merging " + two + " into " + one); - } catch (IllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java index 6b9212f6260f..be659bc202d7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,28 +49,26 @@ public class TestQuotaSettingsFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestQuotaSettingsFactory.class); + HBaseClassTestRule.forClass(TestQuotaSettingsFactory.class); @Test public void testAllQuotasAddedToList() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table + .build(); final long readLimit = 1000; final long writeLimit = 500; final Throttle throttle = Throttle.newBuilder() - // 1000 read reqs/min - .setReadNum(TimedQuota.newBuilder().setSoftLimit(readLimit) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) - // 500 write reqs/min - .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) - .build(); - final Quotas quotas = Quotas.newBuilder() - .setSpace(spaceQuota) // Set the FS quotas - .setThrottle(throttle) // Set some RPC limits - .build(); + // 1000 read reqs/min + .setReadNum(TimedQuota.newBuilder().setSoftLimit(readLimit) + .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) + // 500 write reqs/min + .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit) + .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) + .build(); + final Quotas quotas = Quotas.newBuilder().setSpace(spaceQuota) // Set the FS quotas + .setThrottle(throttle) // Set some RPC limits + .build(); final TableName tn = TableName.valueOf("my_table"); List settings = QuotaSettingsFactory.fromTableQuotas(tn, quotas); assertEquals(3, settings.size()); @@ -125,19 +123,15 @@ public void testAllQuotasAddedToList() { @Test(expected = IllegalArgumentException.class) public void testNeitherTableNorNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - 
.setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(null, null, spaceQuota); } @Test(expected = IllegalArgumentException.class) public void testBothTableAndNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(TableName.valueOf("foo"), "bar", spaceQuota); } @@ -147,10 +141,10 @@ public void testSpaceLimitSettings() { final long sizeLimit = 1024L * 1024L * 1024L * 75; // 75GB final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_INSERTS; QuotaSettings settings = - QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); + QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); assertNotNull("QuotaSettings should not be null", settings); assertTrue("Should be an instance of SpaceLimitSettings", - settings instanceof SpaceLimitSettings); + settings instanceof SpaceLimitSettings); SpaceLimitSettings spaceLimitSettings = (SpaceLimitSettings) settings; SpaceLimitRequest protoRequest = spaceLimitSettings.getProto(); assertTrue("Request should have a SpaceQuota", protoRequest.hasQuota()); @@ -167,7 +161,7 @@ public void testSpaceLimitSettingsForDeletes() { QuotaSettings nsSettings = QuotaSettingsFactory.removeNamespaceSpaceLimit(ns); assertNotNull("QuotaSettings should not be null", nsSettings); assertTrue("Should be an instance of SpaceLimitSettings", - nsSettings instanceof SpaceLimitSettings); + nsSettings instanceof SpaceLimitSettings); SpaceLimitRequest nsProto = ((SpaceLimitSettings) nsSettings).getProto(); assertTrue("Request should have a SpaceQuota", nsProto.hasQuota()); assertTrue("The remove attribute should be true", nsProto.getQuota().getRemove()); @@ -175,7 +169,7 @@ public void testSpaceLimitSettingsForDeletes() { QuotaSettings tableSettings = QuotaSettingsFactory.removeTableSpaceLimit(tn); assertNotNull("QuotaSettings should not be null", tableSettings); assertTrue("Should be an instance of SpaceLimitSettings", - tableSettings instanceof SpaceLimitSettings); + tableSettings instanceof SpaceLimitSettings); SpaceLimitRequest tableProto = ((SpaceLimitSettings) tableSettings).getProto(); assertTrue("Request should have a SpaceQuota", tableProto.hasQuota()); assertTrue("The remove attribute should be true", tableProto.getQuota().getRemove()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java index 2406d10ed0a0..a3fc235c2e8e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,12 +38,12 @@ /** * Test class for {@link SpaceLimitSettings}. 
*/ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestSpaceLimitSettings { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSpaceLimitSettings.class); + HBaseClassTestRule.forClass(TestSpaceLimitSettings.class); @Test(expected = IllegalArgumentException.class) public void testInvalidTableQuotaSizeLimit() { @@ -130,14 +130,14 @@ public void testNamespaceQuota() { @Test public void testQuotaMerging() throws IOException { TableName tn = TableName.valueOf("foo"); - QuotaSettings originalSettings = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings largerSizeLimit = QuotaSettingsFactory.limitTableSpace( - tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings differentPolicy = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); - QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace( - "ns1", 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings originalSettings = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings largerSizeLimit = + QuotaSettingsFactory.limitTableSpace(tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings differentPolicy = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace("ns1", + 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); assertEquals(originalSettings.merge(largerSizeLimit), largerSizeLimit); assertEquals(originalSettings.merge(differentPolicy), differentPolicy); @@ -145,7 +145,7 @@ public void testQuotaMerging() throws IOException { originalSettings.merge(incompatibleSettings); fail("Should not be able to merge a Table space quota with a namespace space quota."); } catch (IllegalArgumentException e) { - //pass + // pass } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java index 53fb9bd3e927..11e2f737b315 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
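Aside: the SpaceLimitSettings tests reformatted above build table and namespace space quotas through QuotaSettingsFactory and then merge them. A compact sketch of that usage follows; the factory calls and policies come straight from the test, the size constants are illustrative, and merge(...) may be package-private, so the snippet assumes it sits in org.apache.hadoop.hbase.quotas as the tests do.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

class SpaceQuotaSketch {
  static void demo() throws Exception {
    TableName tn = TableName.valueOf("foo");
    // Cap the table at 1 MB; disable the table if the cap is breached.
    QuotaSettings oneMb =
      QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE);
    // Merging a larger limit for the same table yields the newer settings, per testQuotaMerging.
    QuotaSettings fiveMb =
      QuotaSettingsFactory.limitTableSpace(tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE);
    QuotaSettings merged = oneMb.merge(fiveMb);
    // A namespace quota cannot be merged into a table quota; the test expects IllegalArgumentException.
    QuotaSettings nsQuota = QuotaSettingsFactory.limitNamespaceSpace(
      "ns1", 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES);
    try {
      oneMb.merge(nsQuota);
    } catch (IllegalArgumentException expected) {
      // pass
    }
  }
}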
See the NOTICE file * distributed with this work for additional information @@ -34,27 +34,25 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestThrottleSettings { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestThrottleSettings.class); + HBaseClassTestRule.forClass(TestThrottleSettings.class); @Test public void testMerge() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1); TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings merged = orig.merge(new ThrottleSettings("joe", null, null, null, tr2)); @@ -66,17 +64,15 @@ public void testMerge() throws IOException { @Test public void testIncompatibleThrottleTypes() throws IOException { TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq); TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota) - .setType(QuotaProtos.ThrottleType.READ_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.READ_NUMBER).build(); try { orig.merge(new ThrottleSettings("joe", null, null, null, readsQuotaReq)); @@ -89,17 +85,15 @@ public void testIncompatibleThrottleTypes() throws IOException { @Test public void testNoThrottleReturnsOriginal() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, 
null, tr1); - ThrottleRequest tr2 = ThrottleRequest.newBuilder() - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + ThrottleRequest tr2 = + ThrottleRequest.newBuilder().setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); - assertTrue( - "The same object should be returned by merge, but it wasn't", + assertTrue("The same object should be returned by merge, but it wasn't", orig == orig.merge(new ThrottleSettings("joe", null, null, null, tr2))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java index ae2d4262e647..fb74df394738 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -37,12 +36,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestReplicationPeerConfig { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationPeerConfig.class); + HBaseClassTestRule.forClass(TestReplicationPeerConfig.class); private static final String NAMESPACE_REPLICATE = "replicate"; private static final String NAMESPACE_OTHER = "other"; @@ -53,16 +52,11 @@ public class TestReplicationPeerConfig { @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * ReplicationPeerConfig htd - * = new ReplicationPeerConfig() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: ReplicationPeerConfig htd = new ReplicationPeerConfig() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(ReplicationPeerConfig.class); @@ -72,48 +66,39 @@ public void testClassMethodsAreBuilderStyle() { public void testNeedToReplicateWithReplicatingAll() { // 1. replication_all flag is true, no namespaces and table-cfs config ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .build(); + .setReplicateAllUserTables(true).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 2. 
replicate_all flag is true, and config in excludedTableCfs // Exclude empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(Maps.newHashMap()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude table B Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); // 3. replicate_all flag is true, and config in excludeNamespaces // Exclude empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace other - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace replication - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 4. 
replicate_all flag is true, and config excludeNamespaces and excludedTableCfs both @@ -121,30 +106,24 @@ public void testNeedToReplicateWithReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .setExcludeTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); } @@ -156,78 +135,61 @@ public void testNeedToReplicateWithoutReplicatingAll() { // 1. replication_all flag is false, no namespaces and table-cfs config peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .build(); + .setReplicateAllUserTables(false).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 2. replicate_all flag is false, and only config table-cfs in peer // Set empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(Maps.newHashMap()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set table B tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); // 3. 
replication_all flag is false, and only config namespace in peer // Set empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace other peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace replication peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 4. replicate_all flag is false, and config namespaces and table-cfs both // Namespaces config doesn't conflict with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(false) + .setTableCFsMap(tableCfs).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(false) + .setTableCFsMap(tableCfs).setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setTableCFsMap(tableCfs) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(false) + .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); } @@ -236,9 +198,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { Map> excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -246,9 +206,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList()); 
peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -256,9 +214,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -269,9 +225,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -279,9 +233,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -289,9 +241,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java index cf5939031b02..6f7be8315853 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
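Aside: the ReplicationPeerConfig tests reformatted above encode the needToReplicate rules: with replicate-all on, exclusion maps and namespaces subtract tables (and individual column families); with it off, only explicitly listed namespaces, tables, and CFs replicate. The sketch below mirrors one case from the tests; the builder implementation class and its visibility outside the test package are taken as-is from the test code, and the table/family names are illustrative.

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;

class NeedToReplicateSketch {
  static void demo() {
    TableName tableA = TableName.valueOf("tableA");
    byte[] family1 = Bytes.toBytes("cf1");
    Map<TableName, List<String>> excludeTableCfs = Maps.newHashMap();
    excludeTableCfs.put(tableA, Lists.newArrayList("cf1")); // exclude only cf1 of table A
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl()
      .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build();
    // Table A still replicates overall, but cf1 specifically does not.
    boolean tableReplicates = peerConfig.needToReplicate(tableA);        // expected true
    boolean cf1Replicates = peerConfig.needToReplicate(tableA, family1); // expected false
  }
}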
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.security.Key; import java.security.KeyException; - import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -38,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestEncryptionUtil { private static final String INVALID_HASH_ALG = "this-hash-algorithm-not-exists hopefully... :)"; @@ -46,11 +45,11 @@ public class TestEncryptionUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEncryptionUtil.class); + HBaseClassTestRule.forClass(TestEncryptionUtil.class); // There does not seem to be a ready way to test either getKeyFromBytesOrMasterKey // or createEncryptionContext, and the existing code under MobUtils appeared to be - // untested. Not ideal! + // untested. Not ideal! @Test public void testKeyWrappingUsingHashAlgDefault() throws Exception { @@ -146,15 +145,14 @@ private void testKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key @@ -168,7 +166,7 @@ private void testKeyWrapping(String hashAlgorithm) throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); // unwrap with an incorrect key try { @@ -183,7 +181,7 @@ private void testWALKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } @@ -204,7 +202,7 @@ private void testWALKeyWrapping(String hashAlgorithm) throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? 
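Aside: the key-wrapping tests above configure KeyProviderForTesting, generate a random AES key, wrap it, and then verify the unwrapped bytes match the originals (the assertion that follows checks exactly that). Below is a hedged sketch of the flow; the EncryptionUtil.wrapKey/unwrapKey calls are assumed from the utility these tests target (they are not visible in this hunk), and the "hbase" subject string is illustrative only.

import java.security.Key;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.util.Bytes;

class KeyWrapSketch {
  static boolean roundTrip() throws Exception {
    Configuration conf = new Configuration(); // plain Configuration, as in the test
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    Bytes.secureRandom(keyBytes);
    String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    Key key = new SecretKeySpec(keyBytes, algorithm);
    // Wrap for a subject, then unwrap with the same subject; assumed EncryptionUtil API.
    byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", key);
    Key unwrapped = EncryptionUtil.unwrapKey(conf, "hbase", wrapped);
    return Bytes.equals(keyBytes, unwrapped.getEncoded()); // did we get back what we wrapped?
  }
}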
assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); } private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws Exception { @@ -215,8 +213,7 @@ private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java index 538a9b91c3c5..8d82ba538bda 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,12 +65,12 @@ import org.apache.hbase.thirdparty.com.google.common.base.Strings; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestHBaseSaslRpcClient { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseSaslRpcClient.class); + HBaseClassTestRule.forClass(TestHBaseSaslRpcClient.class); static { System.setProperty("java.security.krb5.realm", "DOMAIN.COM"); @@ -82,19 +82,18 @@ public class TestHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseSaslRpcClient.class); - @Rule public ExpectedException exception = ExpectedException.none(); @Test public void testSaslClientUsesGivenRpcProtection() throws Exception { - Token token = createTokenMockWithCredentials(DEFAULT_USER_NAME, - DEFAULT_USER_PASSWORD); + Token token = + createTokenMockWithCredentials(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD); DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider(); for (SaslUtil.QualityOfProtection qop : SaslUtil.QualityOfProtection.values()) { String negotiatedQop = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, token, - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false, qop.name(), - false) { + Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false, qop.name(), + false) { public String getQop() { return saslProps.get(Sasl.QOP); } @@ -114,9 +113,9 @@ public void testDigestSaslClientCallbackHandler() throws UnsupportedCallbackExce final RealmCallback realmCallback = mock(RealmCallback.class); // We can provide a realmCallback, but HBase presently does nothing with it. 
- Callback[] callbackArray = {nameCallback, passwordCallback, realmCallback}; + Callback[] callbackArray = { nameCallback, passwordCallback, realmCallback }; final DigestSaslClientCallbackHandler saslClCallbackHandler = - new DigestSaslClientCallbackHandler(token); + new DigestSaslClientCallbackHandler(token); saslClCallbackHandler.handle(callbackArray); verify(nameCallback).setName(anyString()); verify(passwordCallback).setPassword(any()); @@ -128,11 +127,11 @@ public void testDigestSaslClientCallbackHandlerWithException() { when(token.getIdentifier()).thenReturn(Bytes.toBytes(DEFAULT_USER_NAME)); when(token.getPassword()).thenReturn(Bytes.toBytes(DEFAULT_USER_PASSWORD)); final DigestSaslClientCallbackHandler saslClCallbackHandler = - new DigestSaslClientCallbackHandler(token); + new DigestSaslClientCallbackHandler(token); try { saslClCallbackHandler.handle(new Callback[] { mock(TextOutputCallback.class) }); } catch (UnsupportedCallbackException expEx) { - //expected + // expected } catch (Exception ex) { fail("testDigestSaslClientCallbackHandlerWithException error : " + ex.getMessage()); } @@ -140,7 +139,7 @@ public void testDigestSaslClientCallbackHandlerWithException() { @Test public void testHBaseSaslRpcClientCreation() throws Exception { - //creation kerberos principal check section + // creation kerberos principal check section assertFalse(assertSuccessCreationKerberosPrincipal(null)); assertFalse(assertSuccessCreationKerberosPrincipal("DOMAIN.COM")); assertFalse(assertSuccessCreationKerberosPrincipal("principal/DOMAIN.COM")); @@ -150,22 +149,22 @@ public void testHBaseSaslRpcClientCreation() throws Exception { LOG.warn("Could not create a SASL client with valid Kerberos credential"); } - //creation digest principal check section + // creation digest principal check section assertFalse(assertSuccessCreationDigestPrincipal(null, null)); assertFalse(assertSuccessCreationDigestPrincipal("", "")); assertFalse(assertSuccessCreationDigestPrincipal("", null)); assertFalse(assertSuccessCreationDigestPrincipal(null, "")); assertTrue(assertSuccessCreationDigestPrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //creation simple principal check section + // creation simple principal check section assertFalse(assertSuccessCreationSimplePrincipal("", "")); assertFalse(assertSuccessCreationSimplePrincipal(null, null)); assertFalse(assertSuccessCreationSimplePrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //exceptions check section + // exceptions check section assertTrue(assertIOExceptionThenSaslClientIsNull(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - assertTrue(assertIOExceptionWhenGetStreamsBeforeConnectCall( - DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); + assertTrue( + assertIOExceptionWhenGetStreamsBeforeConnectCall(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); } @Test @@ -182,47 +181,46 @@ public void testAuthMethodReadWrite() throws IOException { assertAuthMethodWrite(out, AuthMethod.DIGEST); } - private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) - throws IOException { - in.reset(new byte[] {authMethod.code}, 1); + private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) throws IOException { + in.reset(new byte[] { authMethod.code }, 1); assertEquals(authMethod, AuthMethod.read(in)); } private void assertAuthMethodWrite(DataOutputBuffer out, AuthMethod authMethod) - throws IOException { + throws IOException { authMethod.write(out); assertEquals(authMethod.code, out.getData()[0]); out.reset(); } private boolean 
assertIOExceptionWhenGetStreamsBeforeConnectCall(String principal, - String password) throws IOException { + String password) throws IOException { boolean inState = false; boolean outState = false; DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider() { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, - boolean fallbackAllowed, Map saslProps) { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) { return Mockito.mock(SaslClient.class); } }; HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); try { rpcClient.getInputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet inState = true; } try { rpcClient.getOutputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet outState = true; } @@ -232,18 +230,17 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddress, private boolean assertIOExceptionThenSaslClientIsNull(String principal, String password) { try { DigestSaslClientAuthenticationProvider provider = - new DigestSaslClientAuthenticationProvider() { - @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, - Map saslProps) { - return null; - } - }; + new DigestSaslClientAuthenticationProvider() { + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddress, + SecurityInfo securityInfo, Token token, + boolean fallbackAllowed, Map saslProps) { + return null; + } + }; new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); return false; } catch (IOException ex) { return true; @@ -254,7 +251,7 @@ private boolean assertSuccessCreationKerberosPrincipal(String principal) { HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientForKerberos(principal); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -264,10 +261,10 @@ private boolean assertSuccessCreationDigestPrincipal(String principal, String pa HBaseSaslRpcClient rpcClient = null; try { rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), - new DigestSaslClientAuthenticationProvider(), - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); - } catch(Exception ex) { + new DigestSaslClientAuthenticationProvider(), + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -277,22 +274,20 @@ private boolean 
assertSuccessCreationSimplePrincipal(String principal, String pa HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientSimple(principal, password); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; } - private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) - throws IOException { + private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), - new GssSaslClientAuthenticationProvider(), createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + new GssSaslClientAuthenticationProvider(), createTokenMock(), Mockito.mock(InetAddress.class), + Mockito.mock(SecurityInfo.class), false); } - private Token createTokenMockWithCredentials( - String principal, String password) - throws IOException { + private Token createTokenMockWithCredentials(String principal, + String password) throws IOException { Token token = createTokenMock(); if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(password)) { when(token.getIdentifier()).thenReturn(Bytes.toBytes(DEFAULT_USER_NAME)); @@ -302,10 +297,10 @@ private Token createTokenMockWithCredentials( } private HBaseSaslRpcClient createSaslRpcClientSimple(String principal, String password) - throws IOException { + throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), - new SimpleSaslClientAuthenticationProvider(), createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + new SimpleSaslClientAuthenticationProvider(), createTokenMock(), + Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); } @SuppressWarnings("unchecked") @@ -314,8 +309,8 @@ private Token createTokenMock() { } @Test(expected = IOException.class) - public void testFailedEvaluateResponse() throws IOException { - //prep mockin the SaslClient + public void testFailedEvaluateResponse() throws IOException { + // prep mockin the SaslClient SimpleSaslClientAuthenticationProvider mockProvider = Mockito.mock(SimpleSaslClientAuthenticationProvider.class); SaslClient mockClient = Mockito.mock(SaslClient.class); @@ -323,11 +318,10 @@ public void testFailedEvaluateResponse() throws IOException { Assert.assertNotNull(mockClient); Mockito.when(mockProvider.createClient(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.any())).thenReturn(mockClient); - HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), - mockProvider, createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), mockProvider, + createTokenMock(), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); - //simulate getting an error from a failed saslServer.evaluateResponse + // simulate getting an error from a failed saslServer.evaluateResponse DataOutputBuffer errorBuffer = new DataOutputBuffer(); errorBuffer.writeInt(SaslStatus.ERROR.state); WritableUtils.writeString(errorBuffer, IOException.class.getName()); @@ -337,7 +331,7 @@ mockProvider, createTokenMock(), in.reset(errorBuffer.getData(), 0, errorBuffer.getLength()); DataOutputBuffer out = new DataOutputBuffer(); - //simulate that authentication exchange has completed quickly after sending the token + // simulate that authentication exchange has completed 
quickly after sending the token Mockito.when(mockClient.isComplete()).thenReturn(true); rpcClient.saslConnect(in, out); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java index 36f29dec240e..ccb23a99e37b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestSaslUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSaslUtil.class); + HBaseClassTestRule.forClass(TestSaslUtil.class); @Rule public ExpectedException exception = ExpectedException.none(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java index eff3b5f8dd0a..52ebebc372fa 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -32,14 +31,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestDefaultProviderSelector { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDefaultProviderSelector.class); + HBaseClassTestRule.forClass(TestDefaultProviderSelector.class); BuiltInProviderSelector selector; + @Before public void setup() { selector = new BuiltInProviderSelector(); @@ -70,9 +70,9 @@ public void testDuplicateProviders() { @Test public void testExpectedProviders() { - HashSet providers = new HashSet<>(Arrays.asList( - new SimpleSaslClientAuthenticationProvider(), new GssSaslClientAuthenticationProvider(), - new DigestSaslClientAuthenticationProvider())); + HashSet providers = + new HashSet<>(Arrays.asList(new SimpleSaslClientAuthenticationProvider(), + new GssSaslClientAuthenticationProvider(), new DigestSaslClientAuthenticationProvider())); selector.configure(new Configuration(false), providers); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java index 2b399593e7c1..029c880600b4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java @@ -25,9 +25,7 @@ import java.net.InetAddress; import java.util.HashMap; 
import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -44,16 +42,16 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; -@Category({SmallTests.class, SecurityTests.class}) +@Category({ SmallTests.class, SecurityTests.class }) public class TestSaslClientAuthenticationProviders { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSaslClientAuthenticationProviders.class); + HBaseClassTestRule.forClass(TestSaslClientAuthenticationProviders.class); @Test public void testCannotAddTheSameProviderTwice() { - HashMap registeredProviders = new HashMap<>(); + HashMap registeredProviders = new HashMap<>(); SaslClientAuthenticationProvider p1 = new SimpleSaslClientAuthenticationProvider(); SaslClientAuthenticationProvider p2 = new SimpleSaslClientAuthenticationProvider(); @@ -62,25 +60,26 @@ public void testCannotAddTheSameProviderTwice() { try { SaslClientAuthenticationProviders.addProviderIfNotExists(p2, registeredProviders); - } catch (RuntimeException e) {} + } catch (RuntimeException e) { + } assertSame("Expected the original provider to be present", p1, - registeredProviders.entrySet().iterator().next().getValue()); + registeredProviders.entrySet().iterator().next().getValue()); } @Test public void testInstanceIsCached() { Configuration conf = HBaseConfiguration.create(); SaslClientAuthenticationProviders providers1 = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); SaslClientAuthenticationProviders providers2 = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); assertSame(providers1, providers2); SaslClientAuthenticationProviders.reset(); SaslClientAuthenticationProviders providers3 = - SaslClientAuthenticationProviders.getInstance(conf); + SaslClientAuthenticationProviders.getInstance(conf); assertNotSame(providers1, providers3); assertEquals(providers1.getNumRegisteredProviders(), providers3.getNumRegisteredProviders()); } @@ -89,58 +88,66 @@ public void testInstanceIsCached() { public void testDifferentConflictingImplementationsFail() { Configuration conf = HBaseConfiguration.create(); conf.setStrings(SaslClientAuthenticationProviders.EXTRA_PROVIDERS_KEY, - ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); + ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); SaslClientAuthenticationProviders.getInstance(conf); } static class ConflictingProvider1 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD1 = new SaslAuthMethod( - "FOO", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD1 = + new SaslAuthMethod("FOO", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider1() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD1; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, + SecurityInfo 
securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } static class ConflictingProvider2 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD2 = new SaslAuthMethod( - "BAR", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD2 = + new SaslAuthMethod("BAR", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider2() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD2; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java index c78c765aca1f..f726b4178992 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,13 +81,13 @@ public void testObtainToken() throws Exception { shouldInjectFault.set(null, new ServiceException(injected)); try { - ClientTokenUtil.obtainToken((Connection)null); + ClientTokenUtil.obtainToken((Connection) null); fail("Should have injected exception."); } catch (IOException e) { assertException(injected, e); } - CompletableFuture future = ClientTokenUtil.obtainToken((AsyncConnection)null); + CompletableFuture future = ClientTokenUtil.obtainToken((AsyncConnection) null); try { future.get(); fail("Should have injected exception."); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 317dff9efebc..b27d832ee8c0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,9 +70,10 @@ public class TestProtobufUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProtobufUtil.class); + HBaseClassTestRule.forClass(TestProtobufUtil.class); private static final String TAG_STR = "tag-1"; - private static final byte TAG_TYPE = (byte)10; + private static final byte TAG_TYPE = (byte) 10; + public TestProtobufUtil() { } @@ -93,7 +94,6 @@ public void testException() throws IOException { /** * Test basic Get conversions. - * * @throws IOException if the conversion to a {@link Get} fails */ @Test @@ -126,7 +126,6 @@ public void testGet() throws IOException { /** * Test Delete Mutate conversions. - * * @throws IOException if the conversion to a {@link Delete} or a * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @@ -161,20 +160,16 @@ public void testDelete() throws IOException { // delete always have empty value, // add empty value to the original mutate - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { qualifier.setValue(ByteString.EMPTY); } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.DELETE, delete)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.DELETE, delete)); } /** * Test Put Mutate conversions. - * * @throws IOException if the conversion to a {@link Put} or a * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @@ -210,22 +205,18 @@ public void testPut() throws IOException { // value level timestamp specified, // add the timestamp to the original mutate long timestamp = put.getTimestamp(); - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { if (!qualifier.hasTimestamp()) { qualifier.setTimestamp(timestamp); } } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.PUT, put)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.PUT, put)); } /** * Test basic Scan conversions. 
- * * @throws IOException if the conversion to a {@link org.apache.hadoop.hbase.client.Scan} fails */ @Test @@ -259,19 +250,18 @@ public void testScan() throws IOException { scanBuilder.setIncludeStopRow(false); ClientProtos.Scan expectedProto = scanBuilder.build(); - ClientProtos.Scan actualProto = ProtobufUtil.toScan( - ProtobufUtil.toScan(expectedProto)); + ClientProtos.Scan actualProto = ProtobufUtil.toScan(ProtobufUtil.toScan(expectedProto)); assertEquals(expectedProto, actualProto); } @Test public void testToCell() { KeyValue kv1 = - new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); + new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); KeyValue kv2 = - new KeyValue(Bytes.toBytes("bbb"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); + new KeyValue(Bytes.toBytes("bbb"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); KeyValue kv3 = - new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); + new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]); byte[] arr = new byte[kv1.getLength() + kv2.getLength() + kv3.getLength()]; System.arraycopy(kv1.getBuffer(), kv1.getOffset(), arr, 0, kv1.getLength()); System.arraycopy(kv2.getBuffer(), kv2.getOffset(), arr, kv1.getLength(), kv2.getLength()); @@ -281,15 +271,13 @@ public void testToCell() { dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); - Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, - false); + Cell newOffheapKV = ProtobufUtil + .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } /** * Test Increment Mutate conversions. - * * @throws IOException if converting to an {@link Increment} or * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @@ -334,23 +322,20 @@ private MutationProto getIncrementMutation(Long timestamp) { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. + * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. */ @Test public void testIncrementNoTimestamp() throws IOException { MutationProto mutation = getIncrementMutation(null); Increment increment = ProtobufUtil.toIncrement(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, increment.getTimestamp()); - increment.getFamilyCellMap().values() - .forEach(cells -> - cells.forEach(cell -> - assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + increment.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } /** * Test Append Mutate conversions. - * * @throws IOException if converting to an {@link Append} fails */ @Test @@ -373,15 +358,16 @@ public void testAppend() throws IOException { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. + * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. 
*/ @Test public void testAppendNoTimestamp() throws IOException { MutationProto mutation = getAppendMutation(null); Append append = ProtobufUtil.toAppend(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, append.getTimestamp()); - append.getFamilyCellMap().values().forEach(cells -> cells.forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + append.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } private MutationProto getAppendMutation(Long timestamp) { @@ -424,9 +410,9 @@ private static ProcedureProtos.Procedure createProcedure(long procId) { } private static LockServiceProtos.LockedResource createLockedResource( - LockServiceProtos.LockedResourceType resourceType, String resourceName, - LockServiceProtos.LockType lockType, - ProcedureProtos.Procedure exclusiveLockOwnerProcedure, int sharedLockCount) { + LockServiceProtos.LockedResourceType resourceType, String resourceName, + LockServiceProtos.LockType lockType, ProcedureProtos.Procedure exclusiveLockOwnerProcedure, + int sharedLockCount) { LockServiceProtos.LockedResource.Builder build = LockServiceProtos.LockedResource.newBuilder(); build.setResourceType(resourceType); build.setResourceName(resourceName); @@ -448,94 +434,65 @@ public void testProcedureInfo() { ProcedureProtos.Procedure procedure = builder.build(); String procJson = ProtobufUtil.toProcedureJson(Lists.newArrayList(procedure)); - assertEquals("[{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"1\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"," - + "\"stateMessage\":[{\"value\":\"QQ==\"}]" - + "}]", procJson); + assertEquals("[{" + "\"className\":\"java.lang.Object\"," + "\"procId\":\"1\"," + + "\"submittedTime\":\"0\"," + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"," + + "\"stateMessage\":[{\"value\":\"QQ==\"}]" + "}]", procJson); } @Test public void testServerLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.SERVER, "server", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.SERVER, "server", LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"SERVER\"," - + "\"resourceName\":\"server\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"SERVER\"," + "\"resourceName\":\"server\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", lockJson); } @Test public void testNamespaceLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.NAMESPACE, "ns", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.NAMESPACE, "ns", LockServiceProtos.LockType.EXCLUSIVE, createProcedure(2), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"NAMESPACE\"," - + "\"resourceName\":\"ns\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"2\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + 
"\"resourceType\":\"NAMESPACE\"," + "\"resourceName\":\"ns\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"2\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + "}]", + lockJson); } @Test public void testTableLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.TABLE, "table", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.TABLE, "table", LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"TABLE\"," - + "\"resourceName\":\"table\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"TABLE\"," + "\"resourceName\":\"table\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", lockJson); } @Test public void testRegionLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.REGION, "region", + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.REGION, "region", LockServiceProtos.LockType.EXCLUSIVE, createProcedure(3), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"REGION\"," - + "\"resourceName\":\"region\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"3\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"REGION\"," + "\"resourceName\":\"region\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"3\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + "}]", + lockJson); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to true. */ @Test public void testCellConversionWithTags() { @@ -546,7 +503,7 @@ public void testCellConversionWithTags() { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(1, decodedTags.size()); + assertEquals(1, decodedTags.size()); Tag decodedTag = decodedTags.get(0); assertEquals(TAG_TYPE, decodedTag.getType()); assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); @@ -572,8 +529,8 @@ private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTag /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to false. 
+ * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to false. */ @Test public void testCellConversionWithoutTags() { @@ -583,14 +540,13 @@ public void testCellConversionWithoutTags() { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to false - * and decoding of tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to false and decoding of tags is set to true. */ @Test public void testTagEncodeFalseDecodeTrue() { @@ -600,14 +556,13 @@ public void testTagEncodeFalseDecodeTrue() { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to true - * and decoding of tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to true and decoding of tags is set to false. */ @Test public void testTagEncodeTrueDecodeFalse() { @@ -617,6 +572,6 @@ public void testTagEncodeTrueDecodeFalse() { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java index 808e245062a1..cae296b4d0e3 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,32 +27,17 @@ import java.util.Set; /** - * Utility class to check whether a given class conforms to builder-style: - * Foo foo = - * new Foo() - * .setBar(bar) - * .setBaz(baz) + * Utility class to check whether a given class conforms to builder-style: Foo foo = new Foo() + * .setBar(bar) .setBaz(baz) */ public final class BuilderStyleTest { - private BuilderStyleTest() {} + private BuilderStyleTest() { + } /* * If a base class Foo declares a method setFoo() returning Foo, then the subclass should - * re-declare the methods overriding the return class with the subclass: - * - * class Foo { - * Foo setFoo() { - * .. - * return this; - * } - * } - * - * class Bar { - * Bar setFoo() { - * return (Bar) super.setFoo(); - * } - * } - * + * re-declare the methods overriding the return class with the subclass: class Foo { Foo setFoo() + * { .. 
return this; } } class Bar { Bar setFoo() { return (Bar) super.setFoo(); } } */ @SuppressWarnings("rawtypes") public static void assertClassesAreBuilderStyle(Class... classes) { @@ -66,13 +51,13 @@ public static void assertClassesAreBuilderStyle(Class... classes) { } Class ret = method.getReturnType(); if (method.getName().startsWith("set") || method.getName().startsWith("add")) { - System.out.println(" " + clazz.getSimpleName() + "." + method.getName() + "() : " - + ret.getSimpleName()); + System.out.println( + " " + clazz.getSimpleName() + "." + method.getName() + "() : " + ret.getSimpleName()); // because of subclass / super class method overrides, we group the methods fitting the // same signatures because we get two method definitions from java reflection: // Mutation.setDurability() : Mutation - // Delete.setDurability() : Mutation + // Delete.setDurability() : Mutation // Delete.setDurability() : Delete String sig = method.getName(); for (Class param : method.getParameterTypes()) { @@ -97,8 +82,8 @@ public static void assertClassesAreBuilderStyle(Class... classes) { } } String errorMsg = "All setXXX()|addXX() methods in " + clazz.getSimpleName() - + " should return a " + clazz.getSimpleName() + " object in builder style. " - + "Offending method:" + e.getValue().iterator().next().getName(); + + " should return a " + clazz.getSimpleName() + " object in builder style. " + + "Offending method:" + e.getValue().iterator().next().getName(); assertTrue(errorMsg, found); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java index 314cae9e175b..fbf92f641392 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,7 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.util.PoolMap.PoolType; -import org.junit.After; import org.junit.Before; public abstract class PoolMapTestBase { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java index 2fd73caea46a..d7ce6265b8c5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestRoundRobinPoolMap extends PoolMapTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRoundRobinPoolMap.class); + HBaseClassTestRule.forClass(TestRoundRobinPoolMap.class); @Override protected PoolType getPoolType() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java index 2f497c6fdfb5..45f533f1a730 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestThreadLocalPoolMap extends PoolMapTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestThreadLocalPoolMap.class); + HBaseClassTestRule.forClass(TestThreadLocalPoolMap.class); @Override protected PoolType getPoolType() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java index a8b7644c52af..c6d61ca44574 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public class TestZNodePaths { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZNodePaths.class); + HBaseClassTestRule.forClass(TestZNodePaths.class); @Test public void testIsClientReadable() { diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 70f3b75b3e8d..e3e09b534b0a 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,113 +31,6 @@ Apache HBase - Common Common functionality for HBase - - - - src/main/resources/ - - hbase-default.xml - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - maven-assembly-plugin - - true - - - - maven-antrun-plugin - - - process-resources - - - - - - - run - - - - - generate-Version-information - generate-sources - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - versionInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - hbase-default.xml - - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -261,6 +154,112 @@ + + + + src/main/resources/ + + hbase-default.xml + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + maven-assembly-plugin + + true + + + + maven-antrun-plugin + + + + run + + process-resources + + + + + + + + + generate-Version-information + + run + + generate-sources + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + versionInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + hbase-default.xml + + + + + net.revelc.code + warbucks-maven-plugin + + + + @@ -273,10 +272,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -313,7 +312,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -328,10 +329,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. 
hbase-compression-aircompressor Apache HBase - Compression - Aircompressor Pure Java compression support using Aircompressor codecs - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -165,6 +131,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java index d8069703fd6e..c232ce412316 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java @@ -1,21 +1,23 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Compressor; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -24,14 +26,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Compressor; /** * Hadoop compressor glue for aircompressor compressors. 
*/ @InterfaceAudience.Private public abstract class HadoopCompressor - implements CanReinit, org.apache.hadoop.io.compress.Compressor { + implements CanReinit, org.apache.hadoop.io.compress.Compressor { protected static final Logger LOG = LoggerFactory.getLogger(HadoopCompressor.class); protected T compressor; @@ -163,7 +164,7 @@ public void reinit(Configuration conf) { public void reset() { LOG.trace("reset"); try { - compressor = (T)compressor.getClass().getDeclaredConstructor().newInstance(); + compressor = (T) compressor.getClass().getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java index cfdc67a665e1..ac59cd296c0e 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java @@ -1,35 +1,36 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Decompressor; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Decompressor; /** * Hadoop decompressor glue for aircompressor decompressors. 
*/ @InterfaceAudience.Private public class HadoopDecompressor - implements org.apache.hadoop.io.compress.Decompressor { + implements org.apache.hadoop.io.compress.Decompressor { protected static final Logger LOG = LoggerFactory.getLogger(HadoopDecompressor.class); protected T decompressor; @@ -102,7 +103,7 @@ public boolean needsDictionary() { public void reset() { LOG.trace("reset"); try { - decompressor = (T)decompressor.getClass().getDeclaredConstructor().newInstance(); + decompressor = (T) decompressor.getClass().getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java index d45128da59d2..70ea7943e8da 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lz4.Lz4Compressor; +import io.airlift.compress.lz4.Lz4Decompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -31,8 +34,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lz4.Lz4Compressor; -import io.airlift.compress.lz4.Lz4Decompressor; /** * Hadoop Lz4 codec implemented with aircompressor. 
@@ -77,7 +78,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -88,7 +89,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java index 4797cd402100..5395dba5d0dd 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lzo.LzoCompressor; +import io.airlift.compress.lzo.LzoDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -31,8 +34,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lzo.LzoCompressor; -import io.airlift.compress.lzo.LzoDecompressor; /** * Hadoop Lzo codec implemented with aircompressor. 
@@ -77,7 +78,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -88,7 +89,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java index 602cbf860b31..2448404191f9 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.snappy.SnappyCompressor; +import io.airlift.compress.snappy.SnappyDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -31,8 +34,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.snappy.SnappyCompressor; -import io.airlift.compress.snappy.SnappyDecompressor; /** * Hadoop snappy codec implemented with aircompressor. 
@@ -77,7 +78,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -88,7 +89,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java index 25f8120ccd67..3e8d345c660c 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.zstd.ZstdCompressor; +import io.airlift.compress.zstd.ZstdDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -31,21 +34,18 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.zstd.ZstdCompressor; -import io.airlift.compress.zstd.ZstdDecompressor; /** * Hadoop codec implementation for Zstandard, implemented with aircompressor. *
 *
- * Unlike the other codecs this one should be considered as under development and unstable
- * (as in changing), reflecting the status of aircompressor's zstandard implementation.
+ * Unlike the other codecs this one should be considered as under development and unstable (as in
+ * changing), reflecting the status of aircompressor's zstandard implementation.
 *
    - * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. - * There are issues with both framing and limitations of the aircompressor zstandard - * compressor. This codec can be used as an alternative to the native codec, if the native - * codec cannot be made available and/or an eventual migration will never be necessary - * (i.e. this codec's performance meets anticipated requirements). Once you begin using this - * alternative you will be locked into it. + * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. There are + * issues with both framing and limitations of the aircompressor zstandard compressor. This codec + * can be used as an alternative to the native codec, if the native codec cannot be made available + * and/or an eventual migration will never be necessary (i.e. this codec's performance meets + * anticipated requirements). Once you begin using this alternative you will be locked into it. */ @InterfaceAudience.Private public class ZstdCodec implements Configurable, CompressionCodec { @@ -86,7 +86,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -97,7 +97,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java index 1defda25a593..a59896673ff5 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); + HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZ4); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java index 98ee5c04bafe..2f7d2ef571a8 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzo extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLzo.class); + HBaseClassTestRule.forClass(TestHFileCompressionLzo.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZO); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java index a6d863b61a5e..62a363e63f10 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); + HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.SNAPPY); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java index de0f4575e62a..f15114bd6e6a 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); + HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.ZSTD); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java index db1cc7214fd1..0ba491b84659 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestLz4Codec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLz4Codec.class); + HBaseClassTestRule.forClass(TestLz4Codec.class); @Test public void testLz4CodecSmall() throws Exception { @@ -39,7 +40,7 @@ public void testLz4CodecSmall() throws Exception { public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // high compressability + codecLargeTest(new Lz4Codec(), 10); // high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java index bd1b75aecc1b..15e6700c7bb5 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestLzoCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLzoCodec.class); + HBaseClassTestRule.forClass(TestLzoCodec.class); @Test public void testLzoCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public void testLzoCodecSmall() throws Exception { public void testLzoCodecLarge() throws Exception { codecLargeTest(new LzoCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new LzoCodec(), 2); - codecLargeTest(new LzoCodec(), 10); // very high compressability + codecLargeTest(new LzoCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java index 2646b1942025..54a1608851e9 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestSnappyCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnappyCodec.class); + HBaseClassTestRule.forClass(TestSnappyCodec.class); @Test public void testSnappyCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public void testSnappyCodecSmall() throws Exception { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java index 23d7777f07c7..34a7dcfedfc1 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLz4 extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLz4.class); + HBaseClassTestRule.forClass(TestWALCompressionLz4.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java index 997d6873c617..9c5bc8838c07 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLzo extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLzo.class); + HBaseClassTestRule.forClass(TestWALCompressionLzo.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java index 924e46a77eee..72813bcbd656 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionSnappy extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); + HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java index 0de6de2b027c..0f5c80ce269b 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionZstd extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionZstd.class); + HBaseClassTestRule.forClass(TestWALCompressionZstd.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java index 6b924c5ff9e8..6a2fdaf33991 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -28,7 +29,7 @@ public class TestZstdCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdCodec.class); + HBaseClassTestRule.forClass(TestZstdCodec.class); @Test public void testZstdCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public void testZstdCodecSmall() throws Exception { public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-brotli/pom.xml b/hbase-compression/hbase-compression-brotli/pom.xml index 887399c241a3..247c119804d8 100644 --- a/hbase-compression/hbase-compression-brotli/pom.xml +++ b/hbase-compression/hbase-compression-brotli/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. 
hbase-compression-brotli Apache HBase - Compression - Brotli Compression support using Brotli4j - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -150,6 +116,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java index d052d6a08389..16aa764ba3bb 100644 --- a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java +++ b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.brotli; @@ -78,7 +79,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -89,7 +90,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java index c45eb0d1401a..4e6e6602bbb9 100644 --- a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java +++ b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliCompressor.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.brotli; import com.aayushatharva.brotli4j.Brotli4jLoader; import com.aayushatharva.brotli4j.encoder.Encoder; import com.aayushatharva.brotli4j.encoder.Encoders; - import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; diff --git a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java index 8f167cd39608..9174644a959b 100644 --- a/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java +++ b/hbase-compression/hbase-compression-brotli/src/main/java/org/apache/hadoop/hbase/io/compress/brotli/BrotliDecompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.brotli; @@ -86,7 +87,6 @@ public int decompress(byte[] b, int off, int len) throws IOException { return 0; } - @Override public void end() { LOG.trace("end"); diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java index 50de8aae6077..f83a4ab728f5 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestBrotliCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.brotli; @@ -28,7 +29,7 @@ public class TestBrotliCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBrotliCodec.class); + HBaseClassTestRule.forClass(TestBrotliCodec.class); @Test public void testBrotliCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public void testBrotliCodecSmall() throws Exception { public void testBrotliCodecLarge() throws Exception { codecLargeTest(new BrotliCodec(), 1.1); // poor compressability codecLargeTest(new BrotliCodec(), 2); - codecLargeTest(new BrotliCodec(), 10); // very high compressability + codecLargeTest(new BrotliCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java index 8bc62482dfa9..251a0a94cfe0 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestHFileCompressionBrotli.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionBrotli extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionBrotli.class); + HBaseClassTestRule.forClass(TestHFileCompressionBrotli.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.BROTLI); } diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java index ac25951d2d4d..e37276fed6db 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionBrotli extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionBrotli.class); + HBaseClassTestRule.forClass(TestWALCompressionBrotli.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-lz4/pom.xml b/hbase-compression/hbase-compression-lz4/pom.xml index c023da743d8f..21f4bea7f25d 100644 --- a/hbase-compression/hbase-compression-lz4/pom.xml +++ b/hbase-compression/hbase-compression-lz4/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. 
hbase-compression-lz4 Apache HBase - Compression - LZ4 Pure Java compression support using lz4-java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +115,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java index 949db8f4b623..8f0f5dee672c 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; @@ -75,7 +76,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -86,7 +87,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java index 7d70c428b70a..243d227a8be9 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java @@ -1,23 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; +import net.jpountz.lz4.LZ4Compressor; +import net.jpountz.lz4.LZ4Factory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Compressor; -import net.jpountz.lz4.LZ4Factory; /** * Hadoop compressor glue for lz4-java. 
diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java index df32ecb6ce34..3aef246ec7d1 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java @@ -1,30 +1,31 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4SafeDecompressor; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4SafeDecompressor; /** * Hadoop decompressor glue for lz4-java. diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java index 06c113dea5ee..127083b24119 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); + HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZ4); } diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java index 0c237e105bac..bd1cebfda7b8 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; @@ -28,7 +29,7 @@ public class TestLz4Codec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLz4Codec.class); + HBaseClassTestRule.forClass(TestLz4Codec.class); @Test public void testLz4CodecSmall() throws Exception { @@ -38,8 +39,8 @@ public void testLz4CodecSmall() throws Exception { @Test public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec - codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // very high compressability + codecLargeTest(new Lz4Codec(), 2); + codecLargeTest(new Lz4Codec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java index fdf9b0a9cc14..81b5d943dc6d 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLz4 extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLz4.class); + HBaseClassTestRule.forClass(TestWALCompressionLz4.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-snappy/pom.xml b/hbase-compression/hbase-compression-snappy/pom.xml index 63e099853f83..d7232174c6e1 100644 --- a/hbase-compression/hbase-compression-snappy/pom.xml +++ b/hbase-compression/hbase-compression-snappy/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. hbase-compression-snappy Apache HBase - Compression - Snappy Pure Java compression support using Xerial Snappy - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +115,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java index 5ea47c82a672..b8048ac04062 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; @@ -75,7 +76,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -86,7 +87,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, Snappy.maxCompressedLength(bufferSize) - bufferSize); // overhead only diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java index 5a9cf6982c24..ba3fe470ca20 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java index 289265850f97..e7934b3e242c 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java index 15ed99ef65f1..42b0735dd26b 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); + HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); private static Configuration conf; @@ -49,8 +49,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.SNAPPY); } diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java index 99fd09dc013c..f62c35956736 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
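The Snappy tests above exercise the codec through HFiles and raw codec streams. A hedged round-trip sketch at the Hadoop codec level, assuming the hbase-compression-snappy module and hadoop-common are on the classpath; the input data and copy buffer size are illustrative.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.compress.xerial.SnappyCodec;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;

public class SnappyRoundTripSketch {
  public static void main(String[] args) throws Exception {
    SnappyCodec codec = new SnappyCodec();
    codec.setConf(new Configuration()); // the codec reads its block size from the conf

    byte[] data = "hello hbase compression".getBytes(StandardCharsets.UTF_8);
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (CompressionOutputStream out = codec.createOutputStream(compressed)) {
      out.write(data); // try-with-resources closes the stream, finishing the block
    }

    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (CompressionInputStream in =
      codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      IOUtils.copyBytes(in, restored, 4096, false);
    }
    System.out.println("restored: " + new String(restored.toByteArray(), StandardCharsets.UTF_8));
  }
}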
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; @@ -28,7 +29,7 @@ public class TestSnappyCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnappyCodec.class); + HBaseClassTestRule.forClass(TestSnappyCodec.class); @Test public void testSnappyCodecSmall() throws Exception { @@ -39,7 +40,7 @@ public void testSnappyCodecSmall() throws Exception { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java index ba59b6525340..dfbb63d0f6cc 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionSnappy extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); + HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-xz/pom.xml b/hbase-compression/hbase-compression-xz/pom.xml index 4efd0504f4ea..980cb30fe44a 100644 --- a/hbase-compression/hbase-compression-xz/pom.xml +++ b/hbase-compression/hbase-compression-xz/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. 
hbase-compression-xz Apache HBase - Compression - XZ Pure Java compression support using XZ for Java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +115,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java index 154eaf0a32d3..d4b8ce011481 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xz; @@ -75,7 +76,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -86,7 +87,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, CompressionUtil.compressionOverhead(bufferSize)); diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java index 74d707876718..08eb33301e8a 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xz; @@ -20,8 +21,8 @@ import java.nio.BufferOverflowException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -111,7 +112,7 @@ protected void checkSizeAndGrow(int extra) { } }) { try (LZMAOutputStream out = - new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) { + new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) { out.write(inBuf.array(), inBuf.arrayOffset(), uncompressed); } } diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java index 30b3ae0bf0e7..4da49d913c8d 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
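For context on the LzmaCompressor hunk above: each block is funneled through an org.tukaani.xz.LZMAOutputStream with the uncompressed length supplied up front. A standalone sketch of the same idea using XZ for Java; the preset and input are illustrative.

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.tukaani.xz.LZMA2Options;
import org.tukaani.xz.LZMAOutputStream;

public class LzmaBlockSketch {
  public static void main(String[] args) throws Exception {
    byte[] block = "a block of data to compress".getBytes(StandardCharsets.UTF_8);
    LZMA2Options options = new LZMA2Options(6); // illustrative preset
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    // Supplying the uncompressed length lets the stream write a complete
    // .lzma header for the block, as the compressor above does.
    try (LZMAOutputStream out = new LZMAOutputStream(sink, options, block.length)) {
      out.write(block);
    }
    System.out.println("compressed " + block.length + " -> " + sink.size() + " bytes");
  }
}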
*/ package org.apache.hadoop.hbase.io.compress.xz; diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java index 481c7287aa38..734740635084 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzma extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLzma.class); + HBaseClassTestRule.forClass(TestHFileCompressionLzma.class); private static Configuration conf; @@ -51,8 +51,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZMA); } diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java index 63978abe838b..e5320da16777 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; @@ -29,7 +30,7 @@ public class TestLzmaCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLzmaCodec.class); + HBaseClassTestRule.forClass(TestLzmaCodec.class); @Test public void testLzmaCodecSmall() throws Exception { @@ -39,8 +40,8 @@ public void testLzmaCodecSmall() throws Exception { @Test public void testLzmaCodecLarge() throws Exception { codecLargeTest(new LzmaCodec(), 1.1); // poor compressability - codecLargeTest(new LzmaCodec(), 2); - codecLargeTest(new LzmaCodec(), 10); // very high compressability + codecLargeTest(new LzmaCodec(), 2); + codecLargeTest(new LzmaCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java index 89ce68b0600e..ee937230cd26 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionLzma extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLzma.class); + HBaseClassTestRule.forClass(TestWALCompressionLzma.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-zstd/pom.xml b/hbase-compression/hbase-compression-zstd/pom.xml index 55f38c73ccdc..ef1437e09d90 100644 --- a/hbase-compression/hbase-compression-zstd/pom.xml +++ b/hbase-compression/hbase-compression-zstd/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. hbase-compression-zstd Apache HBase - Compression - ZStandard Pure Java compression support using zstd-jni - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -138,6 +115,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java index 521af5b25dd7..6848f0dfc48b 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -81,7 +82,7 @@ public CompressionInputStream createInputStream(InputStream in) throws IOExcepti @Override public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { + throws IOException { return new BlockDecompressorStream(in, d, getBufferSize(conf)); } @@ -92,10 +93,10 @@ public CompressionOutputStream createOutputStream(OutputStream out) throws IOExc @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { + throws IOException { int bufferSize = getBufferSize(conf); return new BlockCompressorStream(out, c, bufferSize, - (int)Zstd.compressBound(bufferSize) - bufferSize); // overhead only + (int) Zstd.compressBound(bufferSize) - bufferSize); // overhead only } @Override @@ -117,8 +118,7 @@ public String getDefaultExtension() { static int getLevel(Configuration conf) { return conf.getInt(ZSTD_LEVEL_KEY, - conf.getInt( - CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, + conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT)); } @@ -143,10 +143,8 @@ static byte[] getDictionary(final Configuration conf) { // Reference: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md static boolean isDictionary(byte[] dictionary) { - return (dictionary[0] == (byte)0x37 && - dictionary[1] == (byte)0xA4 && - dictionary[2] == (byte)0x30 && - dictionary[3] == (byte)0xEC); + return (dictionary[0] == (byte) 0x37 && dictionary[1] == (byte) 0xA4 + && dictionary[2] == (byte) 0x30 && dictionary[3] == (byte) 0xEC); } static int getDictionaryId(byte[] dictionary) { diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index 7e0c78d32a87..181c8dba06b6 100644 --- 
a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictCompress; /** * Hadoop compressor glue for zstd-jni. diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index 6bfa84e1c598..dd962f72098a 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
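A note on ZstdCodec.isDictionary above: a trained zstd dictionary begins with the magic number 0xEC30A437, stored little-endian as the bytes 0x37 0xA4 0x30 0xEC that the codec checks for. A small self-contained sketch of the same test:

public class ZstdDictMagicSketch {
  // A zstd dictionary starts with the magic number 0xEC30A437 (little-endian
  // bytes 0x37, 0xA4, 0x30, 0xEC), followed by the 4-byte dictionary id.
  static boolean looksLikeZstdDictionary(byte[] data) {
    return data != null && data.length >= 8
      && data[0] == (byte) 0x37 && data[1] == (byte) 0xA4
      && data[2] == (byte) 0x30 && data[3] == (byte) 0xEC;
  }
}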
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictDecompress; /** * Hadoop decompressor glue for zstd-java. diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java index 42c56a822d4d..8a2e9db503e5 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); + HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); private static Configuration conf; @@ -51,8 +51,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + Path path = + new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.ZSTD); } diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java index e75de9b9c466..55d61cf83ec5 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestWALCompressionZstd extends CompressedWALTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionZstd.class); + HBaseClassTestRule.forClass(TestWALCompressionZstd.class); @Rule public TestName name = new TestName(); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java index bf1c78cbc17f..6a66ac5f0e10 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -31,7 +32,7 @@ public class TestZstdCodec extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdCodec.class); + HBaseClassTestRule.forClass(TestZstdCodec.class); @Test public void testZstdCodecSmall() throws Exception { @@ -41,8 +42,8 @@ public void testZstdCodecSmall() throws Exception { @Test public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability - codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 2); + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java index 5a76a4531f25..2f5a9784ec4d 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
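The zstd dictionary tests that follow load a dictionary trained offline (with the zstd --train CLI, as noted in the test) and attach it to a column family. A hedged sketch of that wiring, mirroring the split/merge test below; the table name and HDFS path are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.zstd.ZstdCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class ZstdDictionaryFamilySketch {
  // Builds a descriptor whose family compresses HFiles with ZSTD and a shared
  // pre-trained dictionary; every region server must be able to read the path.
  static TableDescriptor zstdTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setCompressionType(Compression.Algorithm.ZSTD)
        .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, "hdfs:///dict/zstd.dict")
        .build())
      .build();
  }
}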
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -53,7 +54,7 @@ public class TestZstdDictionary extends CompressionTestBase { public static void setUp() throws Exception { Configuration conf = new Configuration(); TEST_DATA = DictionaryCache.loadFromResource(conf, - DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024*1024); + DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024 * 1024); assertNotNull("Failed to load test data", TEST_DATA); } @@ -76,7 +77,7 @@ public void test() throws Exception { public static void main(String[] args) throws IOException { // Write 1000 1k blocks for training to the specified file // Train with: - // zstd --train -B1024 -o + // zstd --train -B1024 -o if (args.length < 1) { System.err.println("Usage: TestZstdCodec "); System.exit(-1); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java index 6d850114bbbd..f39c40115e8f 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java @@ -85,8 +85,7 @@ public void test() throws Exception { final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) .setCompressionType(Compression.Algorithm.ZSTD) - .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath) - .build()) + .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath).build()) .build(); final Admin admin = TEST_UTIL.getAdmin(); admin.createTable(td, new byte[][] { Bytes.toBytes(1) }); @@ -108,6 +107,7 @@ public void test() throws Exception { public boolean evaluate() throws Exception { return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3; } + @Override public String explainFailure() throws Exception { return "Split has not finished yet"; @@ -120,7 +120,7 @@ public String explainFailure() throws Exception { RegionInfo regionA = null; RegionInfo regionB = null; - for (RegionInfo region: admin.getRegions(tableName)) { + for (RegionInfo region : admin.getRegions(tableName)) { if (region.getStartKey().length == 0) { regionA = region; } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) { @@ -129,16 +129,14 @@ public String explainFailure() throws Exception { } assertNotNull(regionA); assertNotNull(regionB); - admin.mergeRegionsAsync(new byte[][] { - regionA.getRegionName(), - regionB.getRegionName() - }, false).get(30, TimeUnit.SECONDS); + admin + .mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() }, false) + .get(30, TimeUnit.SECONDS); assertEquals(2, admin.getRegions(tableName).size()); ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(); assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName) .getRegionLocation(Bytes.toBytes(1), true).getServerName()); - try (AsyncConnection asyncConn = - ConnectionFactory.createAsyncConnection(conf).get()) { + try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) { assertEquals(expected, 
asyncConn.getRegionLocator(tableName) .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); } diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml index 4a795d5cf93f..d802455dec48 100644 --- a/hbase-compression/pom.xml +++ b/hbase-compression/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-compression + pom Apache HBase - Compression Pure Java compression support parent - pom hbase-compression-aircompressor @@ -81,10 +81,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index ab1a4e3bdef7..058c6d8186d9 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -33,33 +33,6 @@ true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -237,6 +210,33 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -258,7 +258,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 8df15417fa31..c58ff3165c5c 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -58,29 +58,26 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateService; /** - * This client class is for invoking the aggregate functions deployed on the - * Region Server side via the AggregateService. This class will implement the - * supporting functionality for summing/processing the individual results - * obtained from the AggregateService for each region. + * This client class is for invoking the aggregate functions deployed on the Region Server side via + * the AggregateService. This class will implement the supporting functionality for + * summing/processing the individual results obtained from the AggregateService for each region. *

- * This will serve as the client side handler for invoking the aggregate
- * functions.
- * For all aggregate functions,
+ * This will serve as the client side handler for invoking the aggregate functions. For all
+ * aggregate functions,
 * <ul>
 * <li>start row < end row is an essential condition (if they are not
 * {@link HConstants#EMPTY_BYTE_ARRAY})
- * <li>Column family can't be null. In case where multiple families are
- * provided, an IOException will be thrown. An optional column qualifier can
- * also be defined.
- * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
- * parameter type. For average and std, it returns a double value. For row
- * count, it returns a long value.
+ * <li>Column family can't be null. In case where multiple families are provided, an IOException
+ * will be thrown. An optional column qualifier can also be defined.
+ * <li>For methods to find maximum, minimum, sum, rowcount, it returns the parameter type. For
+ * average and std, it returns a double value. For row count, it returns a long value.
 * </ul>
- * <p>Call {@link #close()} when done.
+ * <p>

    + * Call {@link #close()} when done. */ @InterfaceAudience.Public public class AggregationClient implements Closeable { - // TODO: This class is not used. Move to examples? + // TODO: This class is not used. Move to examples? private static final Logger log = LoggerFactory.getLogger(AggregationClient.class); private final Connection connection; @@ -152,38 +149,35 @@ public void close() throws IOException { } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return max val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R max( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R + max(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return max(table, ci, scan); } } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return max val <> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
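Since the javadoc above lays out the calling conventions (a single column family, start row < end row, the interpreter choosing the value type), a hedged usage sketch follows; the table and column names are illustrative, and the AggregateImplementation coprocessor is assumed to be deployed on the table.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class AggregationMaxSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (AggregationClient aggregationClient = new AggregationClient(conf)) {
      Scan scan = new Scan();
      scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")); // exactly one family, per the javadoc
      Long max = aggregationClient.max(TableName.valueOf("demo"), new LongColumnInterpreter(), scan);
      System.out.println("max = " + max);
    }
  }
}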
*/ - public - R max(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R max(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MaxCallBack implements Batch.Callback { R max = null; @@ -199,61 +193,58 @@ public synchronized void update(byte[] region, byte[] row, R result) { } MaxCallBack aMaxCallBack = new MaxCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMax(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMax(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); } - }, aMaxCallBack); + return null; + } + }, aMaxCallBack); return aMaxCallBack.getMax(); } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R min( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R + min(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return min(table, ci, scan); } } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. 
* @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R min(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R min(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MinCallBack implements Batch.Callback { private R min = null; @@ -270,68 +261,66 @@ public synchronized void update(byte[] region, byte[] row, R result) { MinCallBack minCallBack = new MinCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMin(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMin(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, minCallBack); + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); + } + return null; + } + }, minCallBack); log.debug("Min fom all regions is: " + minCallBack.getMinimum()); return minCallBack.getMinimum(); } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. 
* @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public long rowCount( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public long + rowCount(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return rowCount(table, ci, scan); } } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ - public - long rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public long + rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true); class RowNumCallback implements Batch.Callback { private final AtomicLong rowCountL = new AtomicLong(0); @@ -348,57 +337,56 @@ public void update(byte[] region, byte[] row, Long result) { RowNumCallback rowNum = new RowNumCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public Long call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getRowNum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); - ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); - bb.rewind(); - return bb.getLong(); + new Batch.Call() { + @Override + public Long call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getRowNum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, rowNum); + byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); + ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); + bb.rewind(); + return bb.getLong(); + } + }, rowNum); return rowNum.getRowNumCount(); } /** - * It sums up the value returned from various regions. In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public S sum( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S + sum(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return sum(table, ci, scan); } } /** - * It sums up the value returned from various regions. In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param table table to scan. 
- * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - S sum(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S sum(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class SumCallBack implements Batch.Callback { @@ -415,59 +403,59 @@ public synchronized void update(byte[] region, byte[] row, S result) { } SumCallBack sumCallBack = new SumCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public S call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - // Not sure what is going on here why I have to do these casts. TODO. - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getSum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() == 0) { - return null; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - return s; + new Batch.Call() { + @Override + public S call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + // Not sure what is going on here why I have to do these casts. TODO. + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getSum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, sumCallBack); + if (response.getFirstPartCount() == 0) { + return null; + } + ByteString b = response.getFirstPart(0); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + return s; + } + }, sumCallBack); return sumCallBack.getSumResult(); } /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. + * It computes average while fetching sum and row count from all the corresponding regions. + * Approach is to compute a global sum of region level sum and rowcount and then compute the + * average. * @param tableName the name of the table to scan - * @param scan the HBase scan object to use to read data from HBase - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @param scan the HBase scan object to use to read data from HBase + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
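
Continuing that sketch, sum and avg follow the same calling pattern once a single value column is named; the qualifier below is a placeholder, and sum may return null when no rows match the scan.

  // Fragment reusing conf and aggClient from the row-count sketch above.
  Scan scan = new Scan();
  scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("amount")); // exactly one family, one qualifier
  Long total = aggClient.sum(TableName.valueOf("mytable"), new LongColumnInterpreter(), scan);
  double mean = aggClient.avg(TableName.valueOf("mytable"), new LongColumnInterpreter(), scan);
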
*/ private Pair getAvgArgs( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return getAvgArgs(table, ci, scan); } } /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. + * It computes average while fetching sum and row count from all the corresponding regions. + * Approach is to compute a global sum of region level sum and rowcount and then compute the + * average. * @param table table to scan. - * @param scan the HBase scan object to use to read data from HBase - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @param scan the HBase scan object to use to read data from HBase + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - private - Pair getAvgArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair + getAvgArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class AvgCallBack implements Batch.Callback> { S sum = null; @@ -486,90 +474,85 @@ public synchronized void update(byte[] region, byte[] row, Pair result) AvgCallBack avgCallBack = new AvgCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public Pair call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getAvg(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair pair = new Pair<>(null, 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - pair.setFirst(s); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call>() { + @Override + public Pair call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getAvg(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair pair = new Pair<>(null, 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, avgCallBack); + ByteString b = response.getFirstPart(0); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + pair.setFirst(s); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, avgCallBack); return avgCallBack.getAvgArgs(); } /** - * This is the client side 
interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double avg(final TableName tableName, final ColumnInterpreter ci, - Scan scan) throws Throwable { + public double + avg(final TableName tableName, final ColumnInterpreter ci, Scan scan) + throws Throwable { Pair p = getAvgArgs(tableName, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public double avg( - final Table table, final ColumnInterpreter ci, Scan scan) - throws Throwable { + public double + avg(final Table table, final ColumnInterpreter ci, Scan scan) throws Throwable { Pair p = getAvgArgs(table, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * It computes a global standard deviation for a given column and its value. - * Standard deviation is square root of (average of squares - - * average*average). From individual regions, it obtains sum, square sum and - * number of rows. With these, the above values are computed to get the global - * std. + * It computes a global standard deviation for a given column and its value. 
Standard deviation is + * square root of (average of squares - average*average). From individual regions, it obtains sum, + * square sum and number of rows. With these, the above values are computed to get the global std. * @param table table to scan. - * @param scan the HBase scan object to use to read data from HBase + * @param scan the HBase scan object to use to read data from HBase * @return standard deviations - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - private - Pair, Long> getStdArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair, Long> + getStdArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class StdCallback implements Batch.Callback, Long>> { long rowCountVal = 0L; @@ -595,75 +578,72 @@ public synchronized void update(byte[] region, byte[] row, Pair, Long> r StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call, Long>>() { - @Override - public Pair, Long> call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getStd(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - pair.setFirst(list); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call, Long>>() { + @Override + public Pair, Long> call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getStd(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, stdCallback); + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); + } + pair.setFirst(list); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, stdCallback); return stdCallback.getStdParams(); } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. 
It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double std(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public double std( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return std(table, ci, scan); } } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public double std( - final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public double + std(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { Pair, Long> p = getStdArgs(table, ci, scan); double res = 0d; double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond()); @@ -674,21 +654,19 @@ public double st } /** - * It helps locate the region with median for a given column whose weight - * is specified in an optional column. - * From individual regions, it obtains sum of values and sum of weights. + * It helps locate the region with median for a given column whose weight is specified in an + * optional column. From individual regions, it obtains sum of values and sum of weights. * @param table table to scan. 
- * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase - * @return pair whose first element is a map between start row of the region - * and (sum of values, sum of weights) for the region, the second element is - * (sum of values, sum of weights) for all the regions chosen - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase + * @return pair whose first element is a map between start row of the region and (sum of values, + * sum of weights) for the region, the second element is (sum of values, sum of weights) + * for all the regions chosen + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ private - Pair>, List> - getMedianArgs(final Table table, + Pair>, List> getMedianArgs(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); final NavigableMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -712,64 +690,63 @@ public synchronized void update(byte[] region, byte[] row, List result) { } StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public List call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMedian(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } + new Batch.Call>() { + @Override + public List call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMedian(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - return list; + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); } + return list; + } - }, stdCallback); + }, stdCallback); return stdCallback.getMedianParams(); } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. 
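
A usage sketch for the median call documented here. The column names are placeholders; the first qualifier holds the values, and an optional second qualifier in the same family would be treated as the weight column.

  // Fragment reusing conf and aggClient from the row-count sketch above.
  Scan scan = new Scan();
  scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("value"));
  Long median = aggClient.median(TableName.valueOf("mytable"), new LongColumnInterpreter(), scan);
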
* @param tableName the name of the table to scan - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R median(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public R median( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return median(table, ci, scan); } } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. * @param table table to scan. - * @param ci the user's ColumnInterpreter implementation - * @param scan the HBase scan object to use to read data from HBase + * @param ci the user's ColumnInterpreter implementation + * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ - public - R median(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public R median(final Table table, + ColumnInterpreter ci, Scan scan) throws Throwable { Pair>, List> p = getMedianArgs(table, ci, scan); byte[] startRow = null; byte[] colFamily = scan.getFamilies()[0]; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java index 953fd6e2dfeb..0f212e6d8673 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client.coprocessor; - import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -40,18 +39,21 @@ */ @InterfaceAudience.Private public final class AggregationHelper { - private AggregationHelper() {} + private AggregationHelper() { + } /** - * @param scan the HBase scan object to use to read data from HBase + * @param scan the HBase scan object to use to read data from HBase * @param canFamilyBeAbsent whether column family can be absent in familyMap of scan */ private static void validateParameters(Scan scan, boolean canFamilyBeAbsent) throws IOException { - if (scan == null + if ( + scan == null || (Bytes.equals(scan.getStartRow(), scan.getStopRow()) - && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) + && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) || ((Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) > 0) - && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))) { + && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) + ) { throw new IOException("Agg client Exception: Startrow should be smaller than Stoprow"); } else if (!canFamilyBeAbsent) { if (scan.getFamilyMap().size() != 1) { @@ -61,8 +63,8 @@ private static void validateParameters(Scan scan, boolean canFamilyBeAbsent) thr } static AggregateRequest - validateArgAndGetPB(Scan scan, ColumnInterpreter ci, boolean canFamilyBeAbsent) - throws IOException { + validateArgAndGetPB(Scan scan, ColumnInterpreter ci, boolean canFamilyBeAbsent) + throws IOException { validateParameters(scan, canFamilyBeAbsent); final AggregateRequest.Builder requestBuilder = AggregateRequest.newBuilder(); requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); @@ -79,15 +81,15 @@ private static void validateParameters(Scan scan, boolean canFamilyBeAbsent) thr * assumed to be a PB Message subclass, and the instance is created using parseFrom method on the * passed ByteString. * @param runtimeClass the runtime type of the class - * @param position the position of the argument in the class declaration - * @param b the ByteString which should be parsed to get the instance created + * @param position the position of the argument in the class declaration + * @param b the ByteString which should be parsed to get the instance created * @return the instance * @throws IOException Either we couldn't instantiate the method object, or "parseFrom" failed. */ @SuppressWarnings("unchecked") // Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO. 
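
The method below leans on plain java.lang.reflect generics introspection. A minimal sketch of the idea, assuming LongColumnInterpreter as the concrete interpreter:

  import java.lang.reflect.ParameterizedType;
  import java.lang.reflect.Type;
  import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;

  // Read the actual type argument at a given position of the interpreter's generic superclass.
  Type superType = LongColumnInterpreter.class.getGenericSuperclass(); // a parameterized ColumnInterpreter
  Type[] typeArgs = ((ParameterizedType) superType).getActualTypeArguments();
  Class<?> promotedMsgClass = (Class<?>) typeArgs[4]; // position 4: PB message type for the promoted value
  // getParsedGenericInstance then invokes the static parseFrom(ByteString) method on that class.
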
public static T getParsedGenericInstance(Class runtimeClass, int position, - ByteString b) throws IOException { + ByteString b) throws IOException { Type type = runtimeClass.getGenericSuperclass(); Type argType = ((ParameterizedType) type).getActualTypeArguments()[position]; Class classType = (Class) argType; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java index ee4eeee99b76..64bb187ad07f 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,10 +54,11 @@ */ @InterfaceAudience.Public public final class AsyncAggregationClient { - private AsyncAggregationClient() {} + private AsyncAggregationClient() { + } private static abstract class AbstractAggregationCallback - implements CoprocessorCallback { + implements CoprocessorCallback { private final CompletableFuture future; protected boolean finished = false; @@ -84,8 +85,7 @@ public synchronized void onError(Throwable error) { completeExceptionally(error); } - protected abstract void aggregate(RegionInfo region, AggregateResponse resp) - throws IOException; + protected abstract void aggregate(RegionInfo region, AggregateResponse resp) throws IOException; @Override public synchronized void onRegionComplete(RegionInfo region, AggregateResponse resp) { @@ -109,15 +109,15 @@ public synchronized void onComplete() { } private static R - getCellValueFromProto(ColumnInterpreter ci, AggregateResponse resp, - int firstPartIndex) throws IOException { + getCellValueFromProto(ColumnInterpreter ci, AggregateResponse resp, + int firstPartIndex) throws IOException { Q q = getParsedGenericInstance(ci.getClass(), 3, resp.getFirstPart(firstPartIndex)); return ci.getCellValueFromProto(q); } private static S - getPromotedValueFromProto(ColumnInterpreter ci, AggregateResponse resp, - int firstPartIndex) throws IOException { + getPromotedValueFromProto(ColumnInterpreter ci, AggregateResponse resp, + int firstPartIndex) throws IOException { T t = getParsedGenericInstance(ci.getClass(), 4, resp.getFirstPart(firstPartIndex)); return ci.getPromotedValueFromProto(t); } @@ -127,7 +127,7 @@ private static byte[] nullToEmpty(byte[] b) { } public static CompletableFuture - max(AsyncTable table, ColumnInterpreter ci, Scan scan) { + max(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -156,15 +156,15 @@ protected R getFinalResult() { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getMax(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . 
coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getMax(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static CompletableFuture - min(AsyncTable table, ColumnInterpreter ci, Scan scan) { + min(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -194,16 +194,16 @@ protected R getFinalResult() { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getMin(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getMin(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static - CompletableFuture rowCount(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + rowCount(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -227,15 +227,15 @@ protected Long getFinalResult() { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getRowNum(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getRowNum(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static CompletableFuture - sum(AsyncTable table, ColumnInterpreter ci, Scan scan) { + sum(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -261,16 +261,16 @@ protected S getFinalResult() { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getSum(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getSum(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static - CompletableFuture avg(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + avg(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -298,16 +298,16 @@ protected Double getFinalResult() { } }; table - . 
coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getAvg(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getAvg(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } public static - CompletableFuture std(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + std(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -341,19 +341,19 @@ protected Double getFinalResult() { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getStd(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getStd(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } // the map key is the startRow of the region private static - CompletableFuture> - sumByRegion(AsyncTable table, ColumnInterpreter ci, Scan scan) { + CompletableFuture> + sumByRegion(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture> future = - new CompletableFuture>(); + new CompletableFuture>(); AggregateRequest req; try { req = validateArgAndGetPB(scan, ci, false); @@ -363,9 +363,9 @@ protected Double getFinalResult() { } int firstPartIndex = scan.getFamilyMap().get(scan.getFamilies()[0]).size() - 1; AbstractAggregationCallback> callback = - new AbstractAggregationCallback>(future) { + new AbstractAggregationCallback>(future) { - private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @Override protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { @@ -380,16 +380,16 @@ protected NavigableMap getFinalResult() { } }; table - . coprocessorService(AggregateService::newStub, - (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) - .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) - .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); + . 
coprocessorService(AggregateService::newStub, + (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) + .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) + .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; } private static void findMedian( - CompletableFuture future, AsyncTable table, - ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { + CompletableFuture future, AsyncTable table, + ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { double halfSum = ci.divideForAvg(sumByRegion.values().stream().reduce(ci::add).get(), 2L); S movingSum = null; byte[] startRow = null; @@ -455,9 +455,9 @@ public void onComplete() { }); } - public static - CompletableFuture median(AsyncTable table, - ColumnInterpreter ci, Scan scan) { + public static CompletableFuture + median(AsyncTable table, ColumnInterpreter ci, + Scan scan) { CompletableFuture future = new CompletableFuture<>(); addListener(sumByRegion(table, ci, scan), (sumByRegion, error) -> { if (error != null) { diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 73eef3104265..84d8c1fbf354 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -48,11 +48,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateService; /** - * A concrete AggregateProtocol implementation. Its system level coprocessor - * that computes the aggregate function at a region level. - * {@link ColumnInterpreter} is used to interpret column value. This class is - * parameterized with the following (these are the types with which the {@link ColumnInterpreter} - * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}): + * A concrete AggregateProtocol implementation. Its system level coprocessor that computes the + * aggregate function at a region level. {@link ColumnInterpreter} is used to interpret column + * value. This class is parameterized with the following (these are the types with which the + * {@link ColumnInterpreter} is parameterized, and for more description on these, refer to + * {@link ColumnInterpreter}): * @param Cell value data type * @param Promoted data type * @param
<P>
    PB message that is used to transport initializer specific bytes @@ -66,15 +66,14 @@ public class AggregateImplementation done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; T max = null; @@ -112,24 +111,24 @@ public void getMax(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Maximum from this region is " - + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + max); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + max); done.run(response); } /** - * Gives the minimum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, minimum value for the - * entire column family will be returned. + * Gives the minimum for a given combination of column qualifier and column family, in the given + * row range as defined in the Scan object. In its current implementation, it takes one column + * family and one column qualifier (if provided). In case of null column qualifier, minimum value + * for the entire column family will be returned. */ @Override public void getMin(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; T min = null; @@ -156,8 +155,8 @@ public void getMin(RpcController controller, AggregateRequest request, results.clear(); } while (hasMoreRows); if (min != null) { - response = AggregateResponse.newBuilder().addFirstPart( - ci.getProtoForCellType(min).toByteString()).build(); + response = AggregateResponse.newBuilder() + .addFirstPart(ci.getProtoForCellType(min).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -165,24 +164,24 @@ public void getMin(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Minimum from this region is " - + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + min); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + min); done.run(response); } /** - * Gives the sum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, sum for the entire column - * family will be returned. + * Gives the sum for a given combination of column qualifier and column family, in the given row + * range as defined in the Scan object. In its current implementation, it takes one column family + * and one column qualifier (if provided). In case of null column qualifier, sum for the entire + * column family will be returned. 
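
These RPCs are only served where the endpoint is deployed, so as a reminder of the deployment side: AggregateImplementation can be loaded cluster-wide through the hbase.coprocessor.region.classes property, or per table roughly as sketched below (table name is a placeholder).

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  public class EnableAggregateEndpoint {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName tn = TableName.valueOf("mytable");
        TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tn))
          .setCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation")
          .build();
        admin.modifyTable(td); // regions reopen with the endpoint attached
      }
    }
  }
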
*/ @Override public void getSum(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; long sum = 0L; @@ -212,8 +211,8 @@ public void getSum(RpcController controller, AggregateRequest request, results.clear(); } while (hasMoreRows); if (sumVal != null) { - response = AggregateResponse.newBuilder().addFirstPart( - ci.getProtoForPromotedType(sumVal).toByteString()).build(); + response = AggregateResponse.newBuilder() + .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -221,21 +220,22 @@ public void getSum(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } - log.debug("Sum from this region is " - + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + sum); + log.debug("Sum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + + ": " + sum); done.run(response); } /** - * Gives the row count for the given column family and column qualifier, in - * the given row range as defined in the Scan object. + * Gives the row count for the given column family and column qualifier, in the given row range as + * defined in the Scan object. */ @Override public void getRowNum(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; long counter = 0L; List results = new ArrayList<>(); @@ -244,8 +244,8 @@ public void getRowNum(RpcController controller, AggregateRequest request, Scan scan = ProtobufUtil.toScan(request.getScan()); byte[][] colFamilies = scan.getFamilies(); byte[] colFamily = colFamilies != null ? colFamilies[0] : null; - NavigableSet qualifiers = colFamilies != null ? - scan.getFamilyMap().get(colFamily) : null; + NavigableSet qualifiers = + colFamilies != null ? scan.getFamilyMap().get(colFamily) : null; byte[] qualifier = null; if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); @@ -264,38 +264,35 @@ public void getRowNum(RpcController controller, AggregateRequest request, } while (hasMoreRows); ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter); bb.rewind(); - response = AggregateResponse.newBuilder().addFirstPart( - ByteString.copyFrom(bb)).build(); + response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); } finally { if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Row counter from this region is " - + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + counter); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + counter); done.run(response); } /** - * Gives a Pair with first object as Sum and second object as row count, - * computed for a given combination of column qualifier and column family in - * the given row range as defined in the Scan object. In its current - * implementation, it takes one column family and one column qualifier (if - * provided). In case of null column qualifier, an aggregate sum over all the - * entire column family will be returned. 
+ * Gives a Pair with first object as Sum and second object as row count, computed for a given + * combination of column qualifier and column family in the given row range as defined in the Scan + * object. In its current implementation, it takes one column family and one column qualifier (if + * provided). In case of null column qualifier, an aggregate sum over all the entire column family + * will be returned. *
<p>
    - * The average is computed in - * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by - * processing results from all regions, so its "ok" to pass sum and a Long - * type. + * The average is computed in AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing + * results from all regions, so its "ok" to pass sum and a Long type. */ @Override public void getAvg(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -318,8 +315,8 @@ public void getAvg(RpcController controller, AggregateRequest request, hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { - sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, - qualifier, results.get(i)))); + sumVal = + ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i)))); } rowCountVal++; } while (hasMoreRows); @@ -338,24 +335,24 @@ public void getAvg(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); } /** - * Gives a Pair with first object a List containing Sum and sum of squares, - * and the second object as row count. It is computed for a given combination of - * column qualifier and column family in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * one column qualifier (if provided). The idea is get the value of variance first: - * the average of the squares less the square of the average a standard - * deviation is square root of variance. + * Gives a Pair with first object a List containing Sum and sum of squares, and the second object + * as row count. It is computed for a given combination of column qualifier and column family in + * the given row range as defined in the Scan object. In its current implementation, it takes one + * column family and one column qualifier (if provided). The idea is get the value of variance + * first: the average of the squares less the square of the average a standard deviation is square + * root of variance. */ @Override public void getStd(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; try { @@ -379,8 +376,8 @@ public void getStd(RpcController controller, AggregateRequest request, hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { - tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, - qualifier, results.get(i)))); + tempVal = + ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i)))); } results.clear(); sumVal = ci.add(sumVal, tempVal); @@ -404,23 +401,22 @@ public void getStd(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); } /** - * Gives a List containing sum of values and sum of weights. - * It is computed for the combination of column - * family and column qualifier(s) in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * two column qualifiers. 
The first qualifier is for values column and - * the second qualifier (optional) is for weight column. + * Gives a List containing sum of values and sum of weights. It is computed for the combination of + * column family and column qualifier(s) in the given row range as defined in the Scan object. In + * its current implementation, it takes one column family and two column qualifiers. The first + * qualifier is for values column and the second qualifier (optional) is for weight column. */ @Override public void getMedian(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -447,11 +443,10 @@ public void getMedian(RpcController controller, AggregateRequest request, int listSize = results.size(); for (int i = 0; i < listSize; i++) { Cell kv = results.get(i); - tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, - valQualifier, kv))); + tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv))); if (weightQualifier != null) { - tempWeight = ci.add(tempWeight, - ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv))); + tempWeight = + ci.add(tempWeight, ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv))); } } results.clear(); @@ -471,7 +466,8 @@ public void getMedian(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); @@ -479,11 +475,11 @@ public void getMedian(RpcController controller, AggregateRequest request, @SuppressWarnings("unchecked") // Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO. - ColumnInterpreter constructColumnInterpreterFromRequest( - AggregateRequest request) throws IOException { + ColumnInterpreter constructColumnInterpreterFromRequest(AggregateRequest request) + throws IOException { String className = request.getInterpreterClassName(); try { - ColumnInterpreter ci; + ColumnInterpreter ci; Class cls = Class.forName(className); ci = (ColumnInterpreter) cls.getDeclaredConstructor().newInstance(); @@ -493,8 +489,8 @@ ColumnInterpreter constructColumnInterpreterFromRequest( ci.initialize(initMsg); } return ci; - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException(e); } } @@ -507,17 +503,17 @@ public Iterable getServices() { /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index 2b38dcbaae48..b763d5750521 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.coprocessor; @@ -81,10 +80,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ExportProtos; /** - * Export an HBase table. Writes content to sequence files up in HDFS. Use - * {@link Import} to read it back in again. It is implemented by the endpoint - * technique. - * + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it + * back in again. It is implemented by the endpoint technique. 
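
A minimal driver sketch for the endpoint-based export described above, built on this file's run(Configuration, TableName, Scan, Path) entry point (changed further down in this hunk). The table name and output path are placeholders, the Export endpoint is assumed to be loaded on the table, and the result maps each region's start key to its export response.

  import java.util.Map;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.coprocessor.Export;

  public class ExportSketch {
    public static void main(String[] args) throws Throwable {
      Configuration conf = HBaseConfiguration.create();
      Scan scan = new Scan(); // full-table scan; narrow it with start/stop rows or filters as needed
      Map<byte[], ?> perRegion =
        Export.run(conf, TableName.valueOf("mytable"), scan, new Path("/export/mytable"));
      System.out.println("regions exported: " + perRegion.size());
    }
  }
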
* @see org.apache.hadoop.hbase.mapreduce.Export */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -93,7 +90,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces private static final Logger LOG = LoggerFactory.getLogger(Export.class); private static final Class DEFAULT_CODEC = DefaultCodec.class; private static final SequenceFile.CompressionType DEFAULT_TYPE = - SequenceFile.CompressionType.RECORD; + SequenceFile.CompressionType.RECORD; private RegionCoprocessorEnvironment env = null; private UserProvider userProvider; @@ -110,31 +107,29 @@ static Map run(final Configuration conf, final String[] args) return null; } Triple arguments = - ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); + ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); } - public static Map run(final Configuration conf, TableName tableName, - Scan scan, Path dir) throws Throwable { + public static Map run(final Configuration conf, TableName tableName, Scan scan, + Path dir) throws Throwable { FileSystem fs = dir.getFileSystem(conf); UserProvider userProvider = UserProvider.instantiate(conf); checkDir(fs, dir); FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); fsDelegationToken.acquireDelegationToken(fs); try { - final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, - scan, fsDelegationToken.getUserToken()); + final ExportProtos.ExportRequest request = + getConfiguredRequest(conf, dir, scan, fsDelegationToken.getUserToken()); try (Connection con = ConnectionFactory.createConnection(conf); - Table table = con.getTable(tableName)) { + Table table = con.getTable(tableName)) { Map result = new TreeMap<>(Bytes.BYTES_COMPARATOR); - table.coprocessorService(ExportProtos.ExportService.class, - scan.getStartRow(), - scan.getStopRow(), - (ExportProtos.ExportService service) -> { + table.coprocessorService(ExportProtos.ExportService.class, scan.getStartRow(), + scan.getStopRow(), (ExportProtos.ExportService service) -> { ServerRpcController controller = new ServerRpcController(); Map rval = new TreeMap<>(Bytes.BYTES_COMPARATOR); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); service.export(controller, request, rpcCallback); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -159,8 +154,8 @@ private static boolean getCompression(final ExportProtos.ExportRequest request) } } - private static SequenceFile.CompressionType getCompressionType( - final ExportProtos.ExportRequest request) { + private static SequenceFile.CompressionType + getCompressionType(final ExportProtos.ExportRequest request) { if (request.hasCompressType()) { return SequenceFile.CompressionType.valueOf(request.getCompressType()); } else { @@ -169,24 +164,24 @@ private static SequenceFile.CompressionType getCompressionType( } private static CompressionCodec getCompressionCodec(final Configuration conf, - final ExportProtos.ExportRequest request) { + final ExportProtos.ExportRequest request) { try { Class codecClass; if (request.hasCompressCodec()) { - codecClass = conf.getClassByName(request.getCompressCodec()) - .asSubclass(CompressionCodec.class); + codecClass = + conf.getClassByName(request.getCompressCodec()).asSubclass(CompressionCodec.class); } else { codecClass = 
DEFAULT_CODEC; } return ReflectionUtils.newInstance(codecClass, conf); } catch (ClassNotFoundException e) { - throw new IllegalArgumentException("Compression codec " - + request.getCompressCodec() + " was not found.", e); + throw new IllegalArgumentException( + "Compression codec " + request.getCompressCodec() + " was not found.", e); } } private static SequenceFile.Writer.Option getOutputPath(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { Path file = new Path(request.getOutputPath(), "export-" + info.getEncodedName()); FileSystem fs = file.getFileSystem(conf); if (fs.exists(file)) { @@ -196,14 +191,14 @@ private static SequenceFile.Writer.Option getOutputPath(final Configuration conf } private static List getWriterOptions(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { List rval = new LinkedList<>(); rval.add(SequenceFile.Writer.keyClass(ImmutableBytesWritable.class)); rval.add(SequenceFile.Writer.valueClass(Result.class)); rval.add(getOutputPath(conf, info, request)); if (getCompression(request)) { rval.add(SequenceFile.Writer.compression(getCompressionType(request), - getCompressionCodec(conf, request))); + getCompressionCodec(conf, request))); } else { rval.add(SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); } @@ -211,12 +206,12 @@ private static List getWriterOptions(final Configura } private static ExportProtos.ExportResponse processData(final Region region, - final Configuration conf, final UserProvider userProvider, final Scan scan, - final Token userToken, final List opts) throws IOException { + final Configuration conf, final UserProvider userProvider, final Scan scan, + final Token userToken, final List opts) throws IOException { ScanCoprocessor cp = new ScanCoprocessor(region); RegionScanner scanner = null; try (RegionOp regionOp = new RegionOp(region); - SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { + SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { scanner = cp.checkScannerOpen(scan); ImmutableBytesWritable key = new ImmutableBytesWritable(); long rowCount = 0; @@ -235,11 +230,13 @@ private static ExportProtos.ExportResponse processData(final Region region, } Cell firstCell = cells.get(0); for (Cell cell : cells) { - if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), + if ( + Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) != 0) { - throw new IOException("Why the RegionScanner#nextRaw returns the data of different" - + " rows?? first row=" + cell.getRowLength()) != 0 + ) { + throw new IOException( + "Why the RegionScanner#nextRaw returns the data of different" + " rows?? 
first row=" + Bytes.toHex(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength()) + ", current row=" @@ -258,10 +255,8 @@ private static ExportProtos.ExportResponse processData(final Region region, } results.clear(); } while (hasMore); - return ExportProtos.ExportResponse.newBuilder() - .setRowCount(rowCount) - .setCellCount(cellCount) - .build(); + return ExportProtos.ExportResponse.newBuilder().setRowCount(rowCount).setCellCount(cellCount) + .build(); } finally { cp.checkScannerClose(scanner); } @@ -276,31 +271,24 @@ private static void checkDir(final FileSystem fs, final Path dir) throws IOExcep } } - private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, - Path dir, final Scan scan, final Token userToken) throws IOException { + private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, Path dir, + final Scan scan, final Token userToken) throws IOException { boolean compressed = conf.getBoolean(FileOutputFormat.COMPRESS, false); - String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, - DEFAULT_TYPE.toString()); - String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, - DEFAULT_CODEC.getName()); + String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, DEFAULT_TYPE.toString()); + String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, DEFAULT_CODEC.getName()); DelegationToken protoToken = null; if (userToken != null) { protoToken = DelegationToken.newBuilder() - .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) - .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); - } - LOG.info("compressed=" + compressed - + ", compression type=" + compressionType - + ", compression codec=" + compressionCodec - + ", userToken=" + userToken); + .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) + .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); + } + LOG.info("compressed=" + compressed + ", compression type=" + compressionType + + ", compression codec=" + compressionCodec + ", userToken=" + userToken); ExportProtos.ExportRequest.Builder builder = ExportProtos.ExportRequest.newBuilder() - .setScan(ProtobufUtil.toScan(scan)) - .setOutputPath(dir.toString()) - .setCompressed(compressed) - .setCompressCodec(compressionCodec) - .setCompressType(compressionType); + .setScan(ProtobufUtil.toScan(scan)).setOutputPath(dir.toString()).setCompressed(compressed) + .setCompressCodec(compressionCodec).setCompressType(compressionType); if (protoToken != null) { builder.setFsToken(protoToken); } @@ -328,11 +316,11 @@ public Iterable getServices() { @Override public void export(RpcController controller, ExportProtos.ExportRequest request, - RpcCallback done) { + RpcCallback done) { Region region = env.getRegion(); Configuration conf = HBaseConfiguration.create(env.getConfiguration()); conf.setStrings("io.serializations", conf.get("io.serializations"), - ResultSerialization.class.getName()); + ResultSerialization.class.getName()); try { Scan scan = validateKey(region.getRegionInfo(), request); Token userToken = null; @@ -340,12 +328,11 @@ public void export(RpcController controller, ExportProtos.ExportRequest request, LOG.warn("Hadoop security is enable, but no found of user token"); } else if 
(userProvider.isHadoopSecurityEnabled()) { userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), - request.getFsToken().getPassword().toByteArray(), - new Text(request.getFsToken().getKind()), - new Text(request.getFsToken().getService())); + request.getFsToken().getPassword().toByteArray(), + new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService())); } - ExportProtos.ExportResponse response = processData(region, conf, userProvider, - scan, userToken, getWriterOptions(conf, region.getRegionInfo(), request)); + ExportProtos.ExportResponse response = processData(region, conf, userProvider, scan, + userToken, getWriterOptions(conf, region.getRegionInfo(), request)); done.run(response); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -354,18 +341,16 @@ public void export(RpcController controller, ExportProtos.ExportRequest request, } private Scan validateKey(final RegionInfo region, final ExportProtos.ExportRequest request) - throws IOException { + throws IOException { Scan scan = ProtobufUtil.toScan(request.getScan()); byte[] regionStartKey = region.getStartKey(); byte[] originStartKey = scan.getStartRow(); - if (originStartKey == null - || Bytes.compareTo(originStartKey, regionStartKey) < 0) { + if (originStartKey == null || Bytes.compareTo(originStartKey, regionStartKey) < 0) { scan.withStartRow(regionStartKey); } byte[] regionEndKey = region.getEndKey(); byte[] originEndKey = scan.getStopRow(); - if (originEndKey == null - || Bytes.compareTo(originEndKey, regionEndKey) > 0) { + if (originEndKey == null || Bytes.compareTo(originEndKey, regionEndKey) > 0) { scan.withStartRow(regionEndKey); } return scan; @@ -423,8 +408,8 @@ void checkScannerClose(final InternalScanner s) throws IOException { } } - boolean preScannerNext(final InternalScanner s, - final List results, final int limit) throws IOException { + boolean preScannerNext(final InternalScanner s, final List results, final int limit) + throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -433,9 +418,8 @@ boolean preScannerNext(final InternalScanner s, } } - boolean postScannerNext(final InternalScanner s, - final List results, final int limit, boolean hasMore) - throws IOException { + boolean postScannerNext(final InternalScanner s, final List results, final int limit, + boolean hasMore) throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -447,15 +431,13 @@ boolean postScannerNext(final InternalScanner s, private static class SecureWriter implements Closeable { private final PrivilegedWriter privilegedWriter; - SecureWriter(final Configuration conf, final UserProvider userProvider, - final Token userToken, final List opts) - throws IOException { + SecureWriter(final Configuration conf, final UserProvider userProvider, final Token userToken, + final List opts) throws IOException { User user = getActiveUser(userProvider, userToken); try { SequenceFile.Writer sequenceFileWriter = - user.runAs((PrivilegedExceptionAction) () -> - SequenceFile.createWriter(conf, - opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); + user.runAs((PrivilegedExceptionAction) () -> SequenceFile + .createWriter(conf, opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); privilegedWriter = new PrivilegedWriter(user, sequenceFileWriter); } catch (InterruptedException e) { throw new IOException(e); @@ -467,7 +449,7 @@ void append(final Object key, final Object value) throws IOException { } 
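The SecureWriter constructor above wraps SequenceFile creation in User#runAs so the HDFS write runs with the caller's delegated credentials rather than the region server's own principal. A minimal, hedged sketch of that pattern follows; the helper name and file path are placeholders, not code from the patch.

// Hedged sketch, not part of the patch: the User#runAs pattern SecureWriter relies on.
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.security.User;

public class RunAsSketch {
  static FSDataOutputStream createAsUser(User user, Configuration conf, Path file)
    throws IOException {
    try {
      // The privileged action executes with the supplied user's credentials.
      return user.runAs(
        (PrivilegedExceptionAction<FSDataOutputStream>) () -> FileSystem.get(conf).create(file));
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }
}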
private static User getActiveUser(final UserProvider userProvider, final Token userToken) - throws IOException { + throws IOException { User user = RpcServer.getRequestUser().orElse(userProvider.getCurrent()); if (user == null && userToken != null) { LOG.warn("No found of user credentials, but a token was got from user request"); @@ -483,8 +465,7 @@ public void close() throws IOException { } } - private static class PrivilegedWriter implements PrivilegedExceptionAction, - Closeable { + private static class PrivilegedWriter implements PrivilegedExceptionAction, Closeable { private final User user; private final SequenceFile.Writer out; private Object key; @@ -541,11 +522,8 @@ public long getCellCount() { @Override public String toString() { StringBuilder builder = new StringBuilder(35); - return builder.append("rowCount=") - .append(rowCount) - .append(", cellCount=") - .append(cellCount) - .toString(); + return builder.append("rowCount=").append(rowCount).append(", cellCount=").append(cellCount) + .toString(); } } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java index e2f036043407..426ec0573d30 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestAsyncAggregationClient { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAggregationClient.class); + HBaseClassTestRule.forClass(TestAsyncAggregationClient.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -76,9 +76,9 @@ public static void setUp() throws Exception { CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); TABLE = CONN.getTable(TABLE_NAME); TABLE.putAll(LongStream.range(0, COUNT) - .mapToObj(l -> new Put(Bytes.toBytes(String.format("%03d", l))) - .addColumn(CF, CQ, Bytes.toBytes(l)).addColumn(CF, CQ2, Bytes.toBytes(l * l))) - .collect(Collectors.toList())).get(); + .mapToObj(l -> new Put(Bytes.toBytes(String.format("%03d", l))) + .addColumn(CF, CQ, Bytes.toBytes(l)).addColumn(CF, CQ2, Bytes.toBytes(l * l))) + .collect(Collectors.toList())).get(); } @AfterClass @@ -90,34 +90,35 @@ public static void tearDown() throws Exception { @Test public void testMax() throws InterruptedException, ExecutionException { assertEquals(COUNT - 1, AsyncAggregationClient - .max(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + .max(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } @Test public void testMin() throws InterruptedException, ExecutionException { assertEquals(0, AsyncAggregationClient - .min(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + .min(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } @Test public void testRowCount() throws InterruptedException, ExecutionException { assertEquals(COUNT, AsyncAggregationClient - .rowCount(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() - .longValue()); + .rowCount(TABLE, new LongColumnInterpreter(), 
new Scan().addColumn(CF, CQ)).get() + .longValue()); } @Test public void testSum() throws InterruptedException, ExecutionException { assertEquals(COUNT * (COUNT - 1) / 2, AsyncAggregationClient - .sum(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + .sum(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } private static final double DELTA = 1E-3; @Test public void testAvg() throws InterruptedException, ExecutionException { - assertEquals((COUNT - 1) / 2.0, AsyncAggregationClient + assertEquals( + (COUNT - 1) / 2.0, AsyncAggregationClient .avg(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().doubleValue(), DELTA); } @@ -125,11 +126,12 @@ public void testAvg() throws InterruptedException, ExecutionException { @Test public void testStd() throws InterruptedException, ExecutionException { double avgSq = - LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() - / (double) COUNT; + LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() + / (double) COUNT; double avg = (COUNT - 1) / 2.0; double std = Math.sqrt(avgSq - avg * avg); - assertEquals(std, AsyncAggregationClient + assertEquals( + std, AsyncAggregationClient .std(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().doubleValue(), DELTA); } @@ -146,16 +148,14 @@ public void testMedian() throws InterruptedException, ExecutionException { break; } } - assertEquals(median, - AsyncAggregationClient - .median(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() - .longValue()); + assertEquals(median, AsyncAggregationClient + .median(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); } @Test public void testMedianWithWeight() throws InterruptedException, ExecutionException { long halfSum = - LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() / 2; + LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() / 2; long median = 0L; long sum = 0L; for (int i = 0; i < COUNT; i++) { @@ -165,7 +165,8 @@ public void testMedianWithWeight() throws InterruptedException, ExecutionExcepti break; } } - assertEquals(median, AsyncAggregationClient + assertEquals(median, + AsyncAggregationClient .median(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ).addColumn(CF, CQ2)) .get().longValue()); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java index 587fd70753f6..6b62d67a9697 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -78,7 +78,7 @@ public HBaseRpcController newController(RegionInfo regionInfo, CellScanner cellS @Override public HBaseRpcController newController(RegionInfo regionInfo, - List cellIterables) { + List cellIterables) { return new CountingRpcController(super.newController(regionInfo, cellIterables)); } } @@ -140,7 +140,7 @@ public void testCountController() throws Exception { conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT + 1); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { byte[] row = Bytes.toBytes("row"); Put p = new Put(row); p.addColumn(fam1, fam1, Bytes.toBytes("val0")); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index 9dd8e243bdb8..d97c05204afd 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -43,7 +43,7 @@ * The aggregation implementation at a region. */ public class ColumnAggregationEndpoint extends ColumnAggregationService - implements RegionCoprocessor { + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpoint.class); private RegionCoprocessorEnvironment env = null; @@ -55,7 +55,7 @@ public Iterable getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -71,8 +71,8 @@ public void sum(RpcController controller, SumRequest request, RpcCallback getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -74,7 +74,7 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void sum(RpcController controller, ColumnAggregationNullResponseSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. 
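The start() hunks in these endpoint classes all apply the same guard: keep the environment only when it is region-scoped, otherwise refuse to load. A hedged skeleton of that shared pattern, kept separate from the patch itself:

// Hedged skeleton, not part of the patch: the start()/stop() guard the endpoints above share.
import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class SkeletonRegionEndpoint implements RegionCoprocessor {
  private RegionCoprocessorEnvironment env = null;

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
      this.env = (RegionCoprocessorEnvironment) env; // a region-scoped environment is required
      return;
    }
    throw new CoprocessorException("Must be loaded on a table region!");
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    // nothing to release in this sketch
  }
}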
@@ -122,9 +122,8 @@ public void sum(RpcController controller, ColumnAggregationNullResponseSumReques } } } - done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult) - .build()); - LOG.info("Returning sum " + sumResult + " for region " + - Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); + done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult).build()); + LOG.info("Returning sum " + sumResult + " for region " + + Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index 1ab9b5eca5e0..4588f9940561 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -43,15 +43,15 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.ColumnAggregationWithErrorsSumResponse; /** - * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on - * the last region in the table. This allows tests to ensure correct error handling of - * coprocessor endpoints throwing exceptions. + * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on the + * last region in the table. This allows tests to ensure correct error handling of coprocessor + * endpoints throwing exceptions. */ public class ColumnAggregationEndpointWithErrors - extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors - implements RegionCoprocessor { + extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors + implements RegionCoprocessor { private static final Logger LOG = - LoggerFactory.getLogger(ColumnAggregationEndpointWithErrors.class); + LoggerFactory.getLogger(ColumnAggregationEndpointWithErrors.class); private RegionCoprocessorEnvironment env = null; @@ -63,7 +63,7 @@ public Iterable getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -76,7 +76,7 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index 65130cfbb0e9..99e5567eb953 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.coprocessor; - import java.io.IOException; import java.util.Collections; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -42,8 +41,9 @@ * service methods. For internal use by unit tests only. */ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto - implements MasterCoprocessor, RegionCoprocessor { - public ProtobufCoprocessorService() {} + implements MasterCoprocessor, RegionCoprocessor { + public ProtobufCoprocessorService() { + } @Override public Iterable getServices() { @@ -52,36 +52,36 @@ public Iterable getServices() { @Override public void ping(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(TestProtos.EmptyResponseProto.getDefaultInstance()); } @Override public void echo(RpcController controller, TestProtos.EchoRequestProto request, - RpcCallback done) { + RpcCallback done) { String message = request.getMessage(); done.run(TestProtos.EchoResponseProto.newBuilder().setMessage(message).build()); } @Override public void error(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, new IOException("Test exception")); done.run(null); } @Override public void pause(RpcController controller, PauseRequestProto request, - RpcCallback done) { + RpcCallback done) { Threads.sleepWithoutInterrupt(request.getMs()); done.run(EmptyResponseProto.getDefaultInstance()); } @Override public void addr(RpcController controller, EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(AddrResponseProto.newBuilder() - .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); + .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java index 584c1a4d5565..9abc4dc5a6f7 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java @@ -57,7 +57,7 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestAsyncCoprocessorEndpoint.class); private static final FileNotFoundException WHAT_TO_THROW = new FileNotFoundException("/file.txt"); private static final String DUMMY_VALUE = "val"; @@ -78,12 +78,12 @@ public static void setUpBeforeClass() throws Exception { @Test public void testMasterCoprocessorService() throws Exception { TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); - TestProtos.EchoResponseProto response = - admin - . 
- coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.echo(c, request, done)).get(); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + TestProtos.EchoResponseProto response = admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.echo(c, request, done)) + .get(); assertEquals("hello", response.getMessage()); } @@ -91,10 +91,11 @@ public void testMasterCoprocessorService() throws Exception { public void testMasterCoprocessorError() throws Exception { TestProtos.EmptyRequestProto emptyRequest = TestProtos.EmptyRequestProto.getDefaultInstance(); try { - admin - . - coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.error(c, emptyRequest, done)).get(); + admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.error(c, emptyRequest, done)) + .get(); fail("Should have thrown an exception"); } catch (Exception e) { } @@ -104,13 +105,13 @@ public void testMasterCoprocessorError() throws Exception { public void testRegionServerCoprocessorService() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); DummyRegionServerEndpointProtos.DummyRequest request = - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); DummyRegionServerEndpointProtos.DummyResponse response = - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); + admin. coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyCall(c, request, done), serverName) + .get(); assertEquals(DUMMY_VALUE, response.getValue()); } @@ -118,13 +119,13 @@ DummyRegionServerEndpointProtos.DummyResponse> coprocessorService( public void testRegionServerCoprocessorServiceError() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); DummyRegionServerEndpointProtos.DummyRequest request = - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); try { - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); + admin. 
coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyThrow(c, request, done), serverName) + .get(); fail("Should have thrown an exception"); } catch (Exception e) { assertTrue(e.getCause() instanceof RetriesExhaustedException); @@ -133,8 +134,9 @@ DummyRegionServerEndpointProtos.DummyResponse> coprocessorService( } public static class DummyRegionServerEndpoint extends DummyService - implements RegionServerCoprocessor { - public DummyRegionServerEndpoint() {} + implements RegionServerCoprocessor { + public DummyRegionServerEndpoint() { + } @Override public Iterable getServices() { @@ -151,14 +153,13 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void dummyCall(RpcController controller, DummyRequest request, - RpcCallback callback) { + RpcCallback callback) { callback.run(DummyResponse.newBuilder().setValue(DUMMY_VALUE).build()); } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, - RpcCallback done) { + public void dummyThrow(RpcController controller, DummyRequest request, + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java index 67e4ff1bb297..278fc7643f10 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,17 +60,16 @@ /** * TestEndpoint: test cases to verify the batch execution of coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestBatchCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBatchCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestBatchCoprocessorEndpoint.class); private static final Logger LOG = LoggerFactory.getLogger(TestBatchCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestTable"); + private static final TableName TEST_TABLE = TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -87,12 +86,12 @@ public static void setupBeforeClass() throws Exception { // set configure to indicate which cp should be loaded Configuration conf = util.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName(), - ColumnAggregationEndpointWithErrors.class.getName(), - ColumnAggregationEndpointNullResponse.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName(), + ColumnAggregationEndpointWithErrors.class.getName(), + ColumnAggregationEndpointNullResponse.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); @@ -120,24 +119,22 @@ public static void tearDownAfterClass() throws Exception { public void testAggregationNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); ColumnAggregationNullResponseSumRequest.Builder builder = - ColumnAggregationNullResponseSumRequest - .newBuilder(); + ColumnAggregationNullResponseSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); } - Map results = - table.batchCoprocessorService( - ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationNullResponseSumResponse.getDefaultInstance()); + Map results = table.batchCoprocessorService( + ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), + builder.build(), ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationNullResponseSumResponse.getDefaultInstance()); int sumResult = 0; int expectedResult = 0; - for (Map.Entry e : - results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + for (Map.Entry e : results.entrySet()) { + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { @@ -156,29 +153,29 @@ private static byte[][] makeN(byte[] base, int n) { } private Map sum(final Table table, final byte[] family, - final byte[] qualifier, final byte[] 
start, final byte[] end) throws ServiceException, - Throwable { - ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest - .newBuilder(); + final byte[] qualifier, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + ColumnAggregationProtos.SumRequest.Builder builder = + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteString.copyFrom(qualifier)); } return table.batchCoprocessorService( - ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), - builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); + ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), + builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); } @Test public void testAggregationWithReturnValue() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -189,13 +186,12 @@ public void testAggregationWithReturnValue() throws Throwable { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], - ROWS[ROWS.length - 1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -208,13 +204,13 @@ public void testAggregationWithReturnValue() throws Throwable { @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -227,8 +223,8 @@ public void testAggregation() throws Throwable { sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -241,14 +237,10 @@ public void 
testAggregation() throws Throwable { @Test public void testAggregationWithErrors() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - final Map results = - Collections.synchronizedMap( - new TreeMap( - Bytes.BYTES_COMPARATOR - )); + final Map results = Collections.synchronizedMap( + new TreeMap(Bytes.BYTES_COMPARATOR)); ColumnAggregationWithErrorsSumRequest.Builder builder = - ColumnAggregationWithErrorsSumRequest - .newBuilder(); + ColumnAggregationWithErrorsSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); @@ -257,18 +249,18 @@ public void testAggregationWithErrors() throws Throwable { boolean hasError = false; try { table.batchCoprocessorService( - ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() - .findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), - new Batch.Callback() { + ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() + .findMethodByName("sum"), + builder.build(), ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), + new Batch.Callback() { - @Override - public void update(byte[] region, byte[] row, - ColumnAggregationWithErrorsSumResponse result) { - results.put(region, result); - } - }); + @Override + public void update(byte[] region, byte[] row, + ColumnAggregationWithErrorsSumResponse result) { + results.put(region, result); + } + }); } catch (Throwable t) { LOG.info("Exceptions in coprocessor service", t); hasError = true; @@ -277,8 +269,8 @@ public void update(byte[] region, byte[] row, int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 705bf626f5e2..47d8ebf4fd68 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -69,11 +69,11 @@ /** * Test coprocessors class loading. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestClassLoading { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClassLoading.class); + HBaseClassTestRule.forClass(TestClassLoading.class); private static final Logger LOG = LoggerFactory.getLogger(TestClassLoading.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -96,17 +96,17 @@ public Optional getMasterObserver() { static final String cpName6 = "TestCP6"; private static Class regionCoprocessor1 = ColumnAggregationEndpoint.class; - // TOOD: Fix the import of this handler. It is coming in from a package that is far away. + // TOOD: Fix the import of this handler. It is coming in from a package that is far away. 
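TestClassLoading wires its fixture coprocessors through CoprocessorHost configuration keys (region, user-region, WAL and master scopes) in the setup code that follows. A hedged sketch of that registration style; only the ColumnAggregationEndpoint class name is taken from the patch, the other class names are placeholders.

// Hedged sketch, not part of the patch: registering system coprocessors via configuration keys,
// mirroring the setUpBeforeClass() wiring in this test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Loaded for every region, user tables and meta alike.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      "org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint");
    // Loaded only for regions of user tables.
    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      "org.example.SomeUserRegionObserver"); // placeholder
    // WAL- and master-scoped coprocessors.
    conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      "org.example.SomeWALObserver"); // placeholder
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      "org.example.SomeMasterObserver"); // placeholder
  }
}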
private static Class regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class; private static Class regionServerCoprocessor = SampleRegionWALCoprocessor.class; private static Class masterCoprocessor = TestMasterCoprocessor.class; private static final String[] regionServerSystemCoprocessors = - new String[]{ regionServerCoprocessor.getSimpleName() }; + new String[] { regionServerCoprocessor.getSimpleName() }; - private static final String[] masterRegionServerSystemCoprocessors = new String[] { - regionCoprocessor1.getSimpleName(), MultiRowMutationEndpoint.class.getSimpleName(), - regionServerCoprocessor.getSimpleName() }; + private static final String[] masterRegionServerSystemCoprocessors = + new String[] { regionCoprocessor1.getSimpleName(), + MultiRowMutationEndpoint.class.getSimpleName(), regionServerCoprocessor.getSimpleName() }; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -114,19 +114,15 @@ public static void setUpBeforeClass() throws Exception { // regionCoprocessor1 will be loaded on all regionservers, since it is // loaded for any tables (user or meta). - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor1.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, regionCoprocessor1.getName()); // regionCoprocessor2 will be loaded only on regionservers that serve a // user table region. Therefore, if there are no user tables loaded, // this coprocessor will not be loaded on any regionserver. - conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor2.getName()); + conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, regionCoprocessor2.getName()); - conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - regionServerCoprocessor.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - masterCoprocessor.getName()); + conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, regionServerCoprocessor.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, masterCoprocessor.getName()); TEST_UTIL.startMiniCluster(1); cluster = TEST_UTIL.getDFSCluster(); } @@ -137,11 +133,9 @@ public static void tearDownAfterClass() throws Exception { } static File buildCoprocessorJar(String className) throws Exception { - String code = - "import org.apache.hadoop.hbase.coprocessor.*;" + - "public class " + className + " implements RegionCoprocessor {}"; - return ClassLoaderTestHelper.buildJar( - TEST_UTIL.getDataTestDir().toString(), className, code); + String code = "import org.apache.hadoop.hbase.coprocessor.*;" + "public class " + className + + " implements RegionCoprocessor {}"; + return ClassLoaderTestHelper.buildJar(TEST_UTIL.getDataTestDir().toString(), className, code); } @Test @@ -155,32 +149,26 @@ public void testClassLoadingFromHDFS() throws Exception { // copy the jars into dfs fs.copyFromLocalFile(new Path(jarFile1.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + - jarFile1.getName(); + String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName(); Path pathOnHDFS1 = new Path(jarFileOnHDFS1); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS1)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1); fs.copyFromLocalFile(new Path(jarFile2.getPath()), - new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS2 = 
fs.getUri().toString() + Path.SEPARATOR + - jarFile2.getName(); + new Path(fs.getUri().toString() + Path.SEPARATOR)); + String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName(); Path pathOnHDFS2 = new Path(jarFileOnHDFS2); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS2)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS2)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); // create a table that references the coprocessors TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); // without configuration values - tdb.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 - + "|" + Coprocessor.PRIORITY_USER); + tdb.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); // with configuration values - tdb.setValue("COPROCESSOR$2", jarFileOnHDFS2 + "|" + cpName2 - + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + tdb.setValue("COPROCESSOR$2", + jarFileOnHDFS2 + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -189,19 +177,18 @@ public void testClassLoadingFromHDFS() throws Exception { admin.deleteTable(tableName); } CoprocessorClassLoader.clearCache(); - byte[] startKey = {10, 63}; - byte[] endKey = {12, 43}; + byte[] startKey = { 10, 63 }; + byte[] endKey = { 12, 43 }; TableDescriptor tableDescriptor = tdb.build(); admin.createTable(tableDescriptor, startKey, endKey, 4); waitForTable(tableDescriptor.getTableName()); // verify that the coprocessors were loaded - boolean foundTableRegion=false; + boolean foundTableRegion = false; boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true; Map> regionsActiveClassLoaders = new HashMap<>(); SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { foundTableRegion = true; CoprocessorEnvironment env; @@ -219,8 +206,8 @@ public void testClassLoadingFromHDFS() throws Exception { found2_k2 = false; found2_k3 = false; } - regionsActiveClassLoaders - .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); + regionsActiveClassLoaders.put(region, + ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); } } @@ -235,18 +222,16 @@ public void testClassLoadingFromHDFS() throws Exception { CoprocessorClassLoader.getIfCached(pathOnHDFS1)); assertNotNull(jarFileOnHDFS2 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS2)); - //two external jar used, should be one classloader per jar - assertEquals("The number of cached classloaders should be equal to the number" + - " of external jar files", + // two external jar used, should be one classloader per jar + assertEquals( + "The number of cached classloaders should be equal to the number" + " of external jar files", 2, CoprocessorClassLoader.getAllCached().size()); - //check if region active classloaders are shared across all RS regions - Set externalClassLoaders = new HashSet<>( - 
CoprocessorClassLoader.getAllCached()); + // check if region active classloaders are shared across all RS regions + Set externalClassLoaders = new HashSet<>(CoprocessorClassLoader.getAllCached()); for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." - + " ClassLoader Cache:" + externalClassLoaders - + " Region ClassLoaders:" + regionCP.getValue(), - externalClassLoaders.containsAll(regionCP.getValue())); + + " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + + regionCP.getValue(), externalClassLoaders.containsAll(regionCP.getValue())); } } @@ -261,10 +246,9 @@ public void testClassLoadingFromLocalFS() throws Exception { // create a table that references the jar TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName3)); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); - tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + - Coprocessor.PRIORITY_USER); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); + tdb.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER); TableDescriptor tableDescriptor = tdb.build(); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); @@ -273,7 +257,7 @@ public void testClassLoadingFromLocalFS() throws Exception { // verify that the coprocessor was loaded boolean found = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) { found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); } @@ -288,10 +272,9 @@ public void testPrivateClassLoader() throws Exception { // create a table that references the jar TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName4)); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); - tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + - Coprocessor.PRIORITY_USER); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); + tdb.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER); TableDescriptor tableDescriptor = tdb.build(); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); @@ -300,7 +283,7 @@ public void testPrivateClassLoader() throws Exception { // verify that the coprocessor was loaded correctly boolean found = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) { Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4); if (cp != null) { @@ -328,17 +311,14 @@ public void testHBase3810() throws Exception { String cpKey2 = " Coprocessor$2 "; String cpKey3 = " coprocessor$03 "; - String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + - Coprocessor.PRIORITY_USER; + String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER; 
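The cpValue strings assembled here use the pipe-delimited table-attribute form for attaching a coprocessor ("jar path | class name | priority | optional k=v properties"), and the same test later switches to the CoprocessorDescriptorBuilder API. A hedged sketch of both forms; the jar path and class name are placeholders, not values from the patch.

// Hedged sketch, not part of the patch: the two ways this test attaches a coprocessor to a table.
import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptor;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorSpecSketch {
  static TableDescriptorBuilder describe() throws IOException {
    TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"));
    // Attribute form: "<jar path>|<class name>|<priority>|<k=v,...>".
    tdb.setValue("COPROCESSOR$1",
      "hdfs:///user/hbase/cp.jar|org.example.MyEndpoint|" + Coprocessor.PRIORITY_USER + "|k1=v1");
    // Builder form used further down in the same test.
    CoprocessorDescriptor cd = CoprocessorDescriptorBuilder.newBuilder("org.example.MyEndpoint")
      .setJarPath("hdfs:///user/hbase/cp.jar")
      .setPriority(Coprocessor.PRIORITY_USER)
      .setProperties(Collections.singletonMap("k1", "v1"))
      .build();
    tdb.setCoprocessor(cd);
    return tdb;
  }
}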
String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " | "; // load from default class loader - String cpValue3 = - " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; + String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; // create a table that references the jar TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); // add 3 coprocessors by setting htd attributes directly. tdb.setValue(cpKey1, cpValue1); @@ -346,24 +326,18 @@ public void testHBase3810() throws Exception { tdb.setValue(cpKey3, cpValue3); // add 2 coprocessor by using new htd.setCoprocessor() api - CoprocessorDescriptor coprocessorDescriptor = CoprocessorDescriptorBuilder - .newBuilder(cpName5) + CoprocessorDescriptor coprocessorDescriptor = CoprocessorDescriptorBuilder.newBuilder(cpName5) .setJarPath(new Path(getLocalPath(jarFile5)).toString()) - .setPriority(Coprocessor.PRIORITY_USER) - .setProperties(Collections.emptyMap()) - .build(); + .setPriority(Coprocessor.PRIORITY_USER).setProperties(Collections.emptyMap()).build(); tdb.setCoprocessor(coprocessorDescriptor); Map kvs = new HashMap<>(); kvs.put("k1", "v1"); kvs.put("k2", "v2"); kvs.put("k3", "v3"); - coprocessorDescriptor = CoprocessorDescriptorBuilder - .newBuilder(cpName6) + coprocessorDescriptor = CoprocessorDescriptorBuilder.newBuilder(cpName6) .setJarPath(new Path(getLocalPath(jarFile6)).toString()) - .setPriority(Coprocessor.PRIORITY_USER) - .setProperties(kvs) - .build(); + .setPriority(Coprocessor.PRIORITY_USER).setProperties(kvs).build(); tdb.setCoprocessor(coprocessorDescriptor); Admin admin = TEST_UTIL.getAdmin(); @@ -379,26 +353,20 @@ public void testHBase3810() throws Exception { waitForTable(tableDescriptor.getTableName()); // verify that the coprocessor was loaded - boolean found_2 = false, found_1 = false, found_3 = false, - found_5 = false, found_6 = false; - boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, - found6_k4 = false; + boolean found_2 = false, found_1 = false, found_3 = false, found_5 = false, found_6 = false; + boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, found6_k4 = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { - found_1 = found_1 || - (region.getCoprocessorHost().findCoprocessor(cpName1) != null); - found_2 = found_2 || - (region.getCoprocessorHost().findCoprocessor(cpName2) != null); - found_3 = found_3 || - (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") - != null); - found_5 = found_5 || - (region.getCoprocessorHost().findCoprocessor(cpName5) != null); + found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null); + found_2 = found_2 || (region.getCoprocessorHost().findCoprocessor(cpName2) != null); + found_3 = + found_3 || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null); + found_5 = found_5 || (region.getCoprocessorHost().findCoprocessor(cpName5) != null); CoprocessorEnvironment env = - region.getCoprocessorHost().findCoprocessorEnvironment(cpName6); + 
region.getCoprocessorHost().findCoprocessorEnvironment(cpName6); if (env != null) { found_6 = true; Configuration conf = env.getConfiguration(); @@ -438,28 +406,23 @@ void loadingClassFromLibDirInJar(String libPrefix) throws Exception { File innerJarFile2 = buildCoprocessorJar(cpName2); File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar"); - ClassLoaderTestHelper.addJarFilesToJar( - outerJarFile, libPrefix, innerJarFile1, innerJarFile2); + ClassLoaderTestHelper.addJarFilesToJar(outerJarFile, libPrefix, innerJarFile1, innerJarFile2); // copy the jars into dfs fs.copyFromLocalFile(new Path(outerJarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + - outerJarFile.getName(); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(new Path(jarFileOnHDFS))); + String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName(); + assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS))); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS); // create a table that references the coprocessors TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); - // without configuration values - tdb.setValue("COPROCESSOR$1", jarFileOnHDFS + "|" + cpName1 - + "|" + Coprocessor.PRIORITY_USER); - // with configuration values - tdb.setValue("COPROCESSOR$2", jarFileOnHDFS + "|" + cpName2 - + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); + // without configuration values + tdb.setValue("COPROCESSOR$1", jarFileOnHDFS + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); + // with configuration values + tdb.setValue("COPROCESSOR$2", + jarFileOnHDFS + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -473,10 +436,9 @@ void loadingClassFromLibDirInJar(String libPrefix) throws Exception { waitForTable(tableDescriptor.getTableName()); // verify that the coprocessors were loaded - boolean found1 = false, found2 = false, found2_k1 = false, - found2_k2 = false, found2_k3 = false; + boolean found1 = false, found2 = false, found2_k1 = false, found2_k2 = false, found2_k3 = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { CoprocessorEnvironment env; env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); @@ -509,24 +471,21 @@ public void testRegionServerCoprocessorsReported() throws Exception { } /** - * return the subset of all regionservers - * (actually returns set of ServerLoads) - * which host some region in a given table. - * used by assertAllRegionServers() below to - * test reporting of loaded coprocessors. + * return the subset of all regionservers (actually returns set of ServerLoads) which host some + * region in a given table. used by assertAllRegionServers() below to test reporting of loaded + * coprocessors. * @param tableName : given table. * @return subset of all servers. 
*/ Map serversForTable(String tableName) { Map serverLoadHashMap = new HashMap<>(); - for(Map.Entry server: - TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). - getOnlineServers().entrySet()) { - for(Map.Entry region: - server.getValue().getRegionMetrics().entrySet()) { + for (Map.Entry server : TEST_UTIL.getMiniHBaseCluster().getMaster() + .getServerManager().getOnlineServers().entrySet()) { + for (Map.Entry region : server.getValue().getRegionMetrics() + .entrySet()) { if (region.getValue().getNameAsString().equals(tableName)) { // this server hosts a region of tableName: add this server.. - serverLoadHashMap.put(server.getKey(),server.getValue()); + serverLoadHashMap.put(server.getKey(), server.getValue()); // .. and skip the rest of the regions that it hosts. break; } @@ -547,13 +506,12 @@ void assertAllRegionServers(String tableName) throws InterruptedException { } for (int i = 0; i < 5; i++) { boolean any_failed = false; - for(Map.Entry server: servers.entrySet()) { + for (Map.Entry server : servers.entrySet()) { String[] actualCoprocessors = server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) { - LOG.debug("failed comparison: actual: " + - Arrays.toString(actualCoprocessors) + - " ; expected: " + Arrays.toString(expectedCoprocessors)); + LOG.debug("failed comparison: actual: " + Arrays.toString(actualCoprocessors) + + " ; expected: " + Arrays.toString(expectedCoprocessors)); any_failed = true; expectedCoprocessors = switchExpectedCoprocessors(expectedCoprocessors); break; @@ -584,11 +542,9 @@ public void testMasterCoprocessorsReported() { // HBASE 4070: Improve region server metrics to report loaded coprocessors // to master: verify that the master is reporting the correct set of // loaded coprocessors. 
- final String loadedMasterCoprocessorsVerify = - "[" + masterCoprocessor.getSimpleName() + "]"; + final String loadedMasterCoprocessorsVerify = "[" + masterCoprocessor.getSimpleName() + "]"; String loadedMasterCoprocessors = - java.util.Arrays.toString( - TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); + java.util.Arrays.toString(TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index d6d0d4ce43dd..c479b3bee56e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -68,16 +68,15 @@ /** * TestEndpoint: test cases to verify coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestCoprocessorEndpoint.class); private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestCoprocessorEndpoint"); + private static final TableName TEST_TABLE = TableName.valueOf("TestCoprocessorEndpoint"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -95,10 +94,10 @@ public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); @@ -121,17 +120,15 @@ public static void tearDownAfterClass() throws Exception { util.shutdownMiniCluster(); } - private Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) - throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + private Map sum(final Table table, final byte[] family, final byte[] qualifier, + final byte[] start, final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = 
ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(UnsafeByteOperations.unsafeWrap(family)); @@ -147,12 +144,12 @@ public Long call(ColumnAggregationProtos.ColumnAggregationService instance) @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = 0; i < ROWSIZE; i++) { @@ -163,12 +160,11 @@ public void testAggregation() throws Throwable { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -183,77 +179,73 @@ public void testCoprocessorService() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); - final Map results = Collections.synchronizedMap( - new TreeMap(Bytes.BYTES_COMPARATOR)); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + final Map results = + Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); try { // scan: for all regions final RpcController controller = new ServerRpcController(); - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region 
"+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation info : regions) { - LOG.info("Region info is "+info.getRegion().getRegionNameAsString()); + LOG.info("Region info is " + info.getRegion().getRegionNameAsString()); assertTrue(results.containsKey(info.getRegion().getRegionName())); } results.clear(); // scan: for region 2 and region 3 - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[rowSeperator1], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[rowSeperator1], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(2, results.size()); } finally { @@ -265,38 +257,37 @@ public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto resul public void testCoprocessorServiceNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); try { // scan: for all regions final RpcController controller = new ServerRpcController(); // test that null results are supported - Map results = - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, + Map results = table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], ROWS[ROWS.length - 1], new Batch.Call() { public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) - throws IOException { + throws IOException { CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call got result " + response); return null; } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " 
for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation region : regions) { RegionInfo info = region.getRegion(); - LOG.info("Region info is "+info.getRegionNameAsString()); + LOG.info("Region info is " + info.getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionName())); assertNull(results.get(info.getRegionName())); } @@ -309,9 +300,9 @@ public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) public void testMasterCoprocessorService() throws Throwable { Admin admin = util.getAdmin(); final TestProtos.EchoRequestProto request = - TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); + TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); assertEquals("hello", service.echo(null, request).getMessage()); } @@ -326,7 +317,7 @@ public void testCoprocessorError() throws Exception { CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol); + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol); service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance()); fail("Should have thrown an exception"); @@ -340,7 +331,7 @@ public void testCoprocessorError() throws Exception { public void testMasterCoprocessorError() throws Throwable { Admin admin = util.getAdmin(); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); try { service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance()); fail("Should have thrown an exception"); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java index ada5f1c7a53f..9b06011f458b 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java @@ -33,6 +33,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.util.List; @@ -84,10 +85,12 @@ import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; + import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoResponseProto; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto; @@ -95,7 +98,7 @@ /** * Test cases to verify 
tracing coprocessor Endpoint execution */ -@Category({ CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpointTracing { private static final Logger logger = LoggerFactory.getLogger(TestCoprocessorEndpointTracing.class); @@ -105,8 +108,8 @@ public class TestCoprocessorEndpointTracing { HBaseClassTestRule.forClass(TestCoprocessorEndpointTracing.class); private static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create(); - private static final MiniClusterRule miniclusterRule = MiniClusterRule.newBuilder() - .setConfiguration(() -> { + private static final MiniClusterRule miniclusterRule = + MiniClusterRule.newBuilder().setConfiguration(() -> { final Configuration conf = HBaseConfiguration.create(); conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, @@ -114,8 +117,7 @@ public class TestCoprocessorEndpointTracing { conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, ProtobufCoprocessorService.class.getName()); return conf; - }) - .build(); + }).build(); private static final ConnectionRule connectionRule = ConnectionRule.createAsyncConnectionRule(miniclusterRule::createAsyncConnection); @@ -133,10 +135,8 @@ protected void before() throws Throwable { } @ClassRule - public static final TestRule testRule = RuleChain.outerRule(otelClassRule) - .around(miniclusterRule) - .around(connectionRule) - .around(new Setup()); + public static final TestRule testRule = RuleChain.outerRule(otelClassRule).around(miniclusterRule) + .around(connectionRule).around(new Setup()); private static final TableName TEST_TABLE = TableName.valueOf(TestCoprocessorEndpointTracing.class.getSimpleName()); @@ -189,8 +189,7 @@ public void onError(Throwable error) { final Map results = TraceUtil.trace(() -> { table.coprocessorService(TestProtobufRpcProto::newStub, - (stub, controller, cb) -> stub.echo(controller, request, cb), callback) - .execute(); + (stub, controller, cb) -> stub.echo(controller, request, cb), callback).execute(); try { return future.get(); } catch (InterruptedException | ExecutionException e) { @@ -199,31 +198,21 @@ public void onError(Throwable error) { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - equalTo("hello")))); + assertThat(results.values(), everyItem(allOf(notNullValue(), equalTo("hello")))); final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - 
hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -237,45 +226,33 @@ public void traceSyncTableEndpointCall() throws Exception { new CoprocessorRpcUtils.BlockingRpcCallback<>(); final Map results = TraceUtil.trace(() -> { try { - return table.coprocessorService(TestProtobufRpcProto.class, null, null, - t -> { - t.echo(controller, request, callback); - return callback.get(); - }); + return table.coprocessorService(TestProtobufRpcProto.class, null, null, t -> { + t.echo(controller, request, callback); + return callback.get(); + }); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -300,34 +277,23 @@ public void traceSyncTableEndpointCallAndCallback() throws Exception { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - 
.orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -350,9 +316,7 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { assertEquals("hello", response.getMessage()); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); @@ -361,13 +325,10 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { * The Table instance isn't issuing a command here, it's not a table operation, so don't expect * there to be a span like `COPROC_EXEC table`. */ - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = + allOf(hasName(containsString("COPROC_EXEC")), hasParentSpanId(testSpan)); assertThat(spans, not(hasItem(tableOpMatcher))); } @@ -380,41 +341,30 @@ public void traceSyncTableBatchEndpoint() throws Exception { final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final Map response = TraceUtil.trace(() -> { try { - return table.batchCoprocessorService( - descriptor, request, null, null, EchoResponseProto.getDefaultInstance()); + return table.batchCoprocessorService(descriptor, request, null, null, + EchoResponseProto.getDefaultInstance()); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(response); - assertThat(response.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(response.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - 
hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -436,34 +386,23 @@ public void traceSyncTableBatchEndpointCallback() throws Exception { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -475,27 +414,20 @@ public void traceAsyncAdminEndpoint() throws Exception { final ServiceCaller callback = (stub, controller, cb) -> stub.echo(controller, request, cb); - final String response = TraceUtil.tracedFuture( - () -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), - testName.getMethodName()) - .get() - .getMessage(); + final String response = TraceUtil + .tracedFuture(() -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), + testName.getMethodName()) + .get().getMessage(); assertEquals("hello", response); - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = 
allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -516,27 +448,21 @@ public void traceSyncAdminEndpoint() throws Exception { assertEquals("hello", response); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } private void waitForAndLog(Matcher spanMatcher) { final Configuration conf = connectionRule.getAsyncConnection().getConfiguration(); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelClassRule::getSpans, hasItem(spanMatcher))); + Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), + new MatcherPredicate<>(otelClassRule::getSpans, hasItem(spanMatcher))); final List spans = otelClassRule.getSpans(); if (logger.isDebugEnabled()) { StringTraceRenderer renderer = new StringTraceRenderer(spans); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java index 5a0827c2d75b..5039d818c7ed 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -48,11 +48,11 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.ColumnAggregationProtos; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorTableEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorTableEndpoint.class); + HBaseClassTestRule.forClass(TestCoprocessorTableEndpoint.class); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); @@ -111,17 +111,16 @@ private static byte[][] makeN(byte[] base, int n) { return ret; } - private static Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) - throws ServiceException, 
Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + private static Map sum(final Table table, final byte[] family, + final byte[] qualifier, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); @@ -160,8 +159,8 @@ private static void updateTable(TableDescriptor tableDescriptor) throws Exceptio private static final void verifyTable(TableName tableName) throws Throwable { Table table = TEST_UTIL.getConnection().getTable(tableName); try { - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { @@ -174,7 +173,7 @@ private static final void verifyTable(TableName tableName) throws Throwable { // scan: for region 2 and region 3 results.clear(); - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java index 8a4c7b21b553..cd9b474cf3e1 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,12 +26,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestImportExport extends org.apache.hadoop.hbase.mapreduce.TestImportExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportExport.class); + HBaseClassTestRule.forClass(TestImportExport.class); @BeforeClass public static void beforeClass() throws Throwable { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java index 81b14b949be4..7fee3d73d04b 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,12 +46,12 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionServerCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionServerCoprocessorEndpoint.class); + HBaseClassTestRule.forClass(TestRegionServerCoprocessorEndpoint.class); public static final FileNotFoundException WHAT_TO_THROW = new FileNotFoundException("/file.txt"); private static HBaseTestingUtil TEST_UTIL = null; @@ -76,13 +76,14 @@ public static void tearDownAfterClass() throws Exception { public void testEndpoint() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback< + DummyRegionServerEndpointProtos.DummyResponse> rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = - ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, - TEST_UTIL.getAdmin().coprocessorService(serverName)); - service.dummyCall(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, + TEST_UTIL.getAdmin().coprocessorService(serverName)); + service.dummyCall(controller, DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), + rpcCallback); assertEquals(DUMMY_VALUE, rpcCallback.get().getValue()); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -93,20 +94,21 @@ public void testEndpoint() throws Exception { public void testEndpointExceptions() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback< + DummyRegionServerEndpointProtos.DummyResponse> rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = - ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, - TEST_UTIL.getAdmin().coprocessorService(serverName)); + ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, + TEST_UTIL.getAdmin().coprocessorService(serverName)); service.dummyThrow(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); assertEquals(null, rpcCallback.get()); assertTrue(controller.failedOnException()); assertEquals(WHAT_TO_THROW.getClass(), controller.getFailedOn().getCause().getClass()); } public static class DummyRegionServerEndpoint extends DummyService - implements RegionServerCoprocessor { + implements RegionServerCoprocessor { @Override public Iterable getServices() { @@ -115,14 +117,13 @@ public Iterable getServices() { @Override 
public void dummyCall(RpcController controller, DummyRequest request, - RpcCallback callback) { + RpcCallback callback) { callback.run(DummyResponse.newBuilder().setValue(DUMMY_VALUE).build()); } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, - RpcCallback done) { + public void dummyThrow(RpcController controller, DummyRequest request, + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index a13cb5b5bf20..0ac9424c092b 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -84,11 +84,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestSecureExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureExport.class); + HBaseClassTestRule.forClass(TestSecureExport.class); private static final Logger LOG = LoggerFactory.getLogger(TestSecureExport.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -125,25 +125,20 @@ public class TestSecureExport { private static final String TOPSECRET = "topsecret"; @Rule public final TestName name = new TestName(); + private static void setUpKdcServer() throws Exception { KDC = UTIL.setupMiniKdc(KEYTAB_FILE); USERNAME = UserGroupInformation.getLoginUser().getShortUserName(); SERVER_PRINCIPAL = USERNAME + "/" + LOCALHOST; HTTP_PRINCIPAL = "HTTP/" + LOCALHOST; - KDC.createPrincipal(KEYTAB_FILE, - SERVER_PRINCIPAL, - HTTP_PRINCIPAL, - USER_ADMIN + "/" + LOCALHOST, - USER_OWNER + "/" + LOCALHOST, - USER_RX + "/" + LOCALHOST, - USER_RO + "/" + LOCALHOST, - USER_XO + "/" + LOCALHOST, - USER_NONE + "/" + LOCALHOST); + KDC.createPrincipal(KEYTAB_FILE, SERVER_PRINCIPAL, HTTP_PRINCIPAL, USER_ADMIN + "/" + LOCALHOST, + USER_OWNER + "/" + LOCALHOST, USER_RX + "/" + LOCALHOST, USER_RO + "/" + LOCALHOST, + USER_XO + "/" + LOCALHOST, USER_NONE + "/" + LOCALHOST); } private static User getUserByLogin(final String user) throws IOException { - return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI( - getPrinciple(user), KEYTAB_FILE.getAbsolutePath())); + return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI(getPrinciple(user), + KEYTAB_FILE.getAbsolutePath())); } private static String getPrinciple(final String user) { @@ -152,28 +147,27 @@ private static String getPrinciple(final String user) { private static void setUpClusterKdc() throws Exception { HBaseKerberosUtils.setSecuredConfiguration(UTIL.getConfiguration(), - SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); + SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); HBaseKerberosUtils.setSSLConfiguration(UTIL, TestSecureExport.class); UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - UTIL.getConfiguration().get( - CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + Export.class.getName()); + UTIL.getConfiguration().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + + Export.class.getName()); } private static void addLabels(final Configuration conf, final List users, - final List labels) throws 
Exception { - PrivilegedExceptionAction action - = () -> { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); - for (String user : users) { - VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); - } - } catch (Throwable t) { - throw new IOException(t); + final List labels) throws Exception { + PrivilegedExceptionAction action = () -> { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); + for (String user : users) { + VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); } - return null; - }; + } catch (Throwable t) { + throw new IOException(t); + } + return null; + }; getUserByLogin(USER_ADMIN).runAs(action); } @@ -199,7 +193,7 @@ private static void clearOutput(Path path) throws IOException { @BeforeClass public static void beforeClass() throws Exception { UserProvider.setUserProviderForTesting(UTIL.getConfiguration(), - HadoopSecurityEnabledUserProviderForTesting.class); + HadoopSecurityEnabledUserProviderForTesting.class); setUpKdcServer(); SecureTestUtil.enableSecurity(UTIL.getConfiguration()); UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); @@ -211,15 +205,11 @@ public static void beforeClass() throws Exception { UTIL.waitUntilAllRegionsAssigned(VisibilityConstants.LABELS_TABLE_NAME); UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME, 50000); UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME, 50000); - SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.EXEC, - Permission.Action.READ, - Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action.EXEC, Permission.Action.READ, Permission.Action.WRITE); SecureTestUtil.grantGlobal(UTIL, USER_OWNER, Permission.Action.CREATE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), - Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); + Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @AfterClass @@ -231,49 +221,38 @@ public static void afterClass() throws Exception { } /** - * Test the ExportEndpoint's access levels. The {@link Export} test is ignored - * since the access exceptions cannot be collected from the mappers. + * Test the ExportEndpoint's access levels. The {@link Export} test is ignored since the access + * exceptions cannot be collected from the mappers. 
*/ @Test public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); - TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(exportTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .build(); + TableDescriptor exportHtd = TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).build(); User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); - SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); - SecureTestUtil.grantOnTable(UTIL, USER_RO, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ); - SecureTestUtil.grantOnTable(UTIL, USER_RX, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ, - Permission.Action.EXEC); - SecureTestUtil.grantOnTable(UTIL, USER_XO, - TableName.valueOf(exportTable), null, null, - Permission.Action.EXEC); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][] { Bytes.toBytes("s") }); + SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, + Permission.Action.READ); + SecureTestUtil.grantOnTable(UTIL, USER_RX, TableName.valueOf(exportTable), null, null, + Permission.Action.READ, Permission.Action.EXEC); + SecureTestUtil.grantOnTable(UTIL, USER_XO, TableName.valueOf(exportTable), null, null, + Permission.Action.EXEC); assertEquals(4, PermissionStorage - .getTablePermissions(UTIL.getConfiguration(), TableName.valueOf(exportTable)).size()); + .getTablePermissions(UTIL.getConfiguration(), TableName.valueOf(exportTable)).size()); AccessTestAction putAction = () -> { Put p = new Put(ROW1); p.addColumn(FAMILYA, Bytes.toBytes("qual_0"), NOW, QUAL); p.addColumn(FAMILYA, Bytes.toBytes("qual_1"), NOW, QUAL); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p); } return null; }; // no hdfs access. 
- SecureTestUtil.verifyAllowed(putAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER)); - SecureTestUtil.verifyDenied(putAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), - getUserByLogin(USER_RX), - getUserByLogin(USER_NONE)); + SecureTestUtil.verifyAllowed(putAction, getUserByLogin(USER_ADMIN), getUserByLogin(USER_OWNER)); + SecureTestUtil.verifyDenied(putAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), + getUserByLogin(USER_RX), getUserByLogin(USER_NONE)); final FileSystem fs = UTIL.getDFSCluster().getFileSystem(); final Path openDir = fs.makeQualified(new Path("testAccessCase")); @@ -282,9 +261,9 @@ public void testAccessCase() throws Throwable { final Path output = fs.makeQualified(new Path(openDir, "output")); AccessTestAction exportAction = () -> { try { - String[] args = new String[]{exportTable, output.toString()}; - Map result - = Export.run(new Configuration(UTIL.getConfiguration()), args); + String[] args = new String[] { exportTable, output.toString() }; + Map result = + Export.run(new Configuration(UTIL.getConfiguration()), args); long rowCount = 0; long cellCount = 0; for (Export.Response r : result.values()) { @@ -308,7 +287,7 @@ public void testAccessCase() throws Throwable { assertEquals("Unexpected file owner", currentUserName, outputDirFileStatus.getOwner()); FileStatus[] outputFileStatus = fs.listStatus(new Path(openDir, "output")); - for (FileStatus fileStatus: outputFileStatus) { + for (FileStatus fileStatus : outputFileStatus) { assertEquals("Unexpected file owner", currentUserName, fileStatus.getOwner()); } } else { @@ -318,14 +297,10 @@ public void testAccessCase() throws Throwable { clearOutput(output); } }; - SecureTestUtil.verifyDenied(exportAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), + SecureTestUtil.verifyDenied(exportAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), getUserByLogin(USER_NONE)); - SecureTestUtil.verifyAllowed(exportAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER), - getUserByLogin(USER_RX)); + SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_ADMIN), + getUserByLogin(USER_OWNER), getUserByLogin(USER_RX)); AccessTestAction deleteAction = () -> { UTIL.deleteTable(TableName.valueOf(exportTable)); return null; @@ -339,12 +314,11 @@ public void testAccessCase() throws Throwable { public void testVisibilityLabels() throws IOException, Throwable { final String exportTable = name.getMethodName() + "_export"; final String importTable = name.getMethodName() + "_import"; - final TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(exportTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .build(); + final TableDescriptor exportHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).build(); User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); - SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -356,7 +330,7 @@ public void testVisibilityLabels() throws IOException, Throwable { p3.addColumn(FAMILYA, QUAL, NOW, QUAL); p3.setCellVisibility(new CellVisibility("!" 
+ CONFIDENTIAL + " & " + TOPSECRET)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p1); t.put(p2); t.put(p3); @@ -373,7 +347,7 @@ public void testVisibilityLabels() throws IOException, Throwable { for (final Pair, Integer> labelsAndRowCount : labelsAndRowCounts) { final List labels = labelsAndRowCount.getFirst(); final int rowCount = labelsAndRowCount.getSecond(); - //create a open permission directory. + // create a open permission directory. final Path openDir = new Path("testAccessCase"); final FileSystem fs = openDir.getFileSystem(UTIL.getConfiguration()); fs.mkdirs(openDir); @@ -384,10 +358,9 @@ public void testVisibilityLabels() throws IOException, Throwable { labels.forEach(v -> buf.append(v).append(",")); buf.deleteCharAt(buf.length() - 1); try { - String[] args = new String[]{ - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), - exportTable, - output.toString(),}; + String[] args = + new String[] { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), + exportTable, output.toString(), }; Export.run(new Configuration(UTIL.getConfiguration()), args); return null; } catch (ServiceException | IOException ex) { @@ -397,19 +370,16 @@ public void testVisibilityLabels() throws IOException, Throwable { } }; SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_OWNER)); - final TableDescriptor importHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .build(); - SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][]{Bytes.toBytes("s")}); + final TableDescriptor importHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)).build(); + SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction importAction = () -> { - String[] args = new String[]{ - "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, - importTable, - output.toString() - }; - assertEquals(0, ToolRunner.run( - new Configuration(UTIL.getConfiguration()), new Import(), args)); + String[] args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + importTable, output.toString() }; + assertEquals(0, + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new Import(), args)); return null; }; SecureTestUtil.verifyAllowed(importAction, getUserByLogin(USER_OWNER)); @@ -417,8 +387,8 @@ public void testVisibilityLabels() throws IOException, Throwable { Scan scan = new Scan(); scan.setAuthorizations(new Authorizations(labels)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table table = conn.getTable(importHtd.getTableName()); - ResultScanner scanner = table.getScanner(scan)) { + Table table = conn.getTable(importHtd.getTableName()); + ResultScanner scanner = table.getScanner(scan)) { int count = 0; for (Result r : scanner) { ++count; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java index 63b2b1d68544..6ffdd5b31e39 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java +++ 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -35,18 +35,16 @@ public class TestCoprocessorRpcUtils {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestCoprocessorRpcUtils.class);
+    HBaseClassTestRule.forClass(TestCoprocessorRpcUtils.class);
   @Test
   public void testServiceName() throws Exception {
     // verify that we de-namespace build in HBase rpc services
-    ServiceDescriptor authService =
-        AuthenticationProtos.AuthenticationService.getDescriptor();
+    ServiceDescriptor authService = AuthenticationProtos.AuthenticationService.getDescriptor();
     assertEquals(authService.getName(), CoprocessorRpcUtils.getServiceName(authService));
     // non-hbase rpc services should remain fully qualified
-    ServiceDescriptor dummyService =
-        DummyRegionServerEndpointProtos.DummyService.getDescriptor();
+    ServiceDescriptor dummyService = DummyRegionServerEndpointProtos.DummyService.getDescriptor();
     assertEquals(dummyService.getFullName(), CoprocessorRpcUtils.getServiceName(dummyService));
   }
 }
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
index fca207570936..17b4257100a3 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
@@ -68,11 +68,11 @@
 import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingResponse;
 import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingService;
-@Category({RegionServerTests.class, MediumTests.class})
+@Category({ RegionServerTests.class, MediumTests.class })
 public class TestServerCustomProtocol {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestServerCustomProtocol.class);
+    HBaseClassTestRule.forClass(TestServerCustomProtocol.class);
   private static final Logger LOG = LoggerFactory.getLogger(TestServerCustomProtocol.class);
   static final String WHOAREYOU = "Who are you?";
@@ -98,27 +98,27 @@ public void stop(CoprocessorEnvironment env) throws IOException {
     @Override
     public void ping(RpcController controller, PingRequest request,
-        RpcCallback done) {
+      RpcCallback done) {
       this.counter++;
       done.run(PingResponse.newBuilder().setPong("pong").build());
     }
     @Override
     public void count(RpcController controller, CountRequest request,
-        RpcCallback done) {
+      RpcCallback done) {
       done.run(CountResponse.newBuilder().setCount(this.counter).build());
     }
     @Override
-    public void increment(RpcController controller,
-        IncrementCountRequest request, RpcCallback done) {
+    public void increment(RpcController controller, IncrementCountRequest request,
+      RpcCallback done) {
       this.counter += request.getDiff();
       done.run(IncrementCountResponse.newBuilder().setCount(this.counter).build());
     }
     @Override
     public void hello(RpcController controller, HelloRequest request,
-        RpcCallback done) {
+      RpcCallback done) {
       if (!request.hasName()) {
         done.run(HelloResponse.newBuilder().setResponse(WHOAREYOU).build());
       } else if (request.getName().equals(NOBODY)) {
@@ -130,7 +130,7 @@ public void hello(RpcController controller,
HelloRequest request, @Override public void noop(RpcController controller, NoopRequest request, - RpcCallback done) { + RpcCallback done) { done.run(NoopResponse.newBuilder().build()); } @@ -190,10 +190,10 @@ public static void tearDownAfterClass() throws Exception { @Test public void testSingleProxy() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = ping(table, null, null); + Map results = ping(table, null, null); // There are three regions so should get back three results. assertEquals(3, results.size()); - for (Map.Entry e: results.entrySet()) { + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", "pong", e.getValue()); } hello(table, "George", HELLO + "George"); @@ -202,8 +202,7 @@ public void testSingleProxy() throws Throwable { LOG.info("Who are you"); hello(table, NOBODY, null); LOG.info(NOBODY); - Map intResults = table.coprocessorService(PingService.class, - null, null, + Map intResults = table.coprocessorService(PingService.class, null, null, new Batch.Call() { @Override public Integer call(PingService instance) throws IOException { @@ -214,108 +213,103 @@ public Integer call(PingService instance) throws IOException { } }); int count = -1; - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertTrue(e.getValue() > 0); count = e.getValue(); } final int diff = 5; - intResults = table.coprocessorService(PingService.class, - null, null, + intResults = table.coprocessorService(PingService.class, null, null, new Batch.Call() { @Override public Integer call(PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.increment(null, - IncrementCountRequest.newBuilder().setDiff(diff).build(), + instance.increment(null, IncrementCountRequest.newBuilder().setDiff(diff).build(), rpcCallback); return rpcCallback.get().getCount(); } }); // There are three regions so should get back three results. 
assertEquals(3, results.size()); - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertEquals(e.getValue().intValue(), count + diff); } table.close(); } - private Map hello(final Table table, final String send, final String response) - throws ServiceException, Throwable { - Map results = hello(table, send); - for (Map.Entry e: results.entrySet()) { + private Map hello(final Table table, final String send, final String response) + throws ServiceException, Throwable { + Map results = hello(table, send); + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", response, e.getValue()); } return results; } - private Map hello(final Table table, final String send) - throws ServiceException, Throwable { + private Map hello(final Table table, final String send) + throws ServiceException, Throwable { return hello(table, send, null, null); } - private Map hello(final Table table, final String send, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - HelloRequest.Builder builder = HelloRequest.newBuilder(); - if (send != null) { - builder.setName(send); - } - instance.hello(null, builder.build(), rpcCallback); - HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; + private Map hello(final Table table, final String send, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + HelloRequest.Builder builder = HelloRequest.newBuilder(); + if (send != null) { + builder.setName(send); } - }); + instance.hello(null, builder.build(), rpcCallback); + HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); } - private Map compoundOfHelloAndPing(final Table table, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - HelloRequest.Builder builder = HelloRequest.newBuilder(); - // Call ping on same instance. Use result calling hello on same instance. - builder.setName(doPing(instance)); - instance.hello(null, builder.build(), rpcCallback); - HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; - } - }); + private Map compoundOfHelloAndPing(final Table table, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + HelloRequest.Builder builder = HelloRequest.newBuilder(); + // Call ping on same instance. 
Use result calling hello on same instance. + builder.setName(doPing(instance)); + instance.hello(null, builder.build(), rpcCallback); + HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); } - private Map noop(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map noop(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingService.class, start, end, - new Batch.Call() { - @Override - public String call(PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - NoopRequest.Builder builder = NoopRequest.newBuilder(); - instance.noop(null, builder.build(), rpcCallback); - rpcCallback.get(); - // Looks like null is expected when void. That is what the test below is looking for - return null; - } - }); + new Batch.Call() { + @Override + public String call(PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + NoopRequest.Builder builder = NoopRequest.newBuilder(); + instance.noop(null, builder.build(), rpcCallback); + rpcCallback.get(); + // Looks like null is expected when void. That is what the test below is looking for + return null; + } + }); } @Test public void testSingleMethod() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = table.coprocessorService(PingService.class, - null, ROW_A, + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = table.coprocessorService(PingService.class, null, ROW_A, new Batch.Call() { @Override public String call(PingService instance) throws IOException { @@ -342,10 +336,10 @@ public String call(PingService instance) throws IOException { @Test public void testRowRange() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - for (HRegionLocation e: locator.getAllRegionLocations()) { - LOG.info("Region " + e.getRegion().getRegionNameAsString() - + ", servername=" + e.getServerName()); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + for (HRegionLocation e : locator.getAllRegionLocations()) { + LOG.info( + "Region " + e.getRegion().getRegionNameAsString() + ", servername=" + e.getServerName()); } // Here are what regions looked like on a run: // @@ -353,7 +347,7 @@ public void testRowRange() throws Throwable { // test,bbb,1355943549661.110393b070dd1ed93441e0bc9b3ffb7e. // test,ccc,1355943549665.c3d6d125141359cbbd2a43eaff3cdf74. - Map results = ping(table, null, ROW_A); + Map results = ping(table, null, ROW_A); // Should contain first region only. 
assertEquals(1, results.size()); verifyRegionResults(locator, results, ROW_A); @@ -376,7 +370,7 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); // test explicit start + end results = ping(table, ROW_AB, ROW_BC); @@ -386,7 +380,7 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); // test single region results = ping(table, ROW_B, ROW_BC); @@ -395,15 +389,15 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_A, true); assertNull("Should be missing region for row aaa (prior to start)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); } } - private Map ping(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map ping(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingService.class, start, end, new Batch.Call() { @Override @@ -415,7 +409,7 @@ public String call(PingService instance) throws IOException { private static String doPing(PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.ping(null, PingRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getPong(); } @@ -423,8 +417,8 @@ private static String doPing(PingService instance) throws IOException { @Test public void testCompoundCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); verifyRegionResults(locator, results, "Hello, pong", ROW_A); verifyRegionResults(locator, results, "Hello, pong", ROW_B); verifyRegionResults(locator, results, "Hello, pong", ROW_C); @@ -434,8 +428,8 @@ public void testCompoundCall() throws Throwable { @Test public void testNullCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, null, ROW_A, ROW_C); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = hello(table, null, ROW_A, ROW_C); verifyRegionResults(locator, results, "Who are you?", ROW_A); verifyRegionResults(locator, results, "Who are you?", ROW_B); verifyRegionResults(locator, results, "Who are you?", ROW_C); @@ -445,8 +439,8 @@ public void testNullCall() throws Throwable { @Test public void testNullReturn() throws Throwable { 
try (Table table = util.getConnection().getTable(TEST_TABLE); - RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, "nobody", ROW_A, ROW_C); + RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { + Map results = hello(table, "nobody", ROW_A, ROW_C); verifyRegionResults(locator, results, null, ROW_A); verifyRegionResults(locator, results, null, ROW_B); verifyRegionResults(locator, results, null, ROW_C); @@ -456,7 +450,7 @@ public void testNullReturn() throws Throwable { @Test public void testEmptyReturnType() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE)) { - Map results = noop(table, ROW_A, ROW_C); + Map results = noop(table, ROW_A, ROW_C); assertEquals("Should have results from three regions", 3, results.size()); // all results should be null for (Object v : results.values()) { @@ -465,24 +459,22 @@ public void testEmptyReturnType() throws Throwable { } } - private void verifyRegionResults(RegionLocator table, Map results, byte[] row) - throws Exception { + private void verifyRegionResults(RegionLocator table, Map results, byte[] row) + throws Exception { verifyRegionResults(table, results, "pong", row); } private void verifyRegionResults(RegionLocator regionLocator, Map results, - String expected, byte[] row) throws Exception { - for (Map.Entry e: results.entrySet()) { - LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + - ", result key=" + Bytes.toString(e.getKey()) + - ", value=" + e.getValue()); + String expected, byte[] row) throws Exception { + for (Map.Entry e : results.entrySet()) { + LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + ", result key=" + + Bytes.toString(e.getKey()) + ", value=" + e.getValue()); } HRegionLocation loc = regionLocator.getRegionLocation(row, true); byte[] region = loc.getRegion().getRegionName(); - assertTrue("Results should contain region " + - Bytes.toStringBinary(region) + " for row '" + Bytes.toStringBinary(row)+ "'", - results.containsKey(region)); - assertEquals("Invalid result for row '"+Bytes.toStringBinary(row)+"'", - expected, results.get(region)); + assertTrue("Results should contain region " + Bytes.toStringBinary(region) + " for row '" + + Bytes.toStringBinary(row) + "'", results.containsKey(region)); + assertEquals("Invalid result for row '" + Bytes.toStringBinary(row) + "'", expected, + results.get(region)); } } diff --git a/hbase-examples/README.txt b/hbase-examples/README.txt index 7ac2d13bd9c7..894586615c00 100644 --- a/hbase-examples/README.txt +++ b/hbase-examples/README.txt @@ -64,7 +64,7 @@ Example code. ON PROTOBUFS This maven module has core protobuf definition files ('.protos') used by hbase -examples. +examples. Generation of java files from protobuf .proto files included here is done as part of the build. 
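Note (not part of the patch): the TestServerCustomProtocol hunks above are re-indentation only, but they exercise the coprocessor-endpoint calling pattern that is easy to lose in the formatting noise. The following is a minimal sketch of that pattern for reference. PingService, PingRequest/PingResponse and CoprocessorRpcUtils.BlockingRpcCallback are the same generated stubs and helper the test already imports; the class name PingEndpointClient and the method pingAllRegions are hypothetical, and the sketch assumes the Ping endpoint coprocessor is deployed on the target table.

  import java.io.IOException;
  import java.util.Map;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.client.coprocessor.Batch;
  import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
  import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingRequest;
  import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingResponse;
  import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingService;

  public final class PingEndpointClient {
    private PingEndpointClient() {
    }

    /** Calls PingService.ping on every region of the table; expects one "pong" per region. */
    public static Map<byte[], String> pingAllRegions(Connection connection, TableName tableName)
      throws Throwable {
      try (Table table = connection.getTable(tableName)) {
        // A null start/end row covers all regions; the result map is keyed by region name.
        return table.coprocessorService(PingService.class, null, null,
          new Batch.Call<PingService, String>() {
            @Override
            public String call(PingService instance) throws IOException {
              CoprocessorRpcUtils.BlockingRpcCallback<PingResponse> rpcCallback =
                new CoprocessorRpcUtils.BlockingRpcCallback<>();
              instance.ping(null, PingRequest.newBuilder().build(), rpcCallback);
              return rpcCallback.get().getPong();
            }
          });
      }
    }
  }

The hello, noop and compoundOfHelloAndPing helpers reformatted above all follow this same shape: one Batch.Call per region, a BlockingRpcCallback to collect the protobuf response, and a Map keyed by region name coming back.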
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index a3cebc055a7a..396c2949dee2 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -35,95 +35,6 @@ --> 3.17.3 - - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - ${surefire.firstPartGroups} - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} - true - - - - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - process-sources - - replace - - - - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - ([^\.])com.google.protobuf - $1org.apache.hbase.thirdparty.com.google.protobuf - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -258,8 +169,8 @@ org.apache.hbase hbase-http - test test-jar + test org.slf4j @@ -292,6 +203,95 @@ test + + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + ${surefire.firstPartGroups} + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} + true + + + + + + com.google.code.maven-replacer-plugin + replacer + 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + + + + + replace + + process-sources + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -313,7 +313,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -329,8 +331,8 @@ jaxb-api - javax.ws.rs - jsr311-api + javax.ws.rs + jsr311-api @@ -342,10 +344,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-external-blockcache Apache HBase - External Block Cache - - HBase module that provides out of process block cache. + HBase module that provides out of process block cache. Currently Memcached is the reference implementation for external block cache. External block caches allow HBase to take advantage of other more complex caches that can live longer than the HBase regionserver process and are not necessarily tied to a single computer - life time. However external block caches add in extra operational overhead. - - + life time. However external block caches add in extra operational overhead. 
+ + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-server + + + net.spy + spymemcached + true + + + org.slf4j + slf4j-api + + + junit + junit + test + + + @@ -60,10 +81,10 @@ versionInfo-source - generate-sources add-source + generate-sources ${project.build.directory}/generated-sources/java @@ -91,31 +112,6 @@ - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-server - - - net.spy - spymemcached - true - - - org.slf4j - slf4j-api - - - junit - junit - test - - - @@ -128,10 +124,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -167,7 +163,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -182,10 +180,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-hadoop-compat Apache HBase - Hadoop Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - + Interfaces to be implemented in order to smooth + over hadoop version differences @@ -166,8 +134,36 @@ + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + - + skipHadoopCompatTests @@ -190,15 +186,14 @@ - org.eclipse.m2e lifecycle-mapping - - + diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java index d29e7bc1d3b3..0aef77b73700 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Iterator; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +36,8 @@ public class CompatibilityFactory { /** * This is a static only class don't let any instance be created. 
*/ - protected CompatibilityFactory() {} + protected CompatibilityFactory() { + } public static synchronized T getInstance(Class klass) { T instance = null; @@ -48,10 +47,9 @@ public static synchronized T getInstance(Class klass) { instance = it.next(); if (it.hasNext()) { StringBuilder msg = new StringBuilder(); - msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); + msg.append("ServiceLoader provided more than one implementation for class: ").append(klass) + .append(", using implementation: ").append(instance.getClass()) + .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java index 0e633b8b15f4..ccd4d4f97add 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,39 +15,39 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be - * created. + * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be + * created. */ @InterfaceAudience.Private public class CompatibilitySingletonFactory extends CompatibilityFactory { public static enum SingletonStorage { INSTANCE; + private final Object lock = new Object(); private final Map instances = new HashMap<>(); } + private static final Logger LOG = LoggerFactory.getLogger(CompatibilitySingletonFactory.class); /** * This is a static only class don't let anyone create an instance. 
*/ - protected CompatibilitySingletonFactory() { } + protected CompatibilitySingletonFactory() { + } /** * Get the singleton instance of Any classes defined by compatibiliy jar's - * * @return the singleton */ @SuppressWarnings("unchecked") @@ -62,9 +62,8 @@ public static T getInstance(Class klass) { if (it.hasNext()) { StringBuilder msg = new StringBuilder(); msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); + .append(klass).append(", using implementation: ").append(instance.getClass()) + .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java index c0a8519c10cd..af7e87483d1d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -44,23 +43,21 @@ public interface MetricsIOSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String FS_READ_TIME_HISTO_KEY = "fsReadTime"; String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime"; String FS_WRITE_HISTO_KEY = "fsWriteTime"; String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount"; - String FS_READ_TIME_HISTO_DESC - = "Latency of HFile's sequential reads on this region server in milliseconds"; - String FS_PREAD_TIME_HISTO_DESC - = "Latency of HFile's positional reads on this region server in milliseconds"; - String FS_WRITE_TIME_HISTO_DESC - = "Latency of HFile's writes on this region server in milliseconds"; + String FS_READ_TIME_HISTO_DESC = + "Latency of HFile's sequential reads on this region server in milliseconds"; + String FS_PREAD_TIME_HISTO_DESC = + "Latency of HFile's positional reads on this region server in milliseconds"; + String FS_WRITE_TIME_HISTO_DESC = + "Latency of HFile's writes on this region server in milliseconds"; String CHECKSUM_FAILURES_DESC = "Number of checksum failures for the HBase HFile checksums at the" - + " HBase level (separate from HDFS checksums)"; - + + " HBase level (separate from HDFS checksums)"; /** * Update the fs sequential read time histogram diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java index fdb318adaac3..6ef5d180cd5e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -38,21 +37,18 @@ public MetricsIOSourceImpl(MetricsIOWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsIOSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsIOWrapper wrapper) { + public MetricsIOSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext, MetricsIOWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - fsReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); - fsPReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); - fsWriteTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); + fsReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); + fsPReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); + fsWriteTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java index 3ba8cd5d0ae8..e3dc724d8b7d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 69bd040e7f95..a1ec313f97a3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; @@ -25,20 +23,15 @@ @InterfaceAudience.Private public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses"; - String AUTHORIZATION_SUCCESSES_DESC = - "Number of authorization successes."; + String AUTHORIZATION_SUCCESSES_DESC = "Number of authorization successes."; String AUTHORIZATION_FAILURES_NAME = "authorizationFailures"; - String AUTHORIZATION_FAILURES_DESC = - "Number of authorization failures."; + String AUTHORIZATION_FAILURES_DESC = "Number of authorization failures."; String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses"; - String AUTHENTICATION_SUCCESSES_DESC = - "Number of authentication successes."; + String AUTHENTICATION_SUCCESSES_DESC = "Number of authentication successes."; String AUTHENTICATION_FAILURES_NAME = "authenticationFailures"; - String AUTHENTICATION_FAILURES_DESC = - "Number of authentication failures."; + String AUTHENTICATION_FAILURES_DESC = "Number of authentication failures."; String AUTHENTICATION_FALLBACKS_NAME = "authenticationFallbacks"; - String AUTHENTICATION_FALLBACKS_DESC = - "Number of fallbacks to insecure authentication."; + String AUTHENTICATION_FALLBACKS_DESC = "Number of fallbacks to insecure authentication."; String SENT_BYTES_NAME = "sentBytes"; String SENT_BYTES_DESC = "Number of bytes sent."; String RECEIVED_BYTES_NAME = "receivedBytes"; @@ -54,27 +47,26 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String TOTAL_CALL_TIME_NAME = "totalCallTime"; String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; String QUEUE_SIZE_NAME = "queueSize"; - String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + - "parsed and is waiting to run or is currently being executed."; + String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + + "parsed and is waiting to run or is currently being executed."; String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; - String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + - "parsed requests waiting in scheduler to be executed"; + String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + + "parsed requests waiting in scheduler to be executed"; String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue"; String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue"; String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; - String REPLICATION_QUEUE_DESC = - "Number of calls in the replication call queue waiting to be run"; + String REPLICATION_QUEUE_DESC = "Number of calls in the replication call queue waiting to be run"; String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String METAPRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String WRITE_QUEUE_NAME = "numCallsInWriteQueue"; - String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + - "parsed requests waiting in scheduler to be executed"; + String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + + "parsed requests waiting in scheduler to be executed"; String READ_QUEUE_NAME = "numCallsInReadQueue"; - String READ_QUEUE_DESC = "Number of calls in the read call queue; " + - "parsed requests waiting in scheduler to be executed"; + String READ_QUEUE_DESC = "Number of calls in the read call queue; 
" + + "parsed requests waiting in scheduler to be executed"; String SCAN_QUEUE_NAME = "numCallsInScanQueue"; - String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + - "parsed requests waiting in scheduler to be executed"; + String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + + "parsed requests waiting in scheduler to be executed"; String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler"; @@ -92,17 +84,16 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler"; String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers."; String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped"; - String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general queue which " + - "were dropped by CoDel RPC executor"; + String NUM_GENERAL_CALLS_DROPPED_DESC = + "Total number of calls in general queue which " + "were dropped by CoDel RPC executor"; String NUM_LIFO_MODE_SWITCHES_NAME = "numLifoModeSwitches"; - String NUM_LIFO_MODE_SWITCHES_DESC = "Total number of calls in general queue which " + - "were served from the tail of the queue"; + String NUM_LIFO_MODE_SWITCHES_DESC = + "Total number of calls in general queue which " + "were served from the tail of the queue"; // Direct Memory Usage metrics String NETTY_DM_USAGE_NAME = "nettyDirectMemoryUsage"; String NETTY_DM_USAGE_DESC = "Current Netty direct memory usage."; - void authorizationSuccess(); void authorizationFailure(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java index 7f1415ae86f2..027c197333a3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -34,18 +32,16 @@ public abstract class MetricsHBaseServerSourceFactory { static final String METRICS_DESCRIPTION = "Metrics about HBase Server IPC"; /** - * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. - * - * JMX_CONTEXT will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX + * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. JMX_CONTEXT + * will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX */ static final String METRICS_JMX_CONTEXT_SUFFIX = ",sub=" + METRICS_NAME; abstract MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper); /** - * From the name of the class that's starting up create the - * context that an IPC source should register itself. - * + * From the name of the class that's starting up create the context that an IPC source should + * register itself. * @param serverName The name of the class that's starting up. 
* @return The Camel Cased context name. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java index 67325c0728e5..bdb87a727a51 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.HashMap; import java.util.Locale; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory { private enum SourceStorage { INSTANCE; + HashMap sources = new HashMap<>(); } @@ -37,19 +35,16 @@ public MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrap } private static synchronized MetricsHBaseServerSource getSource(String serverName, - MetricsHBaseServerWrapper wrap) { + MetricsHBaseServerWrapper wrap) { String context = createContextName(serverName); MetricsHBaseServerSource source = SourceStorage.INSTANCE.sources.get(context); if (source == null) { - //Create the source. - source = new MetricsHBaseServerSourceImpl( - context, - METRICS_DESCRIPTION, - context.toLowerCase(Locale.ROOT), - context + METRICS_JMX_CONTEXT_SUFFIX, wrap); - - //Store back in storage + // Create the source. + source = new MetricsHBaseServerSourceImpl(context, METRICS_DESCRIPTION, + context.toLowerCase(Locale.ROOT), context + METRICS_JMX_CONTEXT_SUFFIX, wrap); + + // Store back in storage SourceStorage.INSTANCE.sources.put(context, source); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index e4fee95e2c4d..440ebc6f5a6c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -29,7 +27,7 @@ @InterfaceAudience.Private public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl - implements MetricsHBaseServerSource { + implements MetricsHBaseServerSource { private final MetricsHBaseServerWrapper wrapper; private final MutableFastCounter authorizationSuccesses; private final MutableFastCounter authorizationFailures; @@ -39,45 +37,40 @@ public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl private final MutableFastCounter sentBytes; private final MutableFastCounter receivedBytes; - private MetricHistogram queueCallTime; private MetricHistogram processCallTime; private MetricHistogram totalCallTime; private MetricHistogram requestSize; private MetricHistogram responseSize; - public MetricsHBaseServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsHBaseServerWrapper wrapper) { + public MetricsHBaseServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsHBaseServerWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; this.authorizationSuccesses = this.getMetricsRegistry().newCounter(AUTHORIZATION_SUCCESSES_NAME, - AUTHORIZATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_SUCCESSES_DESC, 0L); this.authorizationFailures = this.getMetricsRegistry().newCounter(AUTHORIZATION_FAILURES_NAME, - AUTHORIZATION_FAILURES_DESC, 0L); - this.authenticationSuccesses = this.getMetricsRegistry().newCounter( - AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_FAILURES_DESC, 0L); + this.authenticationSuccesses = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME, - AUTHENTICATION_FAILURES_DESC, 0L); - this.authenticationFallbacks = this.getMetricsRegistry().newCounter( - AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); - this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, - SENT_BYTES_DESC, 0L); - this.receivedBytes = this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, - RECEIVED_BYTES_DESC, 0L); - this.queueCallTime = this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, - QUEUE_CALL_TIME_DESC); - this.processCallTime = this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, - PROCESS_CALL_TIME_DESC); - this.totalCallTime = this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, - TOTAL_CALL_TIME_DESC); - this.requestSize = this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, - REQUEST_SIZE_DESC); - this.responseSize = this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, - RESPONSE_SIZE_DESC); + AUTHENTICATION_FAILURES_DESC, 0L); + this.authenticationFallbacks = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); + this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, SENT_BYTES_DESC, 0L); + this.receivedBytes = + this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, RECEIVED_BYTES_DESC, 0L); + this.queueCallTime = + this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, QUEUE_CALL_TIME_DESC); + this.processCallTime = + 
this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, PROCESS_CALL_TIME_DESC); + this.totalCallTime = + this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, TOTAL_CALL_TIME_DESC); + this.requestSize = + this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, REQUEST_SIZE_DESC); + this.responseSize = + this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, RESPONSE_SIZE_DESC); } @Override @@ -146,44 +139,40 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { if (wrapper != null) { mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize()) - .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), - wrapper.getGeneralQueueLength()) - .addGauge(Interns.info(REPLICATION_QUEUE_NAME, - REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength()) - .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), - wrapper.getPriorityQueueLength()) - .addGauge(Interns.info(METAPRIORITY_QUEUE_NAME, METAPRIORITY_QUEUE_DESC), - wrapper.getMetaPriorityQueueLength()) - .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, - NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections()) - .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, - NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount()) - .addGauge(Interns.info(NUM_ACTIVE_GENERAL_HANDLER_NAME, NUM_ACTIVE_GENERAL_HANDLER_DESC), - wrapper.getActiveGeneralRpcHandlerCount()) - .addGauge( - Interns.info(NUM_ACTIVE_PRIORITY_HANDLER_NAME, NUM_ACTIVE_PRIORITY_HANDLER_DESC), - wrapper.getActivePriorityRpcHandlerCount()) - .addGauge( - Interns.info(NUM_ACTIVE_REPLICATION_HANDLER_NAME, NUM_ACTIVE_REPLICATION_HANDLER_DESC), - wrapper.getActiveReplicationRpcHandlerCount()) - .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, - NUM_GENERAL_CALLS_DROPPED_DESC), wrapper.getNumGeneralCallsDropped()) - .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, - NUM_LIFO_MODE_SWITCHES_DESC), wrapper.getNumLifoModeSwitches()) - .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), - wrapper.getWriteQueueLength()) - .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), - wrapper.getReadQueueLength()) - .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), - wrapper.getScanQueueLength()) - .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC), - wrapper.getActiveWriteRpcHandlerCount()) - .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC), - wrapper.getActiveReadRpcHandlerCount()) - .addGauge(Interns.info(NUM_ACTIVE_SCAN_HANDLER_NAME, NUM_ACTIVE_SCAN_HANDLER_DESC), - wrapper.getActiveScanRpcHandlerCount()) - .addGauge(Interns.info(NETTY_DM_USAGE_NAME, NETTY_DM_USAGE_DESC), - wrapper.getNettyDmUsage()); + .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), + wrapper.getGeneralQueueLength()) + .addGauge(Interns.info(REPLICATION_QUEUE_NAME, REPLICATION_QUEUE_DESC), + wrapper.getReplicationQueueLength()) + .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), + wrapper.getPriorityQueueLength()) + .addGauge(Interns.info(METAPRIORITY_QUEUE_NAME, METAPRIORITY_QUEUE_DESC), + wrapper.getMetaPriorityQueueLength()) + .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, NUM_OPEN_CONNECTIONS_DESC), + wrapper.getNumOpenConnections()) + .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, NUM_ACTIVE_HANDLER_DESC), + wrapper.getActiveRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_GENERAL_HANDLER_NAME, NUM_ACTIVE_GENERAL_HANDLER_DESC), + wrapper.getActiveGeneralRpcHandlerCount()) + 
.addGauge(Interns.info(NUM_ACTIVE_PRIORITY_HANDLER_NAME, NUM_ACTIVE_PRIORITY_HANDLER_DESC), + wrapper.getActivePriorityRpcHandlerCount()) + .addGauge( + Interns.info(NUM_ACTIVE_REPLICATION_HANDLER_NAME, NUM_ACTIVE_REPLICATION_HANDLER_DESC), + wrapper.getActiveReplicationRpcHandlerCount()) + .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, NUM_GENERAL_CALLS_DROPPED_DESC), + wrapper.getNumGeneralCallsDropped()) + .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, NUM_LIFO_MODE_SWITCHES_DESC), + wrapper.getNumLifoModeSwitches()) + .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), wrapper.getWriteQueueLength()) + .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), wrapper.getReadQueueLength()) + .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), wrapper.getScanQueueLength()) + .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC), + wrapper.getActiveWriteRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC), + wrapper.getActiveReadRpcHandlerCount()) + .addGauge(Interns.info(NUM_ACTIVE_SCAN_HANDLER_NAME, NUM_ACTIVE_SCAN_HANDLER_DESC), + wrapper.getActiveScanRpcHandlerCount()) + .addGauge(Interns.info(NETTY_DM_USAGE_NAME, NETTY_DM_USAGE_DESC), + wrapper.getNettyDmUsage()); } metricsRegistry.snapshot(mrb, all); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java index db30c0348c35..136294883b69 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java index b4f62b3970b7..f1555f660bef 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Cluster; @@ -43,23 +41,20 @@ protected JobUtil() { /** * Initializes the staging directory and returns the path. 
- * * @param conf system configuration * @return staging directory path - * @throws IOException if the ownership on the staging directory is not as expected + * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ - public static Path getStagingDir(Configuration conf) - throws IOException, InterruptedException { + public static Path getStagingDir(Configuration conf) throws IOException, InterruptedException { return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); } /** * Initializes the staging directory and returns the qualified path. - * * @param conf conf system configuration * @return qualified staging directory path - * @throws IOException if the ownership on the staging directory is not as expected + * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ public static Path getQualifiedStagingDir(Configuration conf) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java index 4487021fac16..76a70edbddf3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -55,11 +54,11 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String RIT_COUNT_DESC = "Current number of Regions In Transition (Gauge)."; String RIT_COUNT_OVER_THRESHOLD_DESC = - "Current number of Regions In Transition over threshold time (Gauge)."; + "Current number of Regions In Transition over threshold time (Gauge)."; String RIT_OLDEST_AGE_DESC = - "Timestamp in milliseconds of the oldest Region In Transition (Gauge)."; + "Timestamp in milliseconds of the oldest Region In Transition (Gauge)."; String RIT_DURATION_DESC = - "Total durations in milliseconds for all Regions in Transition (Histogram)."; + "Total durations in milliseconds for all Regions in Transition (Histogram)."; // HBCK report metrics String ORPHAN_REGIONS_ON_RS = "orphanRegionsOnRS"; @@ -80,7 +79,7 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String OVERLAPS_DESC = "Current number of Overlaps (Gauge)."; String UNKNOWN_SERVER_REGIONS_DESC = "Current number of Unknown Server Regions (Gauge)."; String EMPTY_REGION_INFO_REGIONS_DESC = - "Current number of Regions with Empty Region Info (Gauge)."; + "Current number of Regions with Empty Region Info (Gauge)."; String ASSIGN_METRIC_PREFIX = "assign"; String UNASSIGN_METRIC_PREFIX = "unassign"; @@ -95,21 +94,18 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of regions in transition. - * * @param ritCount count of the regions in transition. 
*/ void setRIT(int ritCount); /** * Set the count of the number of regions that have been in transition over the threshold time. - * * @param ritCountOverThreshold number of regions in transition for longer than threshold. */ void setRITCountOverThreshold(int ritCountOverThreshold); /** * Set the oldest region in transition. - * * @param age age of the oldest RIT. */ void setRITOldestAge(long age); @@ -122,35 +118,30 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of orphan regions on RS. - * * @param orphanRegionsOnRs count of the orphan regions on RS in HBCK chore report. */ void setOrphanRegionsOnRs(int orphanRegionsOnRs); /** * Set the number of orphan regions on FS. - * * @param orphanRegionsOnFs count of the orphan regions on FS in HBCK chore report. */ void setOrphanRegionsOnFs(int orphanRegionsOnFs); /** * Set the number of inconsistent regions. - * * @param inconsistentRegions count of the inconsistent regions in HBCK chore report. */ void setInconsistentRegions(int inconsistentRegions); /** * Set the number of holes. - * * @param holes count of the holes in CatalogJanitor Consistency report. */ void setHoles(int holes); /** * Set the number of overlaps. - * * @param overlaps count of the overlaps in CatalogJanitor Consistency report. */ void setOverlaps(int overlaps); @@ -158,14 +149,14 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of unknown server regions. * @param unknownServerRegions count of the unknown server regions in CatalogJanitor Consistency - * report. + * report. */ void setUnknownServerRegions(int unknownServerRegions); /** * Set the number of regions with empty region info. * @param emptyRegionInfoRegions count of the regions with empty region info in CatalogJanitor - * Consistency report. + * Consistency report. */ void setEmptyRegionInfoRegions(int emptyRegionInfoRegions); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java index a2b2897b94b5..6be0f2f33002 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -28,9 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsAssignmentManagerSourceImpl - extends BaseSourceImpl - implements MetricsAssignmentManagerSource { +public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl + implements MetricsAssignmentManagerSource { private MutableGaugeLong ritGauge; private MutableGaugeLong ritCountOverThresholdGauge; @@ -63,16 +61,15 @@ public MetricsAssignmentManagerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsAssignmentManagerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsAssignmentManagerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } public void init() { ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, RIT_COUNT_DESC, 0L); - ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, - RIT_COUNT_OVER_THRESHOLD_DESC,0L); + ritCountOverThresholdGauge = + metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, RIT_COUNT_OVER_THRESHOLD_DESC, 0L); ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, RIT_OLDEST_AGE_DESC, 0L); ritDurationHisto = metricsRegistry.newTimeHistogram(RIT_DURATION_NAME, RIT_DURATION_DESC); operationCounter = metricsRegistry.getCounter(OPERATION_COUNT_NAME, 0L); @@ -80,22 +77,22 @@ public void init() { unknownServerOpenRegions = metricsRegistry.newGauge(UNKNOWN_SERVER_OPEN_REGIONS, "", 0); orphanRegionsOnRsGauge = - metricsRegistry.newGauge(ORPHAN_REGIONS_ON_RS, ORPHAN_REGIONS_ON_RS_DESC, 0L); + metricsRegistry.newGauge(ORPHAN_REGIONS_ON_RS, ORPHAN_REGIONS_ON_RS_DESC, 0L); orphanRegionsOnFsGauge = - metricsRegistry.newGauge(ORPHAN_REGIONS_ON_FS, ORPHAN_REGIONS_ON_FS_DESC, 0L); + metricsRegistry.newGauge(ORPHAN_REGIONS_ON_FS, ORPHAN_REGIONS_ON_FS_DESC, 0L); inconsistentRegionsGauge = - metricsRegistry.newGauge(INCONSISTENT_REGIONS, INCONSISTENT_REGIONS_DESC, 0L); + metricsRegistry.newGauge(INCONSISTENT_REGIONS, INCONSISTENT_REGIONS_DESC, 0L); holesGauge = metricsRegistry.newGauge(HOLES, HOLES_DESC, 0L); overlapsGauge = metricsRegistry.newGauge(OVERLAPS, OVERLAPS_DESC, 0L); unknownServerRegionsGauge = - metricsRegistry.newGauge(UNKNOWN_SERVER_REGIONS, UNKNOWN_SERVER_REGIONS_DESC, 0L); + metricsRegistry.newGauge(UNKNOWN_SERVER_REGIONS, UNKNOWN_SERVER_REGIONS_DESC, 0L); emptyRegionInfoRegionsGauge = - metricsRegistry.newGauge(EMPTY_REGION_INFO_REGIONS, EMPTY_REGION_INFO_REGIONS_DESC, 0L); + metricsRegistry.newGauge(EMPTY_REGION_INFO_REGIONS, EMPTY_REGION_INFO_REGIONS_DESC, 0L); /** - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use * {@link BaseSourceImpl#registry} to register the new metrics. 
*/ assignMetrics = new OperationMetrics(registry, ASSIGN_METRIC_PREFIX); @@ -222,7 +219,7 @@ public OperationMetrics getCloseMetrics() { public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 91dc71a034cc..53ed8a25ed0e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -54,7 +53,6 @@ public interface MetricsMasterFileSystemSource extends BaseSource { String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; - void updateMetaWALSplitTime(long time); void updateMetaWALSplitSize(long size); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java index d78efce2add9..dc2a2824269b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsMasterFilesystemSourceImpl - extends BaseSourceImpl - implements MetricsMasterFileSystemSource { +public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl + implements MetricsMasterFileSystemSource { private MetricHistogram splitSizeHisto; private MetricHistogram splitTimeHisto; @@ -36,9 +34,8 @@ public MetricsMasterFilesystemSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsMasterFilesystemSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsMasterFilesystemSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -47,9 +44,9 @@ public void init() { splitSizeHisto = metricsRegistry.newSizeHistogram(SPLIT_SIZE_NAME, SPLIT_SIZE_DESC); splitTimeHisto = metricsRegistry.newTimeHistogram(SPLIT_TIME_NAME, SPLIT_TIME_DESC); metaSplitTimeHisto = - metricsRegistry.newTimeHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC); + metricsRegistry.newTimeHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC); metaSplitSizeHisto = - metricsRegistry.newSizeHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC); + metricsRegistry.newSizeHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC); } @Override @@ -62,7 +59,6 @@ public void updateSplitSize(long size) { splitSizeHisto.add(size); } - @Override public void updateMetaWALSplitTime(long time) { metaSplitTimeHisto.add(time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java index db4f25ec03e3..07ceaaf2e241 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java index 197f9f9fe754..a399e53b4fb3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java index 6fd254e9a690..dc5773cb9046 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java index 69e7d7958fab..001ab0168145 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,29 +24,20 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterProcSourceImpl - extends BaseSourceImpl implements MetricsMasterProcSource { +public class MetricsMasterProcSourceImpl extends BaseSourceImpl implements MetricsMasterProcSource { private final MetricsMasterWrapper masterWrapper; public MetricsMasterProcSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterProcSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterProcSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -64,13 +54,12 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // masterWrapper can be null because this function is called inside of init. 
if (masterWrapper != null) { - metricsRecordBuilder - .addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), - masterWrapper.getNumWALFiles()); + metricsRecordBuilder.addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), + masterWrapper.getNumWALFiles()); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java index 8450432ade67..c0b4c73cc61e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,16 +41,16 @@ public interface MetricsMasterQuotaSource extends BaseSource { String NUM_REGION_SIZE_REPORTS_DESC = "Number of Region sizes reported"; String QUOTA_OBSERVER_CHORE_TIME_NAME = "quotaObserverChoreTime"; String QUOTA_OBSERVER_CHORE_TIME_DESC = - "Histogram for the time in millis for the QuotaObserverChore"; + "Histogram for the time in millis for the QuotaObserverChore"; String SNAPSHOT_OBSERVER_CHORE_TIME_NAME = "snapshotQuotaObserverChoreTime"; String SNAPSHOT_OBSERVER_CHORE_TIME_DESC = - "Histogram for the time in millis for the SnapshotQuotaObserverChore"; + "Histogram for the time in millis for the SnapshotQuotaObserverChore"; String SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME = "snapshotObserverSizeComputationTime"; String SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC = - "Histogram for the time in millis to compute the size of each snapshot"; + "Histogram for the time in millis to compute the size of each snapshot"; String SNAPSHOT_OBSERVER_FETCH_TIME_NAME = "snapshotObserverSnapshotFetchTime"; String SNAPSHOT_OBSERVER_FETCH_TIME_DESC = - "Histogram for the time in millis to fetch all snapshots from HBase"; + "Histogram for the time in millis to fetch all snapshots from HBase"; String TABLE_QUOTA_USAGE_NAME = "tableSpaceQuotaOverview"; String TABLE_QUOTA_USAGE_DESC = "A JSON summary of the usage of all tables with space quotas"; String NS_QUOTA_USAGE_NAME = "namespaceSpaceQuotaOverview"; @@ -57,40 +58,35 @@ public interface MetricsMasterQuotaSource extends BaseSource { /** * 
Updates the metric tracking the number of space quotas defined in the system. - * * @param numSpaceQuotas The number of space quotas defined */ void updateNumSpaceQuotas(long numSpaceQuotas); /** - * Updates the metric tracking the number of tables the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of tables the master has computed to be in violation of + * their space quota. * @param numTablesInViolation The number of tables violating a space quota */ void updateNumTablesInSpaceQuotaViolation(long numTablesInViolation); /** - * Updates the metric tracking the number of namespaces the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of namespaces the master has computed to be in violation + * of their space quota. * @param numNamespacesInViolation The number of namespaces violating a space quota */ void updateNumNamespacesInSpaceQuotaViolation(long numNamespacesInViolation); /** - * Updates the metric tracking the number of region size reports the master is currently - * retaining in memory. - * + * Updates the metric tracking the number of region size reports the master is currently retaining + * in memory. * @param numCurrentRegionSizeReports The number of region size reports the master is holding in - * memory + * memory */ void updateNumCurrentSpaceQuotaRegionSizeReports(long numCurrentRegionSizeReports); /** - * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} - * which runs periodically. - * + * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} which + * runs periodically. * @param time The execution time of the chore in milliseconds */ void incrementSpaceQuotaObserverChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java index 2dcd945ea811..a53652b0f3dc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java index 0fae0e744059..6a489eb70019 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java index 750c1c959fcb..71c0ea63ac25 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +19,6 @@ import java.util.Map; import java.util.Map.Entry; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; @@ -33,7 +33,7 @@ */ @InterfaceAudience.Private public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl - implements MetricsMasterQuotaSource { + implements MetricsMasterQuotaSource { private final MetricsMasterWrapper wrapper; private final MutableGaugeLong spaceQuotasGauge; private final MutableGaugeLong tablesViolatingQuotasGauge; @@ -48,30 +48,29 @@ public MetricsMasterQuotaSourceImpl(MetricsMasterWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsMasterQuotaSourceImpl( - String metricsName, String metricsDescription, String metricsContext, - String metricsJmxContext, MetricsMasterWrapper wrapper) { + public MetricsMasterQuotaSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - spaceQuotasGauge = getMetricsRegistry().newGauge( - NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); - tablesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_TABLES_QUOTA_VIOLATIONS_NAME, NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); - namespacesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_NS_QUOTA_VIOLATIONS_NAME, NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); - regionSpaceReportsGauge = getMetricsRegistry().newGauge( - NUM_REGION_SIZE_REPORTS_NAME, NUM_REGION_SIZE_REPORTS_DESC, 0L); + spaceQuotasGauge = + getMetricsRegistry().newGauge(NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); + tablesViolatingQuotasGauge = getMetricsRegistry().newGauge(NUM_TABLES_QUOTA_VIOLATIONS_NAME, + NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); + namespacesViolatingQuotasGauge = + getMetricsRegistry().newGauge(NUM_NS_QUOTA_VIOLATIONS_NAME, NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); + regionSpaceReportsGauge = + getMetricsRegistry().newGauge(NUM_REGION_SIZE_REPORTS_NAME, NUM_REGION_SIZE_REPORTS_DESC, 0L); - quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - QUOTA_OBSERVER_CHORE_TIME_NAME, QUOTA_OBSERVER_CHORE_TIME_DESC); - snapshotObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); + quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram(QUOTA_OBSERVER_CHORE_TIME_NAME, + QUOTA_OBSERVER_CHORE_TIME_DESC); + snapshotObserverTimeHisto = getMetricsRegistry() + .newTimeHistogram(SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); snapshotObserverSizeComputationTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); - snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); + SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); + snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry() + 
.newTimeHistogram(SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); } @Override @@ -109,7 +108,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info()); if (wrapper != null) { // Summarize the tables - Map> tableUsages = wrapper.getTableSpaceUtilization(); + Map> tableUsages = wrapper.getTableSpaceUtilization(); String tableSummary = "[]"; if (tableUsages != null && !tableUsages.isEmpty()) { tableSummary = generateJsonQuotaSummary(tableUsages.entrySet(), "table"); @@ -118,7 +117,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // Summarize the namespaces String nsSummary = "[]"; - Map> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); + Map> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); if (namespaceUsages != null && !namespaceUsages.isEmpty()) { nsSummary = generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace"); } @@ -130,10 +129,10 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { /** * Summarizes the usage and limit for many targets (table or namespace) into JSON. */ - private String generateJsonQuotaSummary( - Iterable>> data, String target) { + private String generateJsonQuotaSummary(Iterable>> data, + String target) { StringBuilder sb = new StringBuilder(); - for (Entry> tableUsage : data) { + for (Entry> tableUsage : data) { String tableName = tableUsage.getKey(); long usage = tableUsage.getValue().getKey(); long limit = tableUsage.getValue().getValue(); @@ -141,7 +140,7 @@ private String generateJsonQuotaSummary( sb.append(", "); } sb.append("{").append(target).append("=").append(tableName).append(", usage=").append(usage) - .append(", limit=").append(limit).append("}"); + .append(", limit=").append(limit).append("}"); } sb.insert(0, "[").append("]"); return sb.toString(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 5f275e847dc6..3b81baaa2b25 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -74,7 +73,7 @@ public interface MetricsMasterSource extends BaseSource { String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; String MASTER_START_TIME_DESC = "Master Start Time"; String MASTER_FINISHED_INITIALIZATION_TIME_DESC = - "Timestamp when Master has finished initializing"; + "Timestamp when Master has finished initializing"; String AVERAGE_LOAD_DESC = "AverageLoad"; String LIVE_REGION_SERVERS_DESC = "Names of live RegionServers"; String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers"; @@ -95,27 +94,22 @@ public interface MetricsMasterSource extends BaseSource { /** * Increment the number of requests the cluster has seen. - * * @param inc Amount to increment the total by.
*/ void incRequests(final long inc); /** * Increment the number of read requests the cluster has seen. - * * @param inc Amount to increment the total by. */ void incReadRequests(final long inc); - /** * Increment the number of write requests the cluster has seen. - * * @param inc Amount to increment the total by. */ void incWriteRequests(final long inc); - /** * @return {@link OperationMetrics} containing common metrics for server crash operation */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java index fce574a2cf07..bfdf348b34f7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java index a4b3fa194f9c..84c49062f03a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsMasterSource when given a MetricsMasterWrapper + * Factory to create MetricsMasterSource when given a MetricsMasterWrapper */ @InterfaceAudience.Private public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { private static enum FactoryStorage { INSTANCE; + MetricsMasterSource masterSource; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index 4072d8d20835..8280083e472f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -28,13 +27,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterSourceImpl - extends BaseSourceImpl implements MetricsMasterSource { +public class MetricsMasterSourceImpl extends BaseSourceImpl implements MetricsMasterSource { private final MetricsMasterWrapper masterWrapper; private MutableFastCounter clusterRequestsCounter; @@ -44,18 +41,11 @@ public class MetricsMasterSourceImpl private OperationMetrics serverCrashMetrics; public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -69,9 +59,9 @@ public void init() { clusterWriteRequestsCounter = metricsRegistry.newCounter(CLUSTER_WRITE_REQUESTS_NAME, "", 0L); /* - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use - * {@link BaseSourceImpl#registry} to register the new metrics. + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use {@link + * BaseSourceImpl#registry} to register the new metrics. 
*/ serverCrashMetrics = new OperationMetrics(registry, SERVER_CRASH_METRIC_PREFIX); } @@ -105,46 +95,44 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { PairOfSameType regionNumberPair = masterWrapper.getRegionCounts(); metricsRecordBuilder - .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), - masterWrapper.getMergePlanCount()) - .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), - masterWrapper.getSplitPlanCount()) - .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) - .addGauge(Interns.info(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC), masterWrapper.getStartTime()) - .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, - MASTER_FINISHED_INITIALIZATION_TIME_DESC), - masterWrapper.getMasterInitializationTime()) - .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), - masterWrapper.getAverageLoad()) - .addGauge(Interns.info(ONLINE_REGION_COUNT_NAME, ONLINE_REGION_COUNT_DESC), - regionNumberPair.getFirst()) - .addGauge(Interns.info(OFFLINE_REGION_COUNT_NAME, OFFLINE_REGION_COUNT_DESC), - regionNumberPair.getSecond()) - .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), - masterWrapper.getRegionServers()) - .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, - NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers()) - .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), - masterWrapper.getDeadRegionServers()) - .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, - NUMBER_OF_DEAD_REGION_SERVERS_DESC), - masterWrapper.getNumDeadRegionServers()) - .tag(Interns.info(DRAINING_REGION_SERVER_NAME, DRAINING_REGION_SERVER_DESC), - masterWrapper.getDrainingRegionServers()) - .addGauge(Interns.info(NUM_DRAINING_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), - masterWrapper.getNumDrainingRegionServers()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - masterWrapper.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) - .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), - String.valueOf(masterWrapper.getIsActiveMaster())); + .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), + masterWrapper.getMergePlanCount()) + .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), + masterWrapper.getSplitPlanCount()) + .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, MASTER_ACTIVE_TIME_DESC), + masterWrapper.getActiveTime()) + .addGauge(Interns.info(MASTER_START_TIME_NAME, MASTER_START_TIME_DESC), + masterWrapper.getStartTime()) + .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, + MASTER_FINISHED_INITIALIZATION_TIME_DESC), masterWrapper.getMasterInitializationTime()) + .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), + masterWrapper.getAverageLoad()) + .addGauge(Interns.info(ONLINE_REGION_COUNT_NAME, ONLINE_REGION_COUNT_DESC), + regionNumberPair.getFirst()) + .addGauge(Interns.info(OFFLINE_REGION_COUNT_NAME, OFFLINE_REGION_COUNT_DESC), + regionNumberPair.getSecond()) + .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), + masterWrapper.getRegionServers()) + .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), + masterWrapper.getNumRegionServers()) + .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), + 
masterWrapper.getDeadRegionServers()) + .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, NUMBER_OF_DEAD_REGION_SERVERS_DESC), + masterWrapper.getNumDeadRegionServers()) + .tag(Interns.info(DRAINING_REGION_SERVER_NAME, DRAINING_REGION_SERVER_DESC), + masterWrapper.getDrainingRegionServers()) + .addGauge(Interns.info(NUM_DRAINING_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), + masterWrapper.getNumDrainingRegionServers()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), + masterWrapper.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) + .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), + String.valueOf(masterWrapper.getIsActiveMaster())); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java index 1b3a75c3b84e..a900edf115e3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.util.Map; @@ -42,63 +41,54 @@ public interface MetricsMasterWrapper { /** * Get Average Load - * * @return Average Load */ double getAverageLoad(); /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String[] getCoprocessors(); /** * Get hbase master start time - * * @return Start time of master in milliseconds */ long getStartTime(); /** * Get the hbase master active time - * * @return Time in milliseconds when master became active */ long getActiveTime(); /** * Whether this master is the active master - * * @return True if this is the active master */ boolean getIsActiveMaster(); /** * Get the live region servers - * * @return Live region servers */ String getRegionServers(); /** * Get the number of live region servers - * * @return number of Live region servers */ @@ -106,28 +96,24 @@ public interface MetricsMasterWrapper { /** * Get the dead region servers - * * @return Dead region Servers */ String getDeadRegionServers(); /** * Get the number of dead region servers - * * @return number of Dead region Servers */ int getNumDeadRegionServers(); /** * Get the draining region servers - * * @return Draining region server */ String getDrainingRegionServers(); /** * Get the number of draining region servers - * * @return number of draining region servers */ int getNumDrainingRegionServers(); @@ -150,12 +136,12 @@ public interface MetricsMasterWrapper { /** * Gets the space usage and limit for each table. 
*/ - Map> getTableSpaceUtilization(); + Map> getTableSpaceUtilization(); /** * Gets the space usage and limit for each namespace. */ - Map> getNamespaceSpaceUtilization(); + Map> getNamespaceSpaceUtilization(); /** * Get the time in Millis when the master finished initializing/becoming the active master @@ -164,7 +150,6 @@ public interface MetricsMasterWrapper { /** * Get the online and offline region counts - * * @return pair of count for online regions and offline regions */ PairOfSameType getRegionCounts(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java index 15315b6c3ef8..88e21621f100 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java index 7077f73ea47b..52311218734f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -33,20 +32,18 @@ public MetricsSnapshotSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsSnapshotSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsSnapshotSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @Override public void init() { - snapshotTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); - snapshotCloneTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); - snapshotRestoreTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); + snapshotTimeHisto = metricsRegistry.newTimeHistogram(SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); + snapshotCloneTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); + snapshotRestoreTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java index 6b8c40ba5127..502de8859ae9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public interface MetricsBalancerSource extends BaseSource { +public interface MetricsBalancerSource extends BaseSource { /** * The name of the metrics diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java index 7bccbb70d584..9cd07dbb2bff 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -32,9 +31,8 @@ public MetricsBalancerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsBalancerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsBalancerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); updateBalancerStatus(true); } @@ -57,6 +55,6 @@ public void incrMiscInvocations() { @Override public void updateBalancerStatus(boolean status) { - metricsRegistry.tag(BALANCER_STATUS,"", String.valueOf(status), true); + metricsRegistry.tag(BALANCER_STATUS, "", String.valueOf(status), true); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java index 6eecc1233fd3..f3318c40ab50 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface extends the basic metrics balancer source to add a function - * to report metrics that related to stochastic load balancer. The purpose is to - * offer an insight to the internal cost calculations that can be useful to tune - * the balancer. For details, refer to HBASE-13965 + * This interface extends the basic metrics balancer source to add a function to report metrics + * related to the stochastic load balancer. The purpose is to offer insight into the internal cost + * calculations that can be useful to tune the balancer. For details, refer to HBASE-13965 */ @InterfaceAudience.Private public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { @@ -38,5 +36,5 @@ public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { * Reports stochastic load balancer costs to JMX */ public void updateStochasticCost(String tableName, String costFunctionName, - String costFunctionDesc, Double value); + String costFunctionDesc, Double value); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java index de1dd81b17fa..358e4a795152 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl implements - MetricsStochasticBalancerSource { +public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl + implements MetricsStochasticBalancerSource { private static final String TABLE_FUNCTION_SEP = "_"; // Most Recently Used(MRU) cache @@ -38,14 +36,14 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm private int mruCap = calcMruCap(metricsSize); private final Map> stochasticCosts = - new LinkedHashMap>(mruCap, MRU_LOAD_FACTOR, true) { - private static final long serialVersionUID = 8204713453436906599L; + new LinkedHashMap>(mruCap, MRU_LOAD_FACTOR, true) { + private static final long serialVersionUID = 8204713453436906599L; - @Override - protected boolean removeEldestEntry(Map.Entry> eldest) { - return size() > mruCap; - } - }; + @Override + protected boolean removeEldestEntry(Map.Entry> eldest) { + return size() > mruCap; + } + }; private Map costFunctionDescs = new ConcurrentHashMap<>(); /** @@ -67,7 +65,7 @@ public void updateMetricsSize(int size) { * Reports stochastic load balancer costs to JMX */ public void updateStochasticCost(String tableName, String costFunctionName, String functionDesc, - Double cost) { + Double cost) { if (tableName == null || costFunctionName == null || cost == null) { return; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index 76391bb8d7b7..3ed8cce6385e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; /** - * BaseSource for dynamic metrics to announce to Metrics2. - * In hbase-hadoop{1|2}-compat there is an implementation of this interface. + * BaseSource for dynamic metrics to announce to Metrics2. In hbase-hadoop{1|2}-compat there is an + * implementation of this interface. */ @InterfaceAudience.Private public interface BaseSource { @@ -36,7 +35,6 @@ public interface BaseSource { /** * Set a gauge to a specific value. - * * @param gaugeName the name of the gauge * @param value the value */ @@ -44,7 +42,6 @@ public interface BaseSource { /** * Add some amount to a gauge. - * * @param gaugeName the name of the gauge * @param delta the amount to change the gauge by. */ @@ -52,7 +49,6 @@ public interface BaseSource { /** * Subtract some amount from a gauge. 
- * * @param gaugeName the name of the gauge * @param delta the amount to change the gauge by. */ @@ -60,14 +56,12 @@ public interface BaseSource { /** * Remove a metric and no longer announce it. - * * @param key Name of the gauge to remove. */ void removeMetric(String key); /** * Add some amount to a counter. - * * @param counterName the name of the counter * @param delta the amount to change the counter by. */ @@ -75,17 +69,14 @@ public interface BaseSource { /** * Add some value to a histogram. - * - * @param name the name of the histogram + * @param name the name of the histogram * @param value the value to add to the histogram */ void updateHistogram(String name, long value); - /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ String getMetricsContext(); @@ -96,20 +87,19 @@ public interface BaseSource { String getMetricsDescription(); /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ String getMetricsJmxContext(); /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ String getMetricsName(); default MetricRegistryInfo getMetricRegistryInfo() { - return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), - getMetricsContext(), getMetricsJmxContext(), true); + return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), getMetricsContext(), + getMetricsJmxContext(), true); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java index a90d810701c5..9f11ff971286 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.hbase.metrics.impl.GlobalMetricRegistriesAdapter; @@ -33,16 +32,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to - * DefaultMetricsSystem and creation of the metrics registry. - * - * All MetricsSource's in hbase-hadoop2-compat should derive from this class. + * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to + * DefaultMetricsSystem and creation of the metrics registry. All MetricsSource's in + * hbase-hadoop2-compat should derive from this class. 
*/ @InterfaceAudience.Private public class BaseSourceImpl implements BaseSource, MetricsSource { private static enum DefaultMetricsSystemInitializer { INSTANCE; + private boolean inited = false; synchronized void init(String name) { @@ -62,10 +61,10 @@ synchronized void init(String name) { } /** - * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. - * Defining BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, - * however, for existing {@link BaseSource} implementations, please use the field - * named "registry" which is a {@link MetricRegistry} instance together with the + * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. Defining + * BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, however, + * for existing {@link BaseSource} implementations, please use the field named + * "registry" which is a {@link MetricRegistry} instance together with the * {@link HBaseMetrics2HadoopMetricsAdapter}. */ @Deprecated @@ -77,17 +76,16 @@ synchronized void init(String name) { /** * Note that there are at least 4 MetricRegistry definitions in the source code. The first one is - * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork - * of the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of + * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork of + * the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of * MetricRegistry, and finally a new API abstraction in HBase that is the * o.a.h.h.metrics.MetricRegistry class. This last one is the new way to use metrics within the - * HBase code. However, the others are in play because of existing metrics2 based code still - * needs to coexists until we get rid of all of our BaseSource and convert them to the new - * framework. Until that happens, new metrics can use the new API, but will be collected - * through the HBaseMetrics2HadoopMetricsAdapter class. - * - * BaseSourceImpl has two MetricRegistries. metricRegistry is for hadoop Metrics2 based - * metrics, while the registry is for hbase-metrics based metrics. + * HBase code. However, the others are in play because existing metrics2 based code still needs + * to coexist until we get rid of all of our BaseSource and convert them to the new framework. + * Until that happens, new metrics can use the new API, but will be collected through the + * HBaseMetrics2HadoopMetricsAdapter class. BaseSourceImpl has two MetricRegistries. + * metricRegistry is for hadoop Metrics2 based metrics, while the registry is for hbase-metrics + * based metrics. */ protected final MetricRegistry registry; @@ -101,11 +99,8 @@ synchronized void init(String name) { */ protected final HBaseMetrics2HadoopMetricsAdapter metricsAdapter; - public BaseSourceImpl( - String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public BaseSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { this.metricsName = metricsName; this.metricsDescription = metricsDescription; @@ -115,7 +110,7 @@ public BaseSourceImpl( metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext); DefaultMetricsSystemInitializer.INSTANCE.init(metricsName); - //Register this instance. + // Register this instance.
DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this); // hbase-metrics module based metrics are registered in the hbase MetricsRegistry. @@ -132,7 +127,6 @@ public void init() { /** * Set a single gauge to a value. - * * @param gaugeName gauge name * @param value the new value of the gauge. */ @@ -143,7 +137,6 @@ public void setGauge(String gaugeName, long value) { /** * Add some amount to a gauge. - * * @param gaugeName The name of the gauge to increment. * @param delta The amount to increment the gauge by. */ @@ -154,7 +147,6 @@ public void incGauge(String gaugeName, long delta) { /** * Decrease the value of a named gauge. - * * @param gaugeName The name of the gauge. * @param delta the ammount to subtract from a gauge value. */ @@ -165,7 +157,6 @@ public void decGauge(String gaugeName, long delta) { /** * Increment a named counter by some value. - * * @param key the name of the counter * @param delta the ammount to increment */ @@ -183,7 +174,6 @@ public void updateHistogram(String name, long value) { /** * Remove a named gauge. - * * @param key the key of the gauge to remove */ public void removeMetric(String key) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java index 3c5f898fc290..afa5f17361a7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -26,19 +25,20 @@ */ @InterfaceAudience.Private public interface ExceptionTrackingSource extends BaseSource { - String EXCEPTIONS_NAME="exceptions"; - String EXCEPTIONS_DESC="Exceptions caused by requests"; - String EXCEPTIONS_TYPE_DESC="Number of requests that resulted in the specified type of Exception"; - String EXCEPTIONS_OOO_NAME="exceptions.OutOfOrderScannerNextException"; - String EXCEPTIONS_BUSY_NAME="exceptions.RegionTooBusyException"; - String EXCEPTIONS_UNKNOWN_NAME="exceptions.UnknownScannerException"; - String EXCEPTIONS_SCANNER_RESET_NAME="exceptions.ScannerResetException"; - String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException"; - String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException"; - String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException"; + String EXCEPTIONS_NAME = "exceptions"; + String EXCEPTIONS_DESC = "Exceptions caused by requests"; + String EXCEPTIONS_TYPE_DESC = + "Number of requests that resulted in the specified type of Exception"; + String EXCEPTIONS_OOO_NAME = "exceptions.OutOfOrderScannerNextException"; + String EXCEPTIONS_BUSY_NAME = "exceptions.RegionTooBusyException"; + String EXCEPTIONS_UNKNOWN_NAME = "exceptions.UnknownScannerException"; + String EXCEPTIONS_SCANNER_RESET_NAME = "exceptions.ScannerResetException"; + String EXCEPTIONS_SANITY_NAME = "exceptions.FailedSanityCheckException"; + String EXCEPTIONS_MOVED_NAME = "exceptions.RegionMovedException"; + String EXCEPTIONS_NSRE_NAME = "exceptions.NotServingRegionException"; String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge"; - String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + - "rest of the 
requests will have to be retried."; + String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + + "rest of the requests will have to be retried."; String EXCEPTIONS_CALL_QUEUE_TOO_BIG = "exceptions.callQueueTooBig"; String EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC = "Call queue is full"; String EXCEPTIONS_QUOTA_EXCEEDED = "exceptions.quotaExceeded"; @@ -54,18 +54,32 @@ public interface ExceptionTrackingSource extends BaseSource { * Different types of exceptions */ void outOfOrderException(); + void failedSanityException(); + void movedRegionException(); + void notServingRegionException(); + void unknownScannerException(); + void scannerResetException(); + void tooBusyException(); + void multiActionTooLargeException(); + void callQueueTooBigException(); + void quotaExceededException(); + void rpcThrottlingException(); + void callDroppedException(); + void callTimedOut(); + void requestTooBigException(); + void otherExceptions(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java index a4e75ba0137e..58abc166bf70 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java @@ -15,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Common base implementation for metrics sources which need to track exceptions thrown or - * received. + * Common base implementation for metrics sources which need to track exceptions thrown or received. 
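(Hedged sketch, not part of this patch: a hypothetical dispatcher showing how a caller could feed the per-type counters declared by ExceptionTrackingSource above; only methods visible in this hunk are used, and the exception-class imports are assumed to come from hbase-client.)

import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource;

public class ExceptionMetricsExample {
  // Hypothetical helper, not part of this patch.
  static void record(ExceptionTrackingSource source, Throwable t) {
    if (t instanceof NotServingRegionException) {
      source.notServingRegionException();
    } else if (t instanceof RegionTooBusyException) {
      source.tooBusyException();
    } else {
      source.otherExceptions(); // anything without a dedicated counter
    }
  }
}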
*/ @InterfaceAudience.Private -public class ExceptionTrackingSourceImpl extends BaseSourceImpl - implements ExceptionTrackingSource { +public class ExceptionTrackingSourceImpl extends BaseSourceImpl implements ExceptionTrackingSource { protected MutableFastCounter exceptions; protected MutableFastCounter exceptionsOOO; protected MutableFastCounter exceptionsBusy; @@ -46,7 +43,7 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl protected MutableFastCounter otherExceptions; public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -54,36 +51,36 @@ public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription public void init() { super.init(); this.exceptions = this.getMetricsRegistry().newCounter(EXCEPTIONS_NAME, EXCEPTIONS_DESC, 0L); - this.exceptionsOOO = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsBusy = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsUnknown = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsScannerReset = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SCANNER_RESET_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsSanity = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsMoved = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsNSRE = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsOOO = + this.getMetricsRegistry().newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsBusy = + this.getMetricsRegistry().newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsUnknown = + this.getMetricsRegistry().newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsScannerReset = + this.getMetricsRegistry().newCounter(EXCEPTIONS_SCANNER_RESET_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsSanity = + this.getMetricsRegistry().newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsMoved = + this.getMetricsRegistry().newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsNSRE = + this.getMetricsRegistry().newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsMultiTooLarge = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); + .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); this.exceptionsCallQueueTooBig = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); - this.exceptionsQuotaExceeded = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsRpcThrottling = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallDropped = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallTimedOut = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionRequestTooBig = this.getMetricsRegistry() - 
.newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); - this.otherExceptions = this.getMetricsRegistry() - .newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); + .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); + this.exceptionsQuotaExceeded = + this.getMetricsRegistry().newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsRpcThrottling = + this.getMetricsRegistry().newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallDropped = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallTimedOut = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionRequestTooBig = + this.getMetricsRegistry().newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); + this.otherExceptions = + this.getMetricsRegistry().newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java index 254d3b4a9719..5952a60a0a59 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.yetus.audience.InterfaceAudience; @@ -39,25 +37,25 @@ public final class Interns { private static LoadingCache> infoCache = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) - .build(new CacheLoader>() { - public ConcurrentHashMap load(String key) { - return new ConcurrentHashMap<>(); - } - }); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) + .build(new CacheLoader>() { + public ConcurrentHashMap load(String key) { + return new ConcurrentHashMap<>(); + } + }); private static LoadingCache> tagCache = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) - .build(new CacheLoader>() { - public ConcurrentHashMap load(MetricsInfo key) { - return new ConcurrentHashMap<>(); - } - }); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) + .build(new CacheLoader>() { + public ConcurrentHashMap load(MetricsInfo key) { + return new ConcurrentHashMap<>(); + } + }); - private Interns(){} + private Interns() { + } /** * Get a metric info object - * * @return an interned metric info object */ public static MetricsInfo info(String name, String description) { @@ -72,7 +70,6 @@ public static MetricsInfo info(String name, String description) { /** * Get a metrics tag - * * @param info of the tag * @param value of the tag * @return an interned metrics tag @@ -89,7 +86,6 @@ public static MetricsTag tag(MetricsInfo info, String value) { /** * Get a metrics tag - * * @param name of the tag * @param description of the tag * @param value of the tag diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java index 6cb542586c98..d488eeb0512c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -24,7 +23,7 @@ * Interface for sources that will export JvmPauseMonitor metrics */ @InterfaceAudience.Private -public interface JvmPauseMonitorSource { +public interface JvmPauseMonitorSource { String INFO_THRESHOLD_COUNT_KEY = "pauseInfoThresholdExceeded"; String INFO_THRESHOLD_COUNT_DESC = "Count of INFO level pause threshold alerts"; @@ -36,7 +35,7 @@ public interface JvmPauseMonitorSource { String PAUSE_TIME_WITHOUT_GC_KEY = "pauseTimeWithoutGc"; String PAUSE_TIME_WITHOUT_GC_DESC = - "Histogram for excessive pause times without GC activity detected"; + "Histogram for excessive pause times without GC activity detected"; /** * Increment the INFO level threshold exceeded count @@ -52,14 +51,12 @@ public interface JvmPauseMonitorSource { /** * Update the pause time histogram where GC activity was detected. - * * @param t time it took */ void updatePauseTimeWithGc(long t); /** * Update the pause time histogram where GC activity was not detected. - * * @param t time it took */ void updatePauseTimeWithoutGc(long t); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java index 575ca31c6442..3cd7613fb215 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,26 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; import org.apache.yetus.audience.InterfaceAudience; /** - * Object that will register an mbean with the underlying metrics implementation. + * Object that will register an mbean with the underlying metrics implementation. 
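(Hedged sketch, not part of this patch: a hypothetical caller for the JvmPauseMonitorSource methods documented above, recording a detected pause into the appropriate histogram.)

import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource;

public class PauseReportingExample {
  // Hypothetical helper, not part of this patch.
  static void reportPause(JvmPauseMonitorSource source, long pauseMs, boolean gcActivityDetected) {
    if (gcActivityDetected) {
      source.updatePauseTimeWithGc(pauseMs);    // pause overlapped observed GC activity
    } else {
      source.updatePauseTimeWithoutGc(pauseMs); // pause with no GC activity detected
    }
  }
}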
*/ @InterfaceAudience.Private -public interface MBeanSource { +public interface MBeanSource { /** * Register an mbean with the underlying metrics system * @param serviceName Metrics service/system name * @param metricsName name of the metrics object to expose - * @param theMbean the actual MBean + * @param theMbean the actual MBean * @return ObjectName from jmx */ - ObjectName register(String serviceName, String metricsName, - Object theMbean); + ObjectName register(String serviceName, String metricsName, Object theMbean); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java index a5ffe8fb5e2c..f54d26bdb54d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; - import org.apache.hadoop.metrics2.util.MBeans; import org.apache.yetus.audience.InterfaceAudience; @@ -33,7 +31,7 @@ public class MBeanSourceImpl implements MBeanSource { * Register an mbean with the underlying metrics system * @param serviceName Metrics service/system name * @param metricsName name of the metrics obejct to expose - * @param theMbean the actual MBean + * @param theMbean the actual MBean * @return ObjectName from jmx */ @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java index 42d139cb4e5a..c36a592682ef 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsInfo; @@ -37,30 +36,33 @@ class MetricsInfoImpl implements MetricsInfo { this.description = Preconditions.checkNotNull(description, "description"); } - @Override public String name() { + @Override + public String name() { return name; } - @Override public String description() { + @Override + public String description() { return description; } - @Override public boolean equals(Object obj) { + @Override + public boolean equals(Object obj) { if (obj instanceof MetricsInfo) { MetricsInfo other = (MetricsInfo) obj; - return Objects.equal(name, other.name()) && - Objects.equal(description, other.description()); + return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); } return false; } - @Override public int hashCode() { + @Override + public int hashCode() { return Objects.hashCode(name, description); } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name).add("description", description) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("description", description) + .toString(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java index 064c9ca3f9a1..b90b6a3c674b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -41,9 +40,9 @@ public OperationMetrics(final MetricRegistry registry, final String metricNamePr Preconditions.checkNotNull(metricNamePrefix); /** - * TODO: As of now, Metrics description cannot be added/ registered with - * {@link MetricRegistry}. As metric names are unambiguous but concise, descriptions of - * metrics need to be made available someplace for users. + * TODO: As of now, Metrics description cannot be added/ registered with {@link MetricRegistry}. + * As metric names are unambiguous but concise, descriptions of metrics need to be made + * available someplace for users. */ submittedCounter = registry.counter(metricNamePrefix + SUBMITTED_COUNT); timeHisto = registry.histogram(metricNamePrefix + TIME); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java index a816d4970449..9afa094524b7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,11 @@ /** * This class acts as an adapter to export the MetricRegistry's in the global registry. Each - * MetricRegistry will be registered or unregistered from the metric2 system. 
The collection will - * be performed via the MetricsSourceAdapter and the MetricRegistry will collected like a - * BaseSource instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's - * JMX context. - * - *
    Developer note: - * Unlike the current metrics2 based approach, the new metrics approach + * MetricRegistry will be registered or unregistered from the metric2 system. The collection will be + * performed via the MetricsSourceAdapter and the MetricRegistry will collected like a BaseSource + * instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's JMX context. + *
    + * Developer note: Unlike the current metrics2 based approach, the new metrics approach * (hbase-metrics-api and hbase-metrics modules) work by having different MetricRegistries that are * initialized and used from the code that lives in their respective modules (hbase-server, etc). * There is no need to define BaseSource classes and do a lot of indirection. The MetricRegistry'es @@ -54,7 +52,6 @@ * MetricRegistries.global() and register adapters to the metrics2 subsystem. These adapters then * report the actual values by delegating to * {@link HBaseMetrics2HadoopMetricsAdapter#snapshotAllMetrics(MetricRegistry, MetricsCollector)}. - * * We do not initialize the Hadoop Metrics2 system assuming that other BaseSources already do so * (see BaseSourceImpl). Once the last BaseSource is moved to the new system, the metric2 * initialization should be moved here. @@ -67,6 +64,7 @@ public final class GlobalMetricRegistriesAdapter { private class MetricsSourceAdapter implements MetricsSource { private final MetricRegistry registry; + MetricsSourceAdapter(MetricRegistry registry) { this.registry = registry; } @@ -135,7 +133,7 @@ private void doRun() { MetricsSourceAdapter adapter = new MetricsSourceAdapter(registry); LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription()); DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), - info.getMetricsDescription(), adapter); + info.getMetricsDescription(), adapter); registeredSources.put(info, adapter); // next collection will collect the newly registered MetricSource. Doing this here leads to // ConcurrentModificationException. @@ -145,7 +143,7 @@ private void doRun() { boolean removed = false; // Remove registered sources if it is removed from the global registry for (Iterator> it = - registeredSources.entrySet().iterator(); it.hasNext();) { + registeredSources.entrySet().iterator(); it.hasNext();) { Entry entry = it.next(); MetricRegistryInfo info = entry.getKey(); Optional found = MetricRegistries.global().get(info); @@ -153,7 +151,7 @@ private void doRun() { if (LOG.isDebugEnabled()) { LOG.debug("Removing adapter for the MetricRegistry: " + info.getMetricsJmxContext()); } - synchronized(DefaultMetricsSystem.instance()) { + synchronized (DefaultMetricsSystem.instance()) { DefaultMetricsSystem.instance().unregisterSource(info.getMetricsJmxContext()); helper.removeSourceName(info.getMetricsJmxContext()); helper.removeObjectName(info.getMetricsJmxContext()); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java index 5fc2450cdb5e..8e8fcf736bfe 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/* - * Copyright 2016 Josh Elser - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ package org.apache.hadoop.hbase.metrics.impl; import java.util.Map; @@ -53,16 +38,15 @@ /** * This is the adapter from "HBase Metrics Framework", implemented in hbase-metrics-api and - * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, - * but a helper to be able to collect all of the Metric's in the MetricRegistry using the - * MetricsCollector and MetricsRecordBuilder. - * - * Some of the code is forked from https://github.com/joshelser/dropwizard-hadoop-metrics2. + * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, but + * a helper to be able to collect all of the Metric's in the MetricRegistry using the + * MetricsCollector and MetricsRecordBuilder. Some of the code is forked from + * https://github.com/joshelser/dropwizard-hadoop-metrics2. */ @InterfaceAudience.Private public class HBaseMetrics2HadoopMetricsAdapter { - private static final Logger LOG - = LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); + private static final Logger LOG = + LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); private static final String EMPTY_STRING = ""; public HBaseMetrics2HadoopMetricsAdapter() { @@ -70,14 +54,12 @@ public HBaseMetrics2HadoopMetricsAdapter() { /** * Iterates over the MetricRegistry and adds them to the {@code collector}. - * * @param collector A metrics collector */ - public void snapshotAllMetrics(MetricRegistry metricRegistry, - MetricsCollector collector) { + public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) { MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo(); - MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(), - info.getMetricsDescription())); + MetricsRecordBuilder builder = + collector.addRecord(Interns.info(info.getMetricsName(), info.getMetricsDescription())); builder.setContext(info.getMetricsContext()); snapshotAllMetrics(metricRegistry, builder); @@ -85,13 +67,12 @@ public void snapshotAllMetrics(MetricRegistry metricRegistry, /** * Iterates over the MetricRegistry and adds them to the {@code builder}. 
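(Hedged sketch, not part of this patch: the delegation the javadoc above describes, with assumed class and field names, for exporting an hbase-metrics MetricRegistry through a Hadoop metrics2 MetricsSource.)

import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.impl.HBaseMetrics2HadoopMetricsAdapter;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;

public class ExampleRegistrySource implements MetricsSource {
  private final MetricRegistry registry; // hbase-metrics registry to export
  private final HBaseMetrics2HadoopMetricsAdapter adapter = new HBaseMetrics2HadoopMetricsAdapter();

  public ExampleRegistrySource(MetricRegistry registry) {
    this.registry = registry;
  }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // Walk the registry and push every Gauge/Counter/Histogram/Meter/Timer into the collector.
    adapter.snapshotAllMetrics(registry, collector);
  }
}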
- * * @param builder A record builder */ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) { Map metrics = metricRegistry.getMetrics(); - for (Map.Entry e: metrics.entrySet()) { + for (Map.Entry e : metrics.entrySet()) { // Always capitalize the name String name = StringUtils.capitalize(e.getKey()); Metric metric = e.getValue(); @@ -99,13 +80,13 @@ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuild if (metric instanceof Gauge) { addGauge(name, (Gauge) metric, builder); } else if (metric instanceof Counter) { - addCounter(name, (Counter)metric, builder); + addCounter(name, (Counter) metric, builder); } else if (metric instanceof Histogram) { - addHistogram(name, (Histogram)metric, builder); + addHistogram(name, (Histogram) metric, builder); } else if (metric instanceof Meter) { - addMeter(name, (Meter)metric, builder); + addMeter(name, (Meter) metric, builder); } else if (metric instanceof Timer) { - addTimer(name, (Timer)metric, builder); + addTimer(name, (Timer) metric, builder); } else { LOG.info("Ignoring unknown Metric class " + metric.getClass().getName()); } @@ -137,10 +118,9 @@ private void addCounter(String name, Counter counter, MetricsRecordBuilder build /** * Add Histogram value-distribution data to a Hadoop-Metrics2 record building. - * - * @param name A base name for this record. + * @param name A base name for this record. * @param histogram A histogram to measure distribution of values. - * @param builder A Hadoop-Metrics2 record builder. + * @param builder A Hadoop-Metrics2 record builder. */ private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder builder) { MutableHistogram.snapshot(name, EMPTY_STRING, histogram, builder, true); @@ -149,9 +129,8 @@ private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder /** * Add Dropwizard-Metrics rate information to a Hadoop-Metrics2 record builder, converting the * rates to the appropriate unit. - * * @param builder A Hadoop-Metrics2 record builder. - * @param name A base name for this record. + * @param name A base name for this record. */ private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { builder.addGauge(Interns.info(name + "_count", EMPTY_STRING), meter.getCount()); @@ -159,7 +138,7 @@ private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { builder.addGauge(Interns.info(name + "_1min_rate", EMPTY_STRING), meter.getOneMinuteRate()); builder.addGauge(Interns.info(name + "_5min_rate", EMPTY_STRING), meter.getFiveMinuteRate()); builder.addGauge(Interns.info(name + "_15min_rate", EMPTY_STRING), - meter.getFifteenMinuteRate()); + meter.getFifteenMinuteRate()); } private void addTimer(String name, Timer timer, MetricsRecordBuilder builder) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java index 12fb43fce350..d9e972ba1fb6 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -101,16 +100,16 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { String UNBLOCKED_FLUSH_DESC = "Histogram for the number of unblocked flushes in the memstore"; String INC_MEMSTORE_TUNING_NAME = "increaseMemStoreSize"; String INC_MEMSTORE_TUNING_DESC = - "Histogram for the heap memory tuner expanding memstore global size limit in bytes"; + "Histogram for the heap memory tuner expanding memstore global size limit in bytes"; String DEC_MEMSTORE_TUNING_NAME = "decreaseMemStoreSize"; String DEC_MEMSTORE_TUNING_DESC = - "Histogram for the heap memory tuner shrinking memstore global size limit in bytes"; + "Histogram for the heap memory tuner shrinking memstore global size limit in bytes"; String INC_BLOCKCACHE_TUNING_NAME = "increaseBlockCacheSize"; String INC_BLOCKCACHE_TUNING_DESC = - "Histogram for the heap memory tuner expanding blockcache max heap size in bytes"; + "Histogram for the heap memory tuner expanding blockcache max heap size in bytes"; String DEC_BLOCKCACHE_TUNING_NAME = "decreaseBlockCacheSize"; String DEC_BLOCKCACHE_TUNING_DESC = - "Histogram for the heap memory tuner shrinking blockcache max heap size in bytes"; + "Histogram for the heap memory tuner shrinking blockcache max heap size in bytes"; // Gauges String BLOCKED_FLUSH_GAUGE_NAME = "blockedFlushGauge"; @@ -125,9 +124,9 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { // Counters String DO_NOTHING_COUNTER_NAME = "tunerDoNothingCounter"; String DO_NOTHING_COUNTER_DESC = - "The number of times that tuner neither expands memstore global size limit nor expands " + - "blockcache max size"; + "The number of times that tuner neither expands memstore global size limit nor expands " + + "blockcache max size"; String ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME = "aboveHeapOccupancyLowWaterMarkCounter"; String ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC = - "The number of times that heap occupancy percent is above low watermark"; + "The number of times that heap occupancy percent is above low watermark"; } diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java index 047f8e13b1e1..c2e8d329143c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.regionserver; @@ -29,8 +28,8 @@ * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl implements - MetricsHeapMemoryManagerSource { +public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl + implements MetricsHeapMemoryManagerSource { private final MetricHistogram blockedFlushHistogram; private final MetricHistogram unblockedFlushHistogram; @@ -52,39 +51,38 @@ public MetricsHeapMemoryManagerSourceImpl() { } public MetricsHeapMemoryManagerSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Histograms - blockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); - unblockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); - incMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); - decMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); - incBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, INC_BLOCKCACHE_TUNING_DESC); - decBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, DEC_BLOCKCACHE_TUNING_DESC); + blockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); + unblockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); + incMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); + decMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); + incBlockCacheSizeHistogram = + getMetricsRegistry().newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, INC_BLOCKCACHE_TUNING_DESC); + decBlockCacheSizeHistogram = + getMetricsRegistry().newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, DEC_BLOCKCACHE_TUNING_DESC); // Gauges - blockedFlushGauge = getMetricsRegistry() - .newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); - unblockedFlushGauge = getMetricsRegistry() - .newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); - memStoreSizeGauge = getMetricsRegistry() - .newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); - blockCacheSizeGauge = getMetricsRegistry() - .newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); + blockedFlushGauge = + getMetricsRegistry().newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); + unblockedFlushGauge = + getMetricsRegistry().newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); + memStoreSizeGauge = + getMetricsRegistry().newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); + blockCacheSizeGauge = + getMetricsRegistry().newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); // Counters - doNothingCounter = getMetricsRegistry() - .newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); + doNothingCounter = + getMetricsRegistry().newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); aboveHeapOccupancyLowWatermarkCounter = getMetricsRegistry() - .newCounter(ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, - 
ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); + .newCounter(ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java index 23d02598a3d9..74edf74c456e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsRegionAggregateSource extends BaseSource { @@ -53,14 +52,12 @@ public interface MetricsRegionAggregateSource extends BaseSource { /** * Register a MetricsRegionSource as being open. - * * @param source the source for the region being opened. */ void register(MetricsRegionSource source); /** * Remove a region's source. This is called when a region is closed. - * * @param source The region to remove. */ void deregister(MetricsRegionSource source); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java index 044d6b8bb0a1..eabc00d60386 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsCollector; @@ -35,24 +33,21 @@ @InterfaceAudience.Private public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl - implements MetricsRegionAggregateSource { + implements MetricsRegionAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsRegionAggregateSourceImpl.class); private final MetricsExecutorImpl executor = new MetricsExecutorImpl(); private final Set regionSources = - Collections.newSetFromMap(new ConcurrentHashMap()); + Collections.newSetFromMap(new ConcurrentHashMap()); public MetricsRegionAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - - public MetricsRegionAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRegionAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Every few mins clean the JMX cache. @@ -76,9 +71,8 @@ public void deregister(MetricsRegionSource toRemove) { } catch (Exception e) { // Ignored. If this errors out it means that someone is double // closing the region source and the region is already nulled out. - LOG.info( - "Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), - e); + LOG.info("Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), + e); } clearCache(); } @@ -88,10 +82,9 @@ private synchronized void clearCache() { } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector * @param all get all the metrics regardless of when they last changed. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java index 93990ef1bd4e..991187bc98eb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,31 +40,28 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { String REGION_SIZE_REPORTING_CHORE_TIME_NAME = "regionSizeReportingChoreTime"; /** - * Updates the metric tracking how many tables this RegionServer has marked as in violation - * of their space quota. + * Updates the metric tracking how many tables this RegionServer has marked as in violation of + * their space quota. */ void updateNumTablesInSpaceQuotaViolation(long tablesInViolation); /** * Updates the metric tracking how many tables this RegionServer has received * {@code SpaceQuotaSnapshot}s for. - * * @param numSnapshots The number of {@code SpaceQuotaSnapshot}s received from the Master. */ void updateNumTableSpaceQuotaSnapshots(long numSnapshots); /** - * Updates the metric tracking how much time was spent scanning the filesystem to compute - * the size of each region hosted by this RegionServer. - * + * Updates the metric tracking how much time was spent scanning the filesystem to compute the size + * of each region hosted by this RegionServer. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaFileSystemScannerChoreTime(long time); /** - * Updates the metric tracking how much time was spent updating the RegionServer with the - * latest information on space quotas from the {@code hbase:quota} table. - * + * Updates the metric tracking how much time was spent updating the RegionServer with the latest + * information on space quotas from the {@code hbase:quota} table. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaRefresherChoreTime(long time); @@ -71,7 +69,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how many region size reports were sent from this RegionServer to * the Master. These reports contain information on the size of each Region hosted locally. - * * @param numReportsSent The number of region size reports sent */ void incrementNumRegionSizeReportsSent(long numReportsSent); @@ -79,7 +76,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how much time was spent sending region size reports to the Master * by the RegionSizeReportingChore. - * * @param time The execution time in milliseconds. 
*/ void incrementRegionSizeReportingChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java index 3a796ddf0c5f..b13a0508391c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Meter; @@ -28,8 +28,8 @@ * Implementation of {@link MetricsRegionServerQuotaSource}. */ @InterfaceAudience.Private -public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl implements - MetricsRegionServerQuotaSource { +public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl + implements MetricsRegionServerQuotaSource { private final Meter tablesInViolationCounter; private final Meter spaceQuotaSnapshotsReceived; @@ -43,7 +43,7 @@ public MetricsRegionServerQuotaSourceImpl() { } public MetricsRegionServerQuotaSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); tablesInViolationCounter = this.registry.meter(NUM_TABLES_IN_VIOLATION_NAME); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 6f103d005233..7bffc57d0c0a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -50,7 +49,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Put time histogram - * * @param t time it took */ void updatePut(long t); @@ -63,7 +61,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Delete time histogram - * * @param t time it took */ void updateDelete(long t); @@ -94,42 +91,37 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Get time histogram . - * * @param t time it took */ void updateGet(long t); /** * Update the Increment time histogram. - * * @param t time it took */ void updateIncrement(long t); /** * Update the Append time histogram. - * * @param t time it took */ void updateAppend(long t); /** * Update the Replay time histogram. - * * @param t time it took */ void updateReplay(long t); /** * Update the scan size. - * * @param scanSize size of the scan */ void updateScanSize(long scanSize); /** * Update the scan time. - * */ + */ void updateScanTime(long t); /** @@ -194,35 +186,35 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the compaction time histogram, both major and minor * @param isMajor whether compaction is a major compaction - * @param t time it took, in milliseconds + * @param t time it took, in milliseconds */ void updateCompactionTime(boolean isMajor, long t); /** * Update the compaction input number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionInputFileCount(boolean isMajor, long c); /** * Update the compaction total input file size histogram * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionInputSize(boolean isMajor, long bytes); /** * Update the compaction output number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionOutputFileCount(boolean isMajor, long c); /** * Update the compaction total output file size * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionOutputSize(boolean isMajor, long bytes); @@ -256,57 +248,55 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String NUM_REFERENCE_FILES_DESC = "Number of reference file on this RegionServer"; String STOREFILE_SIZE_DESC = "Size of storefiles being served."; String STOREFILE_SIZE_GROWTH_RATE_DESC = - "Bytes per second by which the size of storefiles being served grows."; + "Bytes per second by which the size of storefiles being served grows."; String TOTAL_REQUEST_COUNT = "totalRequestCount"; String TOTAL_REQUEST_COUNT_DESC = - "Total number of requests this RegionServer has answered; increments the count once for " + - "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + - "of a 
non-existent row"; + "Total number of requests this RegionServer has answered; increments the count once for " + + "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + + "of a non-existent row"; String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount"; String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC = - "Total number of region requests this RegionServer has answered; counts by row-level " + - "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + - "once per access whether a Put of 1M rows or a Get that returns 1M Results"; + "Total number of region requests this RegionServer has answered; counts by row-level " + + "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + + "once per access whether a Put of 1M rows or a Get that returns 1M Results"; String READ_REQUEST_COUNT = "readRequestCount"; String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount"; String FILTERED_READ_REQUEST_COUNT_DESC = - "Number of read requests this region server has answered."; + "Number of read requests this region server has answered."; String READ_REQUEST_COUNT_DESC = - "Number of read requests with non-empty Results that this RegionServer has answered."; + "Number of read requests with non-empty Results that this RegionServer has answered."; String READ_REQUEST_RATE_PER_SECOND = "readRequestRatePerSecond"; String READ_REQUEST_RATE_DESC = - "Rate of answering the read requests by this region server per second."; + "Rate of answering the read requests by this region server per second."; String CP_REQUEST_COUNT = "cpRequestCount"; String CP_REQUEST_COUNT_DESC = - "Number of coprocessor service requests this region server has answered."; + "Number of coprocessor service requests this region server has answered."; String WRITE_REQUEST_COUNT = "writeRequestCount"; - String WRITE_REQUEST_COUNT_DESC = - "Number of mutation requests this RegionServer has answered."; + String WRITE_REQUEST_COUNT_DESC = "Number of mutation requests this RegionServer has answered."; String WRITE_REQUEST_RATE_PER_SECOND = "writeRequestRatePerSecond"; String WRITE_REQUEST_RATE_DESC = - "Rate of answering the mutation requests by this region server per second."; + "Rate of answering the mutation requests by this region server per second."; String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount"; String CHECK_MUTATE_FAILED_COUNT_DESC = - "Number of Check and Mutate calls that failed the checks."; + "Number of Check and Mutate calls that failed the checks."; String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount"; String CHECK_MUTATE_PASSED_COUNT_DESC = - "Number of Check and Mutate calls that passed the checks."; + "Number of Check and Mutate calls that passed the checks."; String STOREFILE_INDEX_SIZE = "storeFileIndexSize"; String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk."; String STATIC_INDEX_SIZE = "staticIndexSize"; String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; String STATIC_BLOOM_SIZE = "staticBloomSize"; - String STATIC_BLOOM_SIZE_DESC = - "Uncompressed size of the static bloom filters."; + String STATIC_BLOOM_SIZE_DESC = "Uncompressed size of the static bloom filters."; String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = - "Number of mutations that have been sent by clients with the write ahead logging turned off."; + "Number of mutations that have been sent by clients with the write ahead logging 
turned off."; String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize"; String DATA_SIZE_WITHOUT_WAL_DESC = - "Size of data that has been sent by clients with the write ahead logging turned off."; + "Size of data that has been sent by clients with the write ahead logging turned off."; String PERCENT_FILES_LOCAL = "percentFilesLocal"; String PERCENT_FILES_LOCAL_DESC = - "The percent of HFiles that are stored on the local hdfs data node."; + "The percent of HFiles that are stored on the local hdfs data node."; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS = "percentFilesLocalSecondaryRegions"; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC = "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; @@ -317,14 +307,13 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String SMALL_COMPACTION_QUEUE_LENGTH = "smallCompactionQueueLength"; String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions."; String LARGE_COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions with input size " - + "larger than throttle threshold (2.5GB by default)"; + + "larger than throttle threshold (2.5GB by default)"; String SMALL_COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions with input size " - + "smaller than throttle threshold (2.5GB by default)"; + + "smaller than throttle threshold (2.5GB by default)"; String FLUSH_QUEUE_LENGTH = "flushQueueLength"; String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; - String BLOCK_CACHE_FREE_DESC = - "Size of the block cache that is not occupied."; + String BLOCK_CACHE_FREE_DESC = "Size of the block cache that is not occupied."; String BLOCK_CACHE_COUNT = "blockCacheCount"; String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; String BLOCK_CACHE_SIZE = "blockCacheSize"; @@ -335,26 +324,25 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC = "Count of hit on primary replica in the block cache."; String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; String BLOCK_COUNT_MISS_COUNT_DESC = - "Number of requests for a block that missed the block cache."; + "Number of requests for a block that missed the block cache."; String BLOCK_CACHE_PRIMARY_MISS_COUNT = "blockCacheMissCountPrimary"; String BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC = - "Number of requests for a block of primary replica that missed the block cache."; + "Number of requests for a block of primary replica that missed the block cache."; String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; String BLOCK_CACHE_EVICTION_COUNT_DESC = - "Count of the number of blocks evicted from the block cache." + "Count of the number of blocks evicted from the block cache." 
+ "(Not including blocks evicted because of HFile removal)"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT = "blockCacheEvictionCountPrimary"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC = - "Count of the number of blocks evicted from primary replica in the block cache."; + "Count of the number of blocks evicted from primary replica in the block cache."; String BLOCK_CACHE_HIT_PERCENT = "blockCacheCountHitPercent"; - String BLOCK_CACHE_HIT_PERCENT_DESC = - "Percent of block cache requests that are hits"; + String BLOCK_CACHE_HIT_PERCENT_DESC = "Percent of block cache requests that are hits"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = - "The percent of the time that requests with the cache turned on hit the cache."; + "The percent of the time that requests with the cache turned on hit the cache."; String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount"; - String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " + - "insertion failed. Usually due to size restrictions."; + String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = + "Number of times that a block cache " + "insertion failed. Usually due to size restrictions."; String BLOCK_CACHE_DATA_MISS_COUNT = "blockCacheDataMissCount"; String BLOCK_CACHE_ENCODED_DATA_MISS_COUNT = "blockCacheEncodedDataMissCount"; String BLOCK_CACHE_LEAF_INDEX_MISS_COUNT = "blockCacheLeafIndexMissCount"; @@ -403,7 +391,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String CLUSTER_ID_DESC = "Cluster Id"; String UPDATES_BLOCKED_TIME = "updatesBlockedTime"; String UPDATES_BLOCKED_DESC = - "Number of MS updates have been blocked so that the memstore can be flushed."; + "Number of MS updates have been blocked so that the memstore can be flushed."; String DELETE_KEY = "delete"; String CHECK_AND_DELETE_KEY = "checkAndDelete"; String CHECK_AND_PUT_KEY = "checkAndPut"; @@ -425,15 +413,12 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String SLOW_DELETE_KEY = "slowDeleteCount"; String SLOW_INCREMENT_KEY = "slowIncrementCount"; String SLOW_APPEND_KEY = "slowAppendCount"; - String SLOW_PUT_DESC = - "The number of batches containing puts that took over 1000ms to complete"; + String SLOW_PUT_DESC = "The number of batches containing puts that took over 1000ms to complete"; String SLOW_DELETE_DESC = - "The number of batches containing delete(s) that took over 1000ms to complete"; + "The number of batches containing delete(s) that took over 1000ms to complete"; String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; - String SLOW_INCREMENT_DESC = - "The number of Increments that took over 1000ms to complete"; - String SLOW_APPEND_DESC = - "The number of Appends that took over 1000ms to complete"; + String SLOW_INCREMENT_DESC = "The number of Increments that took over 1000ms to complete"; + String SLOW_APPEND_DESC = "The number of Appends that took over 1000ms to complete"; String FLUSHED_CELLS = "flushedCellsCount"; String FLUSHED_CELLS_DESC = "The number of cells flushed to disk"; @@ -443,25 +428,23 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String COMPACTED_CELLS_DESC = "The number of cells processed during minor compactions"; String COMPACTED_CELLS_SIZE = "compactedCellsSize"; String COMPACTED_CELLS_SIZE_DESC = - "The total amount of data processed during minor compactions, in bytes"; + "The total amount of data 
processed during minor compactions, in bytes"; String MAJOR_COMPACTED_CELLS = "majorCompactedCellsCount"; - String MAJOR_COMPACTED_CELLS_DESC = - "The number of cells processed during major compactions"; + String MAJOR_COMPACTED_CELLS_DESC = "The number of cells processed during major compactions"; String MAJOR_COMPACTED_CELLS_SIZE = "majorCompactedCellsSize"; String MAJOR_COMPACTED_CELLS_SIZE_DESC = - "The total amount of data processed during major compactions, in bytes"; + "The total amount of data processed during major compactions, in bytes"; String CELLS_COUNT_COMPACTED_TO_MOB = "cellsCountCompactedToMob"; - String CELLS_COUNT_COMPACTED_TO_MOB_DESC = - "The number of cells moved to mob during compaction"; + String CELLS_COUNT_COMPACTED_TO_MOB_DESC = "The number of cells moved to mob during compaction"; String CELLS_COUNT_COMPACTED_FROM_MOB = "cellsCountCompactedFromMob"; String CELLS_COUNT_COMPACTED_FROM_MOB_DESC = - "The number of cells moved from mob during compaction"; + "The number of cells moved from mob during compaction"; String CELLS_SIZE_COMPACTED_TO_MOB = "cellsSizeCompactedToMob"; String CELLS_SIZE_COMPACTED_TO_MOB_DESC = - "The total amount of cells move to mob during compaction, in bytes"; + "The total amount of cells move to mob during compaction, in bytes"; String CELLS_SIZE_COMPACTED_FROM_MOB = "cellsSizeCompactedFromMob"; String CELLS_SIZE_COMPACTED_FROM_MOB_DESC = - "The total amount of cells move from mob during compaction, in bytes"; + "The total amount of cells move from mob during compaction, in bytes"; String MOB_FLUSH_COUNT = "mobFlushCount"; String MOB_FLUSH_COUNT_DESC = "The number of the flushes in mob-enabled stores"; String MOB_FLUSHED_CELLS_COUNT = "mobFlushedCellsCount"; @@ -487,7 +470,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String HEDGED_READS_DESC = "The number of times we started a hedged read"; String HEDGED_READ_WINS = "hedgedReadWins"; String HEDGED_READ_WINS_DESC = - "The number of times we started a hedged read and a hedged read won"; + "The number of times we started a hedged read and a hedged read won"; String HEDGED_READ_IN_CUR_THREAD = "hedgedReadOpsInCurThread"; String HEDGED_READ_IN_CUR_THREAD_DESC = "The number of times we execute a hedged read" + " in current thread as a fallback for task rejection"; @@ -495,17 +478,15 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String TOTAL_BYTES_READ = "totalBytesRead"; String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS"; String LOCAL_BYTES_READ = "localBytesRead"; - String LOCAL_BYTES_READ_DESC = - "The number of bytes read from the local HDFS DataNode"; + String LOCAL_BYTES_READ_DESC = "The number of bytes read from the local HDFS DataNode"; String SHORTCIRCUIT_BYTES_READ = "shortCircuitBytesRead"; String SHORTCIRCUIT_BYTES_READ_DESC = "The number of bytes read through HDFS short circuit read"; String ZEROCOPY_BYTES_READ = "zeroCopyBytesRead"; - String ZEROCOPY_BYTES_READ_DESC = - "The number of bytes read through HDFS zero copy"; + String ZEROCOPY_BYTES_READ_DESC = "The number of bytes read through HDFS zero copy"; String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " - + "larger than blockingMemStoreSize"; + + "larger than blockingMemStoreSize"; String SPLIT_KEY = "splitTime"; String SPLIT_REQUEST_KEY = "splitRequestCount"; @@ -525,77 +506,76 @@ public interface MetricsRegionServerSource 
extends BaseSource, JvmPauseMonitorSo String FLUSHED_MEMSTORE_BYTES_DESC = "Total number of bytes of cells in memstore from flush"; String COMPACTION_TIME = "compactionTime"; - String COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, both major and minor"; + String COMPACTION_TIME_DESC = + "Histogram for the time in millis for compaction, both major and minor"; String COMPACTION_INPUT_FILE_COUNT = "compactionInputFileCount"; - String COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, both major and minor"; + String COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, both major and minor"; String COMPACTION_INPUT_SIZE = "compactionInputSize"; - String COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, both major and minor"; + String COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, both major and minor"; String COMPACTION_OUTPUT_FILE_COUNT = "compactionOutputFileCount"; - String COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, both major and minor"; + String COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, both major and minor"; String COMPACTION_OUTPUT_SIZE = "compactionOutputSize"; - String COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, both major and minor"; + String COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, both major and minor"; String COMPACTED_INPUT_BYTES = "compactedInputBytes"; - String COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, both major and minor"; + String COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, both major and minor"; String COMPACTED_OUTPUT_BYTES = "compactedOutputBytes"; - String COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, both major and minor"; + String COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, both major and minor"; String MAJOR_COMPACTION_TIME = "majorCompactionTime"; - String MAJOR_COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, major only"; + String MAJOR_COMPACTION_TIME_DESC = "Histogram for the time in millis for compaction, major only"; String MAJOR_COMPACTION_INPUT_FILE_COUNT = "majorCompactionInputFileCount"; - String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, major only"; + String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, major only"; String MAJOR_COMPACTION_INPUT_SIZE = "majorCompactionInputSize"; - String MAJOR_COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, major only"; + String MAJOR_COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, major only"; String MAJOR_COMPACTION_OUTPUT_FILE_COUNT = "majorCompactionOutputFileCount"; - String MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, major only"; + String MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, major only"; String MAJOR_COMPACTION_OUTPUT_SIZE = "majorCompactionOutputSize"; - String MAJOR_COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, major only"; + String 
MAJOR_COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, major only"; String MAJOR_COMPACTED_INPUT_BYTES = "majorCompactedInputBytes"; - String MAJOR_COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, major only"; + String MAJOR_COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, major only"; String MAJOR_COMPACTED_OUTPUT_BYTES = "majorCompactedOutputBytes"; - String MAJOR_COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, major only"; + String MAJOR_COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, major only"; String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount"; String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this RegionServer has answered."; String RPC_SCAN_REQUEST_COUNT = "rpcScanRequestCount"; String RPC_SCAN_REQUEST_COUNT_DESC = - "Number of rpc scan requests this RegionServer has answered."; + "Number of rpc scan requests this RegionServer has answered."; String RPC_FULL_SCAN_REQUEST_COUNT = "rpcFullScanRequestCount"; String RPC_FULL_SCAN_REQUEST_COUNT_DESC = - "Number of rpc scan requests that were possible full region scans."; + "Number of rpc scan requests that were possible full region scans."; String RPC_MULTI_REQUEST_COUNT = "rpcMultiRequestCount"; String RPC_MULTI_REQUEST_COUNT_DESC = - "Number of rpc multi requests this RegionServer has answered."; + "Number of rpc multi requests this RegionServer has answered."; String RPC_MUTATE_REQUEST_COUNT = "rpcMutateRequestCount"; String RPC_MUTATE_REQUEST_COUNT_DESC = - "Number of rpc mutation requests this RegionServer has answered."; + "Number of rpc mutation requests this RegionServer has answered."; String AVERAGE_REGION_SIZE = "averageRegionSize"; String AVERAGE_REGION_SIZE_DESC = - "Average region size over the RegionServer including memstore and storefile sizes."; + "Average region size over the RegionServer including memstore and storefile sizes."; /** Metrics for {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} **/ String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES = "ByteBuffAllocatorHeapAllocationBytes"; String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC = - "Bytes of heap allocation from ByteBuffAllocator"; + "Bytes of heap allocation from ByteBuffAllocator"; String BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES = "ByteBuffAllocatorPoolAllocationBytes"; String BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC = - "Bytes of pool allocation from ByteBuffAllocator"; + "Bytes of pool allocation from ByteBuffAllocator"; String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO = "ByteBuffAllocatorHeapAllocationRatio"; String BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC = - "Ratio of heap allocation from ByteBuffAllocator, means heapAllocation/totalAllocation"; + "Ratio of heap allocation from ByteBuffAllocator, means heapAllocation/totalAllocation"; String BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT = "ByteBuffAllocatorTotalBufferCount"; String BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC = "Total buffer count in ByteBuffAllocator"; String BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT = "ByteBuffAllocatorUsedBufferCount"; @@ -605,5 +585,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String ACTIVE_SCANNERS_DESC = "Gauge of currently active scanners"; String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount"; - String SCANNER_LEASE_EXPIRED_COUNT_DESC = "Count of scanners which were expired due to scanner lease timeout"; + 
String SCANNER_LEASE_EXPIRED_COUNT_DESC =
+    "Count of scanners which were expired due to scanner lease timeout";
 }
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
index ef33909839ce..9a12d75373f8 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.io.MetricsIOSource;
@@ -30,7 +29,6 @@ public interface MetricsRegionServerSourceFactory {
 
   /**
    * Given a wrapper create a MetricsRegionServerSource.
-   *
    * @param regionServerWrapper The wrapped region server
    * @return a Metrics Source.
    */
@@ -38,7 +36,6 @@ public interface MetricsRegionServerSourceFactory {
 
   /**
    * Create a MetricsRegionSource from a MetricsRegionWrapper.
-   *
    * @param wrapper The wrapped region
    * @return A metrics region source
   */
@@ -58,8 +55,7 @@ public interface MetricsRegionServerSourceFactory {
 
   /**
    * Create a MetricsTableSource from a MetricsTableWrapper.
-   *
-   * @param table The table name
+   * @param table   The table name
    * @param wrapper The wrapped table aggregate
    * @return A metrics table source
    */
@@ -67,7 +63,6 @@ public interface MetricsRegionServerSourceFactory {
 
   /**
    * Get a MetricsTableAggregateSource
-   *
    * @return A metrics table aggregate source
    */
   MetricsTableAggregateSource getTableAggregate();
@@ -80,7 +75,6 @@ public interface MetricsRegionServerSourceFactory {
 
   /**
    * Create a MetricsIOSource from a MetricsIOWrapper.
-   *
    * @return A metrics IO source
    */
   MetricsIOSource createIO(MetricsIOWrapper wrapper);
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
index ccc17492dba5..c2a5e163f0fa 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -23,12 +23,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper + * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper */ @InterfaceAudience.Private public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { public static enum FactoryStorage { INSTANCE; + private Object aggLock = new Object(); private MetricsRegionAggregateSourceImpl regionAggImpl; private MetricsUserAggregateSourceImpl userAggImpl; @@ -75,8 +76,8 @@ public synchronized MetricsHeapMemoryManagerSource getHeapMemoryManager() { } @Override - public synchronized MetricsRegionServerSource createServer( - MetricsRegionServerWrapper regionServerWrapper) { + public synchronized MetricsRegionServerSource + createServer(MetricsRegionServerWrapper regionServerWrapper) { return new MetricsRegionServerSourceImpl(regionServerWrapper); } @@ -97,6 +98,6 @@ public MetricsIOSource createIO(MetricsIOWrapper wrapper) { @Override public org.apache.hadoop.hbase.regionserver.MetricsUserSource createUser(String shortUserName) { return new org.apache.hadoop.hbase.regionserver.MetricsUserSourceImpl(shortUserName, - getUserAggregate()); + getUserAggregate()); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 966d75ac9fc4..f8cebd2ec60d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -27,13 +26,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsRegionServerSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsRegionServerSource. 
Implements BaseSource through + * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsRegionServerSourceImpl - extends BaseSourceImpl implements MetricsRegionServerSource { +public class MetricsRegionServerSourceImpl extends BaseSourceImpl + implements MetricsRegionServerSource { final MetricsRegionServerWrapper rsWrap; private final MetricHistogram putHisto; @@ -97,11 +95,8 @@ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); } - public MetricsRegionServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsRegionServerWrapper rsWrap) { + public MetricsRegionServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsRegionServerWrapper rsWrap) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.rsWrap = rsWrap; @@ -131,58 +126,59 @@ public MetricsRegionServerSourceImpl(String metricsName, scanTimeHisto = getMetricsRegistry().newTimeHistogram(SCAN_TIME_KEY); flushTimeHisto = getMetricsRegistry().newTimeHistogram(FLUSH_TIME, FLUSH_TIME_DESC); - flushMemstoreSizeHisto = getMetricsRegistry() - .newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); - flushOutputSizeHisto = getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, - FLUSH_OUTPUT_SIZE_DESC); - flushedOutputBytes = getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, - FLUSHED_OUTPUT_BYTES_DESC, 0L); - flushedMemstoreBytes = getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, - FLUSHED_MEMSTORE_BYTES_DESC, 0L); - - compactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); - compactionInputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC); - compactionInputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); - compactionOutputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC); - compactionOutputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); - compactedInputBytes = getMetricsRegistry() - .newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); - compactedOutputBytes = getMetricsRegistry() - .newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); - - majorCompactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); + flushMemstoreSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); + flushOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); + flushedOutputBytes = + getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); + flushedMemstoreBytes = + getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, FLUSHED_MEMSTORE_BYTES_DESC, 0L); + + compactionTimeHisto = + getMetricsRegistry().newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); + compactionInputFileCountHisto = getMetricsRegistry().newHistogram(COMPACTION_INPUT_FILE_COUNT, + COMPACTION_INPUT_FILE_COUNT_DESC); + compactionInputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); + compactionOutputFileCountHisto = 
getMetricsRegistry().newHistogram(COMPACTION_OUTPUT_FILE_COUNT, + COMPACTION_OUTPUT_FILE_COUNT_DESC); + compactionOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); + compactedInputBytes = + getMetricsRegistry().newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); + compactedOutputBytes = + getMetricsRegistry().newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); + + majorCompactionTimeHisto = + getMetricsRegistry().newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); majorCompactionInputFileCountHisto = getMetricsRegistry() .newHistogram(MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC); majorCompactionInputSizeHisto = getMetricsRegistry() - .newSizeHistogram(MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); + .newSizeHistogram(MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); majorCompactionOutputFileCountHisto = getMetricsRegistry() - .newHistogram(MAJOR_COMPACTION_OUTPUT_FILE_COUNT, MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); + .newHistogram(MAJOR_COMPACTION_OUTPUT_FILE_COUNT, MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); majorCompactionOutputSizeHisto = getMetricsRegistry() .newSizeHistogram(MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); - majorCompactedInputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_INPUT_BYTES, MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); - majorCompactedOutputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); + majorCompactedInputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_INPUT_BYTES, + MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); + majorCompactedOutputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, + MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); splitTimeHisto = getMetricsRegistry().newTimeHistogram(SPLIT_KEY); splitRequest = getMetricsRegistry().newCounter(SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); splitSuccess = getMetricsRegistry().newCounter(SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); - scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); + scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, + SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); } @Override @@ -332,10 +328,9 @@ public void incrScannerLeaseExpired() { } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. 
- * * @param metricsCollector Collector to accept metrics * @param all push all or only changed? */ @@ -346,137 +341,131 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // rsWrap can be null because this function is called inside of init. if (rsWrap != null) { addGaugesToMetricsRecordBuilder(mrb) - .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), - rsWrap.getTotalRequestCount()) - .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, - TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), rsWrap.getTotalRowActionRequestCount()) - .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), - rsWrap.getReadRequestsCount()) - .addCounter(Interns.info(CP_REQUEST_COUNT, CP_REQUEST_COUNT_DESC), - rsWrap.getCpRequestsCount()) - .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, - FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount()) - .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), - rsWrap.getWriteRequestsCount()) - .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), - rsWrap.getRpcGetRequestsCount()) - .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcFullScanRequestsCount()) - .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcScanRequestsCount()) - .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), - rsWrap.getRpcMultiRequestsCount()) - .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), - rsWrap.getRpcMutateRequestsCount()) - .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksFailed()) - .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksPassed()) - .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), - rsWrap.getBlockCacheHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, - BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), - rsWrap.getBlockCacheMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, - BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), rsWrap.getBlockCachePrimaryMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), - rsWrap.getBlockCacheEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, - BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), - rsWrap.getBlockCachePrimaryEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, - BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), - rsWrap.getBlockCacheFailedInsertions()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), - rsWrap.getDataMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), - rsWrap.getLeafIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), - rsWrap.getBloomChunkMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), - rsWrap.getMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), - rsWrap.getRootIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), - rsWrap.getIntermediateIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), - rsWrap.getFileInfoMissCount()) - 
.addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), - rsWrap.getGeneralBloomMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), - rsWrap.getDeleteFamilyBloomMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), - rsWrap.getTrailerMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), - rsWrap.getDataHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), - rsWrap.getLeafIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), - rsWrap.getBloomChunkHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), - rsWrap.getMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), - rsWrap.getRootIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), - rsWrap.getIntermediateIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), - rsWrap.getFileInfoHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), - rsWrap.getGeneralBloomMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), - rsWrap.getDeleteFamilyBloomHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), - rsWrap.getTrailerHitCount()) - .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), - rsWrap.getUpdatesBlockedTime()) - .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), - rsWrap.getFlushedCellsCount()) - .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), - rsWrap.getCompactedCellsCount()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), - rsWrap.getMajorCompactedCellsCount()) - .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), - rsWrap.getFlushedCellsSize()) - .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), - rsWrap.getCompactedCellsSize()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), - rsWrap.getMajorCompactedCellsSize()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, - CELLS_COUNT_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsCountCompactedFromMob()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, - CELLS_COUNT_COMPACTED_TO_MOB_DESC), rsWrap.getCellsCountCompactedToMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, - CELLS_SIZE_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsSizeCompactedFromMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, - CELLS_SIZE_COMPACTED_TO_MOB_DESC), rsWrap.getCellsSizeCompactedToMob()) - .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), - rsWrap.getMobFlushCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), - rsWrap.getMobFlushedCellsCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), - rsWrap.getMobFlushedCellsSize()) - .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), - rsWrap.getMobScanCellsCount()) - .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), - rsWrap.getMobScanCellsSize()) - .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, - MOB_FILE_CACHE_ACCESS_COUNT_DESC), rsWrap.getMobFileCacheAccessCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), - rsWrap.getMobFileCacheMissCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, - MOB_FILE_CACHE_EVICTED_COUNT_DESC), 
rsWrap.getMobFileCacheEvictedCount()) - .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) - .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), - rsWrap.getHedgedReadWins()) - .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), - rsWrap.getHedgedReadOpsInCurThread()) - .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), - rsWrap.getBlockedRequestsCount()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - rsWrap.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); + .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), + rsWrap.getTotalRequestCount()) + .addCounter( + Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), + rsWrap.getTotalRowActionRequestCount()) + .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), + rsWrap.getReadRequestsCount()) + .addCounter(Interns.info(CP_REQUEST_COUNT, CP_REQUEST_COUNT_DESC), + rsWrap.getCpRequestsCount()) + .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC), + rsWrap.getFilteredReadRequestsCount()) + .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), + rsWrap.getWriteRequestsCount()) + .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), + rsWrap.getRpcGetRequestsCount()) + .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcFullScanRequestsCount()) + .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcScanRequestsCount()) + .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), + rsWrap.getRpcMultiRequestsCount()) + .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), + rsWrap.getRpcMutateRequestsCount()) + .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksFailed()) + .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksPassed()) + .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), + rsWrap.getBlockCacheHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), + rsWrap.getBlockCachePrimaryHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), + rsWrap.getBlockCacheMissCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), + rsWrap.getBlockCachePrimaryMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), + rsWrap.getBlockCacheEvictedCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), + rsWrap.getBlockCachePrimaryEvictedCount()) + .addCounter( + Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), + rsWrap.getBlockCacheFailedInsertions()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), rsWrap.getDataMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), + rsWrap.getLeafIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), + rsWrap.getBloomChunkMissCount()) + 
.addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), rsWrap.getMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), + rsWrap.getRootIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), + rsWrap.getIntermediateIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), + rsWrap.getFileInfoMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), + rsWrap.getGeneralBloomMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), + rsWrap.getDeleteFamilyBloomMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), rsWrap.getTrailerMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), rsWrap.getDataHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), + rsWrap.getLeafIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), + rsWrap.getBloomChunkHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), rsWrap.getMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), + rsWrap.getRootIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), + rsWrap.getIntermediateIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), rsWrap.getFileInfoHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), + rsWrap.getGeneralBloomMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), + rsWrap.getDeleteFamilyBloomHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), rsWrap.getTrailerHitCount()) + .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), + rsWrap.getUpdatesBlockedTime()) + .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), rsWrap.getFlushedCellsCount()) + .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), + rsWrap.getCompactedCellsCount()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), + rsWrap.getMajorCompactedCellsCount()) + .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), + rsWrap.getFlushedCellsSize()) + .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), + rsWrap.getCompactedCellsSize()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), + rsWrap.getMajorCompactedCellsSize()) + .addCounter( + Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsCountCompactedFromMob()) + .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, CELLS_COUNT_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsCountCompactedToMob()) + .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, CELLS_SIZE_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsSizeCompactedFromMob()) + .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, CELLS_SIZE_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsSizeCompactedToMob()) + .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), rsWrap.getMobFlushCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), + rsWrap.getMobFlushedCellsCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), + rsWrap.getMobFlushedCellsSize()) + .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), + rsWrap.getMobScanCellsCount()) + .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), 
+ rsWrap.getMobScanCellsSize()) + .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, MOB_FILE_CACHE_ACCESS_COUNT_DESC), + rsWrap.getMobFileCacheAccessCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), + rsWrap.getMobFileCacheMissCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, MOB_FILE_CACHE_EVICTED_COUNT_DESC), + rsWrap.getMobFileCacheEvictedCount()) + .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) + .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), + rsWrap.getHedgedReadWins()) + .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), + rsWrap.getHedgedReadOpsInCurThread()) + .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), + rsWrap.getBlockedRequestsCount()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), + rsWrap.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); } metricsRegistry.snapshot(mrb, all); @@ -490,114 +479,107 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { private MetricsRecordBuilder addGaugesToMetricsRecordBuilder(MetricsRecordBuilder mrb) { return mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions()) - .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) - .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) - .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) - .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), - rsWrap.getNumStoreFiles()) - .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) - .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) - .addGauge(Interns.info(STOREFILE_SIZE_GROWTH_RATE, STOREFILE_SIZE_GROWTH_RATE_DESC), - rsWrap.getStoreFileSizeGrowthRate()) - .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), - rsWrap.getMaxStoreFileAge()) - .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), - rsWrap.getMinStoreFileAge()) - .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), - rsWrap.getAvgStoreFileAge()) - .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), - rsWrap.getNumReferenceFiles()) - .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) - .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), - rsWrap.getAverageRegionSize()) - .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), - rsWrap.getStoreFileIndexSize()) - .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), - rsWrap.getTotalStaticIndexSize()) - .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), - rsWrap.getTotalStaticBloomSize()) - .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, - NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), rsWrap.getNumMutationsWithoutWAL()) - .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), - rsWrap.getDataInMemoryWithoutWAL()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), - rsWrap.getPercentFileLocal()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, - PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), - rsWrap.getPercentFileLocalSecondaryRegions()) - 
.addGauge(Interns.info(TOTAL_BYTES_READ, - TOTAL_BYTES_READ_DESC), - rsWrap.getTotalBytesRead()) - .addGauge(Interns.info(LOCAL_BYTES_READ, - LOCAL_BYTES_READ_DESC), - rsWrap.getLocalBytesRead()) - .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, - SHORTCIRCUIT_BYTES_READ_DESC), - rsWrap.getShortCircuitBytesRead()) - .addGauge(Interns.info(ZEROCOPY_BYTES_READ, - ZEROCOPY_BYTES_READ_DESC), - rsWrap.getZeroCopyBytesRead()) - .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), - rsWrap.getSplitQueueSize()) - .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), - rsWrap.getCompactionQueueSize()) - .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, - SMALL_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getSmallCompactionQueueSize()) - .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, - LARGE_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getLargeCompactionQueueSize()) - .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), - rsWrap.getFlushQueueSize()) - .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), - rsWrap.getBlockCacheFreeSize()) - .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), - rsWrap.getBlockCacheCount()) - .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), - rsWrap.getBlockCacheSize()) - .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), - rsWrap.getBlockCacheHitPercent()) - .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, - BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) - .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), - rsWrap.getL1CacheHitCount()) - .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), - rsWrap.getL1CacheMissCount()) - .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), - rsWrap.getL1CacheHitRatio()) - .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), - rsWrap.getL1CacheMissRatio()) - .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), - rsWrap.getL2CacheHitCount()) - .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), - rsWrap.getL2CacheMissCount()) - .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), - rsWrap.getL2CacheHitRatio()) - .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), - rsWrap.getL2CacheMissRatio()) - .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), - rsWrap.getMobFileCacheCount()) - .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), - rsWrap.getMobFileCacheHitPercent()) - .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), - rsWrap.getReadRequestsRatePerSecond()) - .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), - rsWrap.getWriteRequestsRatePerSecond()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorHeapAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorPoolAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), - rsWrap.getByteBuffAllocatorHeapAllocRatio()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorTotalBufferCount()) - 
.addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorUsedBufferCount()) - .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), - rsWrap.getActiveScanners()); + .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) + .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) + .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) + .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles()) + .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) + .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) + .addGauge(Interns.info(STOREFILE_SIZE_GROWTH_RATE, STOREFILE_SIZE_GROWTH_RATE_DESC), + rsWrap.getStoreFileSizeGrowthRate()) + .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), + rsWrap.getMaxStoreFileAge()) + .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), + rsWrap.getMinStoreFileAge()) + .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), + rsWrap.getAvgStoreFileAge()) + .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), + rsWrap.getNumReferenceFiles()) + .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) + .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), + rsWrap.getAverageRegionSize()) + .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), + rsWrap.getStoreFileIndexSize()) + .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), + rsWrap.getTotalStaticIndexSize()) + .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), + rsWrap.getTotalStaticBloomSize()) + .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), + rsWrap.getNumMutationsWithoutWAL()) + .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), + rsWrap.getDataInMemoryWithoutWAL()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), + rsWrap.getPercentFileLocal()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, + PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), rsWrap.getPercentFileLocalSecondaryRegions()) + .addGauge(Interns.info(TOTAL_BYTES_READ, TOTAL_BYTES_READ_DESC), rsWrap.getTotalBytesRead()) + .addGauge(Interns.info(LOCAL_BYTES_READ, LOCAL_BYTES_READ_DESC), rsWrap.getLocalBytesRead()) + .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, SHORTCIRCUIT_BYTES_READ_DESC), + rsWrap.getShortCircuitBytesRead()) + .addGauge(Interns.info(ZEROCOPY_BYTES_READ, ZEROCOPY_BYTES_READ_DESC), + rsWrap.getZeroCopyBytesRead()) + .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), + rsWrap.getSplitQueueSize()) + .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getCompactionQueueSize()) + .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, SMALL_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getSmallCompactionQueueSize()) + .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, LARGE_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getLargeCompactionQueueSize()) + .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), + rsWrap.getFlushQueueSize()) + .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), + rsWrap.getBlockCacheFreeSize()) + .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), + rsWrap.getBlockCacheCount()) + 
.addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), rsWrap.getBlockCacheSize()) + .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitPercent()) + .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitCachingPercent()) + .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), + rsWrap.getL1CacheHitCount()) + .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), + rsWrap.getL1CacheMissCount()) + .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), + rsWrap.getL1CacheHitRatio()) + .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), + rsWrap.getL1CacheMissRatio()) + .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), + rsWrap.getL2CacheHitCount()) + .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), + rsWrap.getL2CacheMissCount()) + .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), + rsWrap.getL2CacheHitRatio()) + .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), + rsWrap.getL2CacheMissRatio()) + .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), + rsWrap.getMobFileCacheCount()) + .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), + rsWrap.getMobFileCacheHitPercent()) + .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), + rsWrap.getReadRequestsRatePerSecond()) + .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), + rsWrap.getWriteRequestsRatePerSecond()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorHeapAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorPoolAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), + rsWrap.getByteBuffAllocatorHeapAllocRatio()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), + rsWrap.getByteBuffAllocatorTotalBufferCount()) + .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), rsWrap.getByteBuffAllocatorUsedBufferCount()) + .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), rsWrap.getActiveScanners()); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index b424cdb21dbb..f2ad7b48cc86 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.List; @@ -35,28 +34,24 @@ public interface MetricsRegionServerWrapper { /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String getCoprocessors(); /** * Get HRegionServer start time - * * @return Start time of RegionServer in milliseconds */ long getStartCode(); @@ -91,9 +86,9 @@ public interface MetricsRegionServerWrapper { */ long getNumWALSlowAppend(); - /** - * Get the number of store files hosted on this region server. - */ + /** + * Get the number of store files hosted on this region server. + */ long getNumStoreFiles(); /** @@ -122,12 +117,12 @@ public interface MetricsRegionServerWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files hosted on this region server + * @return Average age of store files hosted on this region server */ long getAvgStoreFileAge(); /** - * @return Number of reference files on this region server + * @return Number of reference files on this region server */ long getNumReferenceFiles(); @@ -202,8 +197,8 @@ public interface MetricsRegionServerWrapper { long getNumMutationsWithoutWAL(); /** - * Ammount of data in the memstore but not in the WAL because mutations explicitly had their - * WAL turned off. + * Amount of data in the memstore but not in the WAL because mutations explicitly had their WAL + * turned off. */ long getDataInMemoryWithoutWAL(); @@ -237,6 +232,7 @@ public interface MetricsRegionServerWrapper { int getFlushQueueSize(); long getMemStoreLimit(); + /** * Get the size (in bytes) of the block cache that is free. */ @@ -282,7 +278,6 @@ public interface MetricsRegionServerWrapper { */ long getBlockCachePrimaryEvictedCount(); - /** * Get the percent of all requests that hit the block cache. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index b3a556e3d9f2..c3d955592d6a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -45,10 +44,10 @@ public interface MetricsRegionSource extends Comparable { String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for this region"; String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this region"; String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region"; - String NUM_BYTES_COMPACTED_DESC = - "Sum of filesize on all files entering a finished, successful or aborted, compaction"; + String NUM_BYTES_COMPACTED_DESC = + "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = - "Number of files that were input for finished, successful or aborted, compactions"; + "Number of files that were input for finished, successful or aborted, compactions"; String COPROCESSOR_EXECUTION_STATISTICS = "coprocessorExecutionStatistics"; String COPROCESSOR_EXECUTION_STATISTICS_DESC = "Statistics for coprocessor execution times"; String REPLICA_ID = "replicaid"; @@ -81,7 +80,7 @@ public interface MetricsRegionSource extends Comparable { /** * Update time used of resultScanner.next(). - * */ + */ void updateScanTime(long mills); /** @@ -99,5 +98,4 @@ public interface MetricsRegionSource extends Comparable { */ MetricsRegionAggregateSource getAggregateSource(); - } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index 2f7f8074c9df..6933241842ce 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; @@ -72,19 +70,19 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { private final int hashCode; public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, - MetricsRegionAggregateSourceImpl aggregate) { + MetricsRegionAggregateSourceImpl aggregate) { this.regionWrapper = regionWrapper; agg = aggregate; hashCode = regionWrapper.getRegionHashCode(); agg.register(this); - LOG.debug("Creating new MetricsRegionSourceImpl for table " + - regionWrapper.getTableName() + " " + regionWrapper.getRegionName()); + LOG.debug("Creating new MetricsRegionSourceImpl for table " + regionWrapper.getTableName() + " " + + regionWrapper.getRegionName()); registry = agg.getMetricsRegistry(); regionNamePrefix1 = "Namespace_" + regionWrapper.getNamespace() + "_table_" - + regionWrapper.getTableName() + "_region_" + regionWrapper.getRegionName(); + + regionWrapper.getTableName() + "_region_" + regionWrapper.getRegionName(); regionNamePrefix2 = "_metric_"; regionNamePrefix = regionNamePrefix1 + regionNamePrefix2; @@ -204,115 +202,91 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) { return; } + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, + MetricsRegionServerSource.STORE_COUNT_DESC), this.regionWrapper.getNumStores()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + MetricsRegionServerSource.STOREFILE_COUNT_DESC), this.regionWrapper.getNumStoreFiles()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, + MetricsRegionServerSource.STORE_REF_COUNT), this.regionWrapper.getStoreRefCount()); + mrb.addGauge( + Interns.info( + regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), + this.regionWrapper.getMaxCompactedStoreFileRefCount()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + MetricsRegionServerSource.MEMSTORE_SIZE_DESC), this.regionWrapper.getMemStoreSize()); mrb.addGauge( - Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, - MetricsRegionServerSource.STORE_COUNT_DESC), - this.regionWrapper.getNumStores()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, - MetricsRegionServerSource.STOREFILE_COUNT_DESC), - this.regionWrapper.getNumStoreFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, - MetricsRegionServerSource.STORE_REF_COUNT), - this.regionWrapper.getStoreRefCount()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, - MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), - this.regionWrapper.getMaxCompactedStoreFileRefCount() - ); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, - MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - this.regionWrapper.getMemStoreSize()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, - MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), + Interns.info(regionNamePrefix + 
MetricsRegionServerSource.MAX_STORE_FILE_AGE, + MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), this.regionWrapper.getMaxStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, - MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, + MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), this.regionWrapper.getMinStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, - MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), this.regionWrapper.getAvgStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, - MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), this.regionWrapper.getNumReferenceFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, - MetricsRegionServerSource.STOREFILE_SIZE_DESC), - this.regionWrapper.getStoreFileSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, - MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), - this.regionWrapper.getNumCompactionsCompleted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + MetricsRegionServerSource.STOREFILE_SIZE_DESC), this.regionWrapper.getStoreFileSize()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, + MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), + this.regionWrapper.getNumCompactionsCompleted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, MetricsRegionSource.COMPACTIONS_FAILED_DESC), - this.regionWrapper.getNumCompactionsFailed()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, - MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), - this.regionWrapper.getLastMajorCompactionAge()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, - MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), - this.regionWrapper.getNumBytesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, - MetricsRegionSource.NUM_FILES_COMPACTED_DESC), - this.regionWrapper.getNumFilesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, - MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - this.regionWrapper.getReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, - MetricsRegionServerSource.CP_REQUEST_COUNT_DESC), - this.regionWrapper.getCpRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - this.regionWrapper.getFilteredReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, - 
MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - this.regionWrapper.getWriteRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.REPLICA_ID, - MetricsRegionSource.REPLICA_ID_DESC), - this.regionWrapper.getReplicaId()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, - MetricsRegionSource.COMPACTIONS_QUEUED_DESC), - this.regionWrapper.getNumCompactionsQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, - MetricsRegionSource.FLUSHES_QUEUED_DESC), - this.regionWrapper.getNumFlushesQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, - MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), - this.regionWrapper.getMaxCompactionQueueSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, - MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), - this.regionWrapper.getMaxFlushQueueSize()); + this.regionWrapper.getNumCompactionsFailed()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, + MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), + this.regionWrapper.getLastMajorCompactionAge()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, + MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), this.regionWrapper.getNumBytesCompacted()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, + MetricsRegionSource.NUM_FILES_COMPACTED_DESC), this.regionWrapper.getNumFilesCompacted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), + this.regionWrapper.getReadRequestCount()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, + MetricsRegionServerSource.CP_REQUEST_COUNT_DESC), this.regionWrapper.getCpRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + this.regionWrapper.getFilteredReadRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), + this.regionWrapper.getWriteRequestCount()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID, + MetricsRegionSource.REPLICA_ID_DESC), this.regionWrapper.getReplicaId()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, + MetricsRegionSource.COMPACTIONS_QUEUED_DESC), + this.regionWrapper.getNumCompactionsQueued()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, + MetricsRegionSource.FLUSHES_QUEUED_DESC), this.regionWrapper.getNumFlushesQueued()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, + MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), + this.regionWrapper.getMaxCompactionQueueSize()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, + MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), this.regionWrapper.getMaxFlushQueueSize()); addCounter(mrb, this.regionWrapper.getMemstoreOnlyRowReadsCount(), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addCounter(mrb, 
this.regionWrapper.getMixedRowReadsCount(), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } private void addCounter(MetricsRecordBuilder mrb, Map metricMap, String metricName, - String metricDesc) { + String metricDesc) { if (metricMap != null) { for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric @@ -330,7 +304,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); + return obj == this + || (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 6bf010ce91b4..5714a0542776 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,36 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap an HRegion and export numbers so they can be - * used in MetricsRegionSource + * Interface of class that will wrap an HRegion and export numbers so they can be used in + * MetricsRegionSource */ @InterfaceAudience.Private public interface MetricsRegionWrapper { /** * Get the name of the table the region belongs to. - * * @return The string version of the table name. */ String getTableName(); /** * Get the name of the namespace this table is in. - * @return String version of the namespace. Can't be empty. + * @return String version of the namespace. Can't be empty. */ String getNamespace(); /** * Get the name of the region. - * * @return The encoded name of the region. */ String getRegionName(); @@ -95,12 +91,12 @@ public interface MetricsRegionWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files under this region + * @return Average age of store files under this region */ long getAvgStoreFileAge(); /** - * @return Number of reference files under this region + * @return Number of reference files under this region */ long getNumReferenceFiles(); @@ -118,14 +114,14 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** - * @return Age of the last major compaction + * @return Age of the last major compaction */ long getLastMajorCompactionAge(); /** - * Returns the total number of compactions that have been reported as failed on this region. - * Note that a given compaction can be reported as both completed and failed if an exception - * is thrown in the processing after {@code HRegion.compact()}. + * Returns the total number of compactions that have been reported as failed on this region. 
Note + * that a given compaction can be reported as both completed and failed if an exception is thrown + * in the processing after {@code HRegion.compact()}. */ long getNumCompactionsFailed(); @@ -143,14 +139,12 @@ public interface MetricsRegionWrapper { /** * Note that this metric is updated periodically and hence might miss some data points. - * * @return the max number of compactions queued for this region */ long getMaxCompactionQueueSize(); /** * Note that this metric is updated periodically and hence might miss some data points. - * * @return the max number of flushes queued for this region */ long getMaxFlushQueueSize(); @@ -168,8 +162,8 @@ public interface MetricsRegionWrapper { long getStoreRefCount(); /** - * @return the max number of references active on any store file among - * all compacted store files that belong to this region + * @return the max number of references active on any store file among all compacted store files + * that belong to this region */ long getMaxCompactedStoreFileRefCount(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java index f746c98c5458..e11f1864f484 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions of a table into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions of a table into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsTableAggregateSource extends BaseSource { @@ -59,7 +58,6 @@ public interface MetricsTableAggregateSource extends BaseSource { /** * Remove a table's source. This is called when regions of a table are closed. - * * @param table The table name */ void deleteTableSource(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java index 0b13e5c8dfed..9b36d27b99db 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; @@ -31,7 +29,7 @@ @InterfaceAudience.Private public class MetricsTableAggregateSourceImpl extends BaseSourceImpl - implements MetricsTableAggregateSource { + implements MetricsTableAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsTableAggregateSourceImpl.class); private ConcurrentHashMap tableSources = new ConcurrentHashMap<>(); @@ -40,10 +38,8 @@ public MetricsTableAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsTableAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsTableAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -67,7 +63,7 @@ public void deleteTableSource(String table) { @Override public MetricsTableSource getOrCreateTableSource(String table, - MetricsTableWrapperAggregate wrapper) { + MetricsTableWrapperAggregate wrapper) { MetricsTableSource source = tableSources.get(table); if (source != null) { return source; @@ -82,10 +78,9 @@ public MetricsTableSource getOrCreateTableSource(String table, } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector * @param all get all the metrics regardless of when they last changed. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 2aeb82b0d64d..e7d447aef491 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,94 +60,85 @@ public interface MetricsTableLatencies { /** * Update the Put time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updatePut(String tableName, long t); /** * Update the batch Put time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updatePutBatch(String tableName, long t); /** * Update the Delete time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateDelete(String tableName, long t); /** * Update the batch Delete time histogram - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateDeleteBatch(String tableName, long t); /** * Update the Get time histogram . - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateGet(String tableName, long t); /** * Update the Increment time histogram. - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateIncrement(String tableName, long t); /** * Update the Append time histogram. - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateAppend(String tableName, long t); /** * Update the scan size. - * * @param tableName The table the metric is for - * @param scanSize size of the scan + * @param scanSize size of the scan */ void updateScanSize(String tableName, long scanSize); /** * Update the scan time. - * * @param tableName The table the metric is for - * @param t time it took + * @param t time it took */ void updateScanTime(String tableName, long t); /** * Update the CheckAndDelete time histogram. * @param nameAsString The table the metric is for - * @param time time it took + * @param time time it took */ void updateCheckAndDelete(String nameAsString, long time); /** * Update the CheckAndPut time histogram. * @param nameAsString The table the metric is for - * @param time time it took + * @param time time it took */ void updateCheckAndPut(String nameAsString, long time); /** * Update the CheckAndMutate time histogram. * @param nameAsString The table the metric is for - * @param time time it took + * @param time time it took */ void updateCheckAndMutate(String nameAsString, long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index dd143d4c6f5d..1c90b33d5d4d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +33,7 @@ @InterfaceAudience.Private public class MetricsTableLatenciesImpl extends BaseSourceImpl implements MetricsTableLatencies { - private final HashMap histogramsByTable = new HashMap<>(); + private final HashMap histogramsByTable = new HashMap<>(); public static class TableHistograms { final MetricHistogram getTimeHisto; @@ -50,20 +51,17 @@ public static class TableHistograms { TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); - incrementTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, INCREMENT_TIME)); + incrementTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, INCREMENT_TIME)); appendTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, APPEND_TIME)); putTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_TIME)); putBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_BATCH_TIME)); deleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_TIME)); - deleteBatchTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, DELETE_BATCH_TIME)); + deleteBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); checkAndDeleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); - checkAndPutTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + checkAndPutTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); checkAndMutateTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } @@ -141,7 +139,7 @@ public MetricsTableLatenciesImpl() { } public MetricsTableLatenciesImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java index c3b819228fe4..1ff86de67ec5 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor 
license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +32,7 @@ public interface MetricsTableQueryMeter { /** * Update table read QPS * @param tableName The table the metric is for - * @param count Number of occurrences to record + * @param count Number of occurrences to record */ void updateTableReadQueryMeter(TableName tableName, long count); @@ -44,7 +45,7 @@ public interface MetricsTableQueryMeter { /** * Update table write QPS * @param tableName The table the metric is for - * @param count Number of occurrences to record + * @param count Number of occurrences to record */ void updateTableWriteQueryMeter(TableName tableName, long count); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java index 6b1d323dc19a..dc53c940166d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,15 +19,14 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Meter; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in - * a RegionServer. + * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in a + * RegionServer. */ @InterfaceAudience.Private public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter { @@ -42,8 +42,8 @@ private static class TableMeters { final Meter tableWriteQueryMeter; TableMeters(MetricRegistry metricRegistry, TableName tableName) { - this.tableReadQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, - TABLE_READ_QUERY_PER_SECOND)); + this.tableReadQueryMeter = + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND)); this.tableWriteQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java index 9fc606257e0c..b65457a87147 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -83,35 +82,35 @@ public interface MetricsTableSource extends Comparable, Clos /** * Update the compaction time histogram, both major and minor * @param isMajor whether compaction is a major compaction - * @param t time it took, in milliseconds + * @param t time it took, in milliseconds */ void updateCompactionTime(boolean isMajor, long t); /** * Update the compaction input number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionInputFileCount(boolean isMajor, long c); /** * Update the compaction total input file size histogram * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionInputSize(boolean isMajor, long bytes); /** * Update the compaction output number of files histogram * @param isMajor whether compaction is a major compaction - * @param c number of files + * @param c number of files */ void updateCompactionOutputFileCount(boolean isMajor, long c); /** * Update the compaction total output file size * @param isMajor whether compaction is a major compaction - * @param bytes the number of bytes of the compaction input file + * @param bytes the number of bytes of the compaction input file */ void updateCompactionOutputSize(boolean isMajor, long bytes); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index 85f5bded98a8..19c6d845ceb2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricHistogram; @@ -126,19 +125,18 @@ public class MetricsTableSourceImpl implements MetricsTableSource { private MutableFastCounter majorCompactedInputBytes; private MutableFastCounter majorCompactedOutputBytes; - public MetricsTableSourceImpl(String tblName, - MetricsTableAggregateSourceImpl aggregate, MetricsTableWrapperAggregate tblWrapperAgg) { + public MetricsTableSourceImpl(String tblName, MetricsTableAggregateSourceImpl aggregate, + MetricsTableWrapperAggregate tblWrapperAgg) { LOG.debug("Creating new MetricsTableSourceImpl for table '{}'", tblName); this.tableName = TableName.valueOf(tblName); this.agg = aggregate; this.tableWrapperAgg = tblWrapperAgg; this.registry = agg.getMetricsRegistry(); - this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + - "_table_" + this.tableName.getQualifierAsString(); + this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + "_table_" + + this.tableName.getQualifierAsString(); this.tableNamePrefixPart2 = "_metric_"; - this.tableNamePrefix = tableNamePrefixPart1 + - tableNamePrefixPart2; + this.tableNamePrefix = tableNamePrefixPart1 + tableNamePrefixPart2; this.hashCode = this.tableName.hashCode(); } @@ -146,16 +144,16 @@ public MetricsTableSourceImpl(String tblName, public synchronized void registerMetrics() { flushTimeHisto = registry.newTimeHistogram(tableNamePrefix + FLUSH_TIME, FLUSH_TIME_DESC); flushMemstoreSizeHisto = - registry.newSizeHistogram(tableNamePrefix + FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); + registry.newSizeHistogram(tableNamePrefix + FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); flushOutputSizeHisto = - registry.newSizeHistogram(tableNamePrefix + FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); + registry.newSizeHistogram(tableNamePrefix + FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); flushedOutputBytes = - registry.newCounter(tableNamePrefix + FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); + registry.newCounter(tableNamePrefix + FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); flushedMemstoreBytes = registry.newCounter(tableNamePrefix + FLUSHED_MEMSTORE_BYTES, FLUSHED_MEMSTORE_BYTES_DESC, 0L); compactionTimeHisto = - registry.newTimeHistogram(tableNamePrefix + COMPACTION_TIME, COMPACTION_TIME_DESC); + registry.newTimeHistogram(tableNamePrefix + COMPACTION_TIME, COMPACTION_TIME_DESC); compactionInputFileCountHisto = registry.newHistogram( tableNamePrefix + COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC); compactionInputSizeHisto = registry.newSizeHistogram(tableNamePrefix + COMPACTION_INPUT_SIZE, @@ -164,8 +162,8 @@ public synchronized void registerMetrics() { tableNamePrefix + COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC); compactionOutputSizeHisto = registry.newSizeHistogram(tableNamePrefix + COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); - compactedInputBytes = registry.newCounter(tableNamePrefix + COMPACTED_INPUT_BYTES, - COMPACTED_INPUT_BYTES_DESC, 0L); + compactedInputBytes = + registry.newCounter(tableNamePrefix + COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); compactedOutputBytes = registry.newCounter(tableNamePrefix + COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); @@ -176,8 
+174,8 @@ public synchronized void registerMetrics() { majorCompactionInputSizeHisto = registry.newSizeHistogram( tableNamePrefix + MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); majorCompactionOutputFileCountHisto = - registry.newHistogram(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_FILE_COUNT, - MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); + registry.newHistogram(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_FILE_COUNT, + MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); majorCompactionOutputSizeHisto = registry.newSizeHistogram( tableNamePrefix + MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); majorCompactedInputBytes = registry.newCounter(tableNamePrefix + MAJOR_COMPACTED_INPUT_BYTES, @@ -238,6 +236,7 @@ public void close() { tableWrapperAgg = null; } } + @Override public MetricsTableAggregateSource getAggregateSource() { return agg; @@ -272,74 +271,87 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) { } if (this.tableWrapperAgg != null) { - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, MetricsRegionServerSource.CP_REQUEST_COUNT_DESC), tableWrapperAgg.getCpRequestsCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); + tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); mrb.addCounter( - Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, + tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, MetricsRegionServerSource.TOTAL_REQUEST_COUNT_DESC), - tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, MetricsRegionServerSource.STOREFILE_COUNT_DESC), - 
tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, MetricsRegionServerSource.STOREFILE_SIZE_DESC), - tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, - MetricsTableSource.TABLE_SIZE_DESC), + tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, + MetricsTableSource.TABLE_SIZE_DESC), tableWrapperAgg.getTableSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, MetricsRegionServerSource.AVERAGE_REGION_SIZE_DESC), - tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, + tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, MetricsRegionServerSource.REGION_COUNT_DESC), - tableWrapperAgg.getNumRegions(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, + tableWrapperAgg.getNumRegions(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, MetricsRegionServerSource.STORE_COUNT_DESC), - tableWrapperAgg.getNumStores(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, + tableWrapperAgg.getNumStores(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, + tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), - tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), - tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); + tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); addGauge(mrb, tableWrapperAgg.getMemstoreOnlyRowReadsCount(tableName.getNameAsString()), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); 
addGauge(mrb, tableWrapperAgg.getMixedRowReadsCount(tableName.getNameAsString()), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } } private void addGauge(MetricsRecordBuilder mrb, Map metricMap, String metricName, - String metricDesc) { + String metricDesc) { if (metricMap != null) { for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY - + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] - + this.tableNamePrefixPart2 + metricName, - metricDesc), entry.getValue()); + + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] + this.tableNamePrefixPart2 + + metricName, metricDesc), entry.getValue()); } } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index 40fd6d8effaf..284fb57cd231 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap a MetricsTableSource and export numbers so they can be - * used in MetricsTableSource + * Interface of class that will wrap a MetricsTableSource and export numbers so they can be used in + * MetricsTableSource */ @InterfaceAudience.Private public interface MetricsTableWrapperAggregate { public String HASH = "#"; + /** * Get the number of read requests that have been issued against this table */ @@ -43,6 +42,7 @@ public interface MetricsTableWrapperAggregate { * Get the total number of filtered read requests that have been issued against this table */ long getFilteredReadRequestCount(String table); + /** * Get the number of write requests that have been issued for this table */ @@ -68,7 +68,6 @@ public interface MetricsTableWrapperAggregate { */ long getTableSize(String table); - /** * Get the average region size for this table */ @@ -100,12 +99,12 @@ public interface MetricsTableWrapperAggregate { long getMinStoreFileAge(String table); /** - * @return Average age of store files for this table + * @return Average age of store files for this table */ long getAvgStoreFileAge(String table); /** - * @return Number of reference files for this table + * @return Number of reference files for this table */ long getNumReferenceFiles(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java index ee570f00d999..fe5b2ab47536 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java +++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** -* This interface will be implemented by a MetricsSource that will export metrics from -* multiple users into the hadoop metrics system. -*/ + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * users into the hadoop metrics system. + */ @InterfaceAudience.Private public interface MetricsUserAggregateSource extends BaseSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java index 28726c4ee1f1..6a2e9713eb9b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -37,16 +35,14 @@ public class MetricsUserAggregateSourceImpl extends BaseSourceImpl private static final Logger LOG = LoggerFactory.getLogger(MetricsUserAggregateSourceImpl.class); private final ConcurrentHashMap userSources = - new ConcurrentHashMap(); + new ConcurrentHashMap(); public MetricsUserAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsUserAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsUserAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java index 96173669bbc3..2d75c9246ba2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsUserSource extends Comparable { - //These client metrics will be reported through clusterStatus and hbtop only + // These client metrics will be reported through clusterStatus and hbtop only interface ClientMetrics { void incrementReadRequest(); @@ -66,15 +64,14 @@ interface ClientMetrics { void getMetrics(MetricsCollector metricsCollector, boolean all); /** - * Metrics collected at client level for a user(needed for reporting through clusterStatus - * and hbtop currently) + * Metrics collected at client level for a user (needed for reporting through clusterStatus and + * hbtop currently) * @return metrics per hostname */ Map getClientMetrics(); /** * Create a instance of ClientMetrics if not present otherwise return the previous one - * * @param hostName hostname of the client * @return Instance of ClientMetrics */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java index ef0eb7bf4620..8559e0e4b921 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; @@ -23,7 +22,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -74,32 +72,39 @@ public ClientMetricsImpl(String hostName) { this.hostName = hostName; } - @Override public void incrementReadRequest() { + @Override + public void incrementReadRequest() { readRequestsCount.increment(); } - @Override public void incrementWriteRequest() { + @Override + public void incrementWriteRequest() { writeRequestsCount.increment(); } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestsCount.sum(); } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestsCount.sum(); } - @Override public void incrementFilteredReadRequests() { + @Override + public void incrementFilteredReadRequests() { filteredRequestsCount.increment(); } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return filteredRequestsCount.sum(); } } @@ -191,8 +196,8 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); + return obj == this + || (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); } void snapshot(MetricsRecordBuilder mrb, boolean ignored) { @@ -252,16 +257,19 @@ public void updateScanTime(long t) { scanTimeHisto.add(t); } - @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(this.userNamePrefix); registry.snapshot(mrb, all); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return Collections.unmodifiableMap(clientMetricsMap); } - @Override public ClientMetrics getOrCreateMetricsClient(String client) { + @Override + public ClientMetrics getOrCreateMetricsClient(String client) { ClientMetrics source = clientMetricsMap.get(client); if (source != null) { return source; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java index 4a430cdc434e..ce4cd8f16195 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.hbase.TableName; @@ -28,7 +27,6 @@ @InterfaceAudience.Private public interface MetricsWALSource extends BaseSource { - /** * The name of the metrics */ @@ -49,7 +47,6 @@ public interface MetricsWALSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String APPEND_TIME = "appendTime"; String APPEND_TIME_DESC = "Time an append to the log took."; String APPEND_COUNT = "appendCount"; @@ -64,16 +61,16 @@ public interface MetricsWALSource extends BaseSource { String ROLL_REQUESTED_DESC = "How many times a roll has been requested total"; String ERROR_ROLL_REQUESTED = "errorRollRequest"; String ERROR_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to I/O or other errors."; + "How many times a roll was requested due to I/O or other errors."; String LOW_REPLICA_ROLL_REQUESTED = "lowReplicaRollRequest"; String LOW_REPLICA_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to too few datanodes in the write pipeline."; + "How many times a roll was requested due to too few datanodes in the write pipeline."; String SLOW_SYNC_ROLL_REQUESTED = "slowSyncRollRequest"; String SLOW_SYNC_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to sync too slow on the write pipeline."; + "How many times a roll was requested due to sync too slow on the write pipeline."; String SIZE_ROLL_REQUESTED = "sizeRollRequest"; String SIZE_ROLL_REQUESTED_DESC = - "How many times a roll was requested due to file size roll threshold."; + "How many times a roll was requested due to file size roll threshold."; String WRITTEN_BYTES = "writtenBytes"; String WRITTEN_BYTES_DESC = "Size (in bytes) of the data written to the WAL."; String SUCCESSFUL_LOG_ROLLS = "successfulLogRolls"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java index 4f71681113c5..9400eb7d22c7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.util.concurrent.ConcurrentHashMap; @@ -27,9 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsWAL into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsWAL into the metrics subsystem. Implements BaseSource + * through BaseSourceImpl, following the pattern. 
* @see org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource */ @InterfaceAudience.Private @@ -55,32 +53,30 @@ public MetricsWALSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsWALSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsWALSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. + // Create and store the metrics that will be used. appendTimeHisto = this.getMetricsRegistry().newTimeHistogram(APPEND_TIME, APPEND_TIME_DESC); appendSizeHisto = this.getMetricsRegistry().newSizeHistogram(APPEND_SIZE, APPEND_SIZE_DESC); appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L); slowAppendCount = - this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L); + this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L); syncTimeHisto = this.getMetricsRegistry().newTimeHistogram(SYNC_TIME, SYNC_TIME_DESC); logRollRequested = - this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); - errorRollRequested = this.getMetricsRegistry() - .newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); - lowReplicationRollRequested = this.getMetricsRegistry() - .newCounter(LOW_REPLICA_ROLL_REQUESTED, LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); - slowSyncRollRequested = this.getMetricsRegistry() - .newCounter(SLOW_SYNC_ROLL_REQUESTED, SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); - sizeRollRequested = this.getMetricsRegistry() - .newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); + this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); + errorRollRequested = + this.getMetricsRegistry().newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); + lowReplicationRollRequested = this.getMetricsRegistry().newCounter(LOW_REPLICA_ROLL_REQUESTED, + LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); + slowSyncRollRequested = this.getMetricsRegistry().newCounter(SLOW_SYNC_ROLL_REQUESTED, + SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); + sizeRollRequested = + this.getMetricsRegistry().newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0L); - successfulLogRolls = this.getMetricsRegistry() - .newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); + successfulLogRolls = + this.getMetricsRegistry().newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); perTableAppendCount = new ConcurrentHashMap<>(); perTableAppendSize = new ConcurrentHashMap<>(); } @@ -93,8 +89,8 @@ public void incrementAppendSize(TableName tableName, long size) { // Ideally putIfAbsent is atomic and we don't need a branch check but we still do it to avoid // expensive string construction for every append. 
String metricsKey = String.format("%s.%s", tableName, APPEND_SIZE); - perTableAppendSize.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); + perTableAppendSize.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); tableAppendSizeCounter = perTableAppendSize.get(tableName); } tableAppendSizeCounter.incr(size); @@ -111,8 +107,8 @@ public void incrementAppendCount(TableName tableName) { MutableFastCounter tableAppendCounter = perTableAppendCount.get(tableName); if (tableAppendCounter == null) { String metricsKey = String.format("%s.%s", tableName, APPEND_COUNT); - perTableAppendCount.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); + perTableAppendCount.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); tableAppendCounter = perTableAppendCount.get(tableName); } tableAppendCounter.incr(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java index 547617a1669f..643f292a4601 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -75,63 +74,72 @@ public MetricsReplicationGlobalSourceSourceImpl(MetricsReplicationSourceImpl rms shippedHFilesCounter = rms.getMetricsRegistry().getCounter(SOURCE_SHIPPED_HFILES, 0L); sizeOfHFileRefsQueueGauge = - rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); + rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); - unknownFileLengthForClosedWAL = rms.getMetricsRegistry() - .getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); + unknownFileLengthForClosedWAL = + rms.getMetricsRegistry().getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); uncleanlyClosedWAL = rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_LOGS, 0L); - uncleanlyClosedSkippedBytes = rms.getMetricsRegistry() - .getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); + uncleanlyClosedSkippedBytes = + rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); restartWALReading = rms.getMetricsRegistry().getCounter(SOURCE_RESTARTED_LOG_READING, 0L); repeatedFileBytes = rms.getMetricsRegistry().getCounter(SOURCE_REPEATED_LOG_FILE_BYTES, 0L); completedWAL = rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_LOGS, 0L); - completedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); - failedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); + completedRecoveryQueue = + rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); + failedRecoveryQueue = rms.getMetricsRegistry().getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); - walReaderBufferUsageBytes = rms.getMetricsRegistry() - .getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); + walReaderBufferUsageBytes = + rms.getMetricsRegistry().getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); sourceInitializing = rms.getMetricsRegistry().getGaugeInt(SOURCE_INITIALIZING, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); } - @Override public void incrLogReadInBytes(long size) { + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { } @Override @@ -139,7 +147,8 @@ public long getLastShippedAge() { return ageOfLastShippedOpHist.getMax(); } - @Override public void 
incrHFilesShipped(long hfiles) { + @Override + public void incrHFilesShipped(long hfiles) { shippedHFilesCounter.incr(hfiles); } @@ -155,13 +164,14 @@ public void decrSizeOfHFileRefsQueue(long size) { @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override public void incrUnknownFileLengthForClosedWAL() { unknownFileLengthForClosedWAL.incr(1L); } + @Override public void incrUncleanlyClosedWALs() { uncleanlyClosedWAL.incr(1L); @@ -176,22 +186,27 @@ public long getUncleanlyClosedWALs() { public void incrBytesSkippedInUncleanlyClosedWALs(final long bytes) { uncleanlyClosedSkippedBytes.incr(bytes); } + @Override public void incrRestartedWALReading() { restartWALReading.incr(1L); } + @Override public void incrRepeatedFileBytes(final long bytes) { repeatedFileBytes.incr(bytes); } + @Override public void incrCompletedWAL() { completedWAL.incr(1L); } + @Override public void incrCompletedRecoveryQueue() { completedRecoveryQueue.incr(1L); } + @Override public void incrFailedRecoveryQueue() { failedRecoveryQueue.incr(1L); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java index 2498e3426a5d..ff594412fe9a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -28,9 +27,14 @@ public interface MetricsReplicationSinkSource { public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles"; void setLastAppliedOpAge(long age); + void incrAppliedBatches(long batches); + void incrAppliedOps(long batchsize); + long getLastAppliedOpAge(); + void incrAppliedHFiles(long hfileSize); + long getSinkAppliedOps(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index ce45af5ccec7..84a7458a257c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -37,15 +36,18 @@ public MetricsReplicationSinkSourceImpl(MetricsReplicationSourceImpl rms) { hfilesCounter = rms.getMetricsRegistry().getCounter(SINK_APPLIED_HFILES, 0L); } - @Override public void setLastAppliedOpAge(long age) { + @Override + public void setLastAppliedOpAge(long age) { ageHist.add(age); } - @Override public void incrAppliedBatches(long batches) { + @Override + public void incrAppliedBatches(long batches) { batchesCounter.incr(batches); } - @Override public void incrAppliedOps(long batchsize) { + @Override + public void incrAppliedOps(long batchsize) { opsCounter.incr(batchsize); } @@ -59,7 +61,8 @@ public void incrAppliedHFiles(long hfiles) { hfilesCounter.incr(hfiles); } - @Override public long getSinkAppliedOps() { + @Override + public long getSinkAppliedOps() { return opsCounter.value(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java index 6fb5d71ef02f..a891b7732880 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java index 73d2cfd62f49..ef72ce756e55 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -23,7 +22,10 @@ @InterfaceAudience.Private public interface MetricsReplicationSourceFactory { public MetricsReplicationSinkSource getSink(); + public MetricsReplicationSourceSource getSource(String id); + public MetricsReplicationTableSource getTableSource(String tableName); + public MetricsReplicationGlobalSourceSourceImpl getGlobalSource(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java index 061fc58296e0..1362a9022f86 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java @@ -24,22 +24,27 @@ public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSo private static enum SourceHolder { INSTANCE; + final MetricsReplicationSourceImpl source = new MetricsReplicationSourceImpl(); } - @Override public MetricsReplicationSinkSource getSink() { + @Override + public MetricsReplicationSinkSource getSink() { return new MetricsReplicationSinkSourceImpl(SourceHolder.INSTANCE.source); } - @Override public MetricsReplicationSourceSource getSource(String id) { + @Override + public MetricsReplicationSourceSource getSource(String id) { return new MetricsReplicationSourceSourceImpl(SourceHolder.INSTANCE.source, id); } - @Override public MetricsReplicationTableSource getTableSource(String tableName) { + @Override + public MetricsReplicationTableSource getTableSource(String tableName) { return new MetricsReplicationTableSourceImpl(SourceHolder.INSTANCE.source, tableName); } - @Override public MetricsReplicationGlobalSourceSourceImpl getGlobalSource() { + @Override + public MetricsReplicationGlobalSourceSourceImpl getGlobalSource() { return new MetricsReplicationGlobalSourceSourceImpl(SourceHolder.INSTANCE.source); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java index 02045f8bbd13..0caf9970ce07 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,23 +22,18 @@ /** * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and - * counters. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * counters. 
Implements BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsReplicationSourceImpl extends BaseSourceImpl implements - MetricsReplicationSource { - +public class MetricsReplicationSourceImpl extends BaseSourceImpl + implements MetricsReplicationSource { public MetricsReplicationSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - MetricsReplicationSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + MetricsReplicationSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java index 42e28f5d0f31..6dc2c39c75e7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -40,10 +39,10 @@ public interface MetricsReplicationSourceSource extends BaseSource { public static final String SOURCE_SIZE_OF_HFILE_REFS_QUEUE = "source.sizeOfHFileRefsQueue"; public static final String SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH = - "source.closedLogsWithUnknownFileLength"; + "source.closedLogsWithUnknownFileLength"; public static final String SOURCE_UNCLEANLY_CLOSED_LOGS = "source.uncleanlyClosedLogs"; public static final String SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES = - "source.ignoredUncleanlyClosedLogContentsInBytes"; + "source.ignoredUncleanlyClosedLogContentsInBytes"; public static final String SOURCE_RESTARTED_LOG_READING = "source.restartedLogReading"; public static final String SOURCE_REPEATED_LOG_FILE_BYTES = "source.repeatedLogFileBytes"; public static final String SOURCE_COMPLETED_LOGS = "source.completedLogs"; @@ -53,35 +52,66 @@ public interface MetricsReplicationSourceSource extends BaseSource { public static final String SOURCE_INITIALIZING = "source.numInitializing"; void setLastShippedAge(long age); + void incrSizeOfLogQueue(int size); + void decrSizeOfLogQueue(int size); + void incrLogEditsFiltered(long size); + void incrBatchesShipped(int batches); + void incrOpsShipped(long ops); + void incrShippedBytes(long size); + void incrLogReadInBytes(long size); + void incrLogReadInEdits(long size); + void clear(); + long getLastShippedAge(); + int getSizeOfLogQueue(); + void incrHFilesShipped(long hfiles); + void incrSizeOfHFileRefsQueue(long size); + void decrSizeOfHFileRefsQueue(long size); + void incrUnknownFileLengthForClosedWAL(); + void incrUncleanlyClosedWALs(); + long getUncleanlyClosedWALs(); + void incrBytesSkippedInUncleanlyClosedWALs(final long bytes); + void incrRestartedWALReading(); + void incrRepeatedFileBytes(final 
long bytes); + void incrCompletedWAL(); + void incrCompletedRecoveryQueue(); + void incrFailedRecoveryQueue(); + long getWALEditsRead(); + long getShippedOps(); + long getEditsFiltered(); + void setOldestWalAge(long age); + long getOldestWalAge(); + void incrSourceInitializing(); + void decrSourceInitializing(); + int getSourceInitializing(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index faf14f79cfb7..795f81c0df85 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -134,43 +134,53 @@ public MetricsReplicationSourceSourceImpl(MetricsReplicationSourceImpl rms, Stri sourceInitializing = rms.getMetricsRegistry().getGaugeInt(sourceInitializingKey, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); } - @Override public void incrLogReadInBytes(long size) { + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { rms.removeMetric(ageOfLastShippedOpKey); rms.removeMetric(sizeOfLogQueueKey); @@ -220,7 +230,7 @@ public void decrSizeOfHFileRefsQueue(long size) { @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override @@ -264,13 +274,16 @@ public void incrCompletedRecoveryQueue() { } @Override - public void incrFailedRecoveryQueue() {/*no op*/} + public void incrFailedRecoveryQueue() { + /* no op */} - @Override public void setOldestWalAge(long age) { + @Override + public void setOldestWalAge(long age) { oldestWalAge.set(age); } - @Override public long getOldestWalAge() { + @Override + public long getOldestWalAge() { return oldestWalAge.value(); } @@ -284,7 +297,8 @@ public int getSourceInitializing() { return sourceInitializing.value(); } - @Override public void decrSourceInitializing() { + @Override + public void decrSourceInitializing() { sourceInitializing.decr(1); } @@ -343,15 +357,18 @@ public String getMetricsName() { return rms.getMetricsName(); } - @Override 
public long getWALEditsRead() { + @Override + public long getWALEditsRead() { return this.logReadInEditsCounter.value(); } - @Override public long getShippedOps() { + @Override + public long getShippedOps() { return this.shippedOpsCounter.value(); } - @Override public long getEditsFiltered() { + @Override + public long getEditsFiltered() { return this.walEditsFilteredCounter.value(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java index faa944a6870d..c4550abb6e83 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -25,8 +24,12 @@ public interface MetricsReplicationTableSource extends BaseSource { void setLastShippedAge(long age); + void incrShippedBytes(long size); + long getShippedBytes(); + void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java index 9ca0cd1a94ef..244298faff66 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java @@ -22,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This is the metric source for table level replication metrics. - * We can easy monitor some useful table level replication metrics such as - * ageOfLastShippedOp and shippedBytes + * This is the metric source for table level replication metrics. We can easily monitor some useful + * table level replication metrics such as ageOfLastShippedOp and shippedBytes */ @InterfaceAudience.Private public class MetricsReplicationTableSourceImpl implements MetricsReplicationTableSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java index 6a672f8cf9ff..72d7ad83821d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -64,91 +63,78 @@ public interface MetricsRESTSource extends BaseSource, JvmPauseMonitorSource { /** * Increment the number of requests - * * @param inc Ammount to increment by */ void incrementRequests(int inc); /** * Increment the number of successful Get requests. - * * @param inc Number of successful get requests. */ void incrementSucessfulGetRequests(int inc); /** * Increment the number of successful Put requests. - * * @param inc Number of successful put requests. */ void incrementSucessfulPutRequests(int inc); /** * Increment the number of successful Delete requests. - * * @param inc number of successful delete requests */ void incrementSucessfulDeleteRequests(int inc); /** * Increment the number of failed Put Requests. - * * @param inc Number of failed Put requests. */ void incrementFailedPutRequests(int inc); /** * Increment the number of failed Get requests. - * * @param inc The number of failed Get Requests. */ void incrementFailedGetRequests(int inc); /** * Increment the number of failed Delete requests. - * * @param inc The number of failed delete requests. */ void incrementFailedDeleteRequests(int inc); /** * Increment the number of successful scan requests. - * * @param inc Number of successful scan requests. */ void incrementSucessfulScanRequests(final int inc); /** * Increment the number failed scan requests. - * * @param inc Number of failed scan requests. */ void incrementFailedScanRequests(final int inc); /** * Increment the number of successful append requests. - * * @param inc Number of successful append requests. */ void incrementSucessfulAppendRequests(final int inc); /** * Increment the number failed append requests. - * * @param inc Number of failed append requests. */ void incrementFailedAppendRequests(final int inc); /** * Increment the number of successful increment requests. - * * @param inc Number of successful increment requests. */ void incrementSucessfulIncrementRequests(final int inc); /** * Increment the number failed increment requests. - * * @param inc Number of failed increment requests. */ void incrementFailedIncrementRequests(final int inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java index 3474265ee26c..e01c428f9286 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ /** * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to - * the hadoop metrics2 subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * the hadoop metrics2 subsystem. 
Implements BaseSource through BaseSourceImpl, following the + * pattern */ @InterfaceAudience.Private public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { @@ -55,17 +53,15 @@ public MetricsRESTSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); } - public MetricsRESTSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRESTSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java index 3fbf15caebfa..2becf3aff52b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java index 760376cfe206..dadc2d0a5e42 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java index 3ce2d5d1fdc1..0ad71fc46c43 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; @@ -32,6 +31,7 @@ public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServer */ private enum FactoryStorage { INSTANCE; + MetricsThriftServerSourceImpl thriftOne; MetricsThriftServerSourceImpl thriftTwo; } @@ -40,9 +40,7 @@ private enum FactoryStorage { public MetricsThriftServerSource createThriftOneSource() { if (FactoryStorage.INSTANCE.thriftOne == null) { FactoryStorage.INSTANCE.thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_ONE_METRICS_CONTEXT, - THRIFT_ONE_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_ONE_METRICS_CONTEXT, THRIFT_ONE_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftOne; } @@ -51,9 +49,7 @@ public MetricsThriftServerSource createThriftOneSource() { public MetricsThriftServerSource createThriftTwoSource() { if (FactoryStorage.INSTANCE.thriftTwo == null) { FactoryStorage.INSTANCE.thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_TWO_METRICS_CONTEXT, - THRIFT_TWO_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_TWO_METRICS_CONTEXT, THRIFT_TWO_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftTwo; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java index 4ed974c95dce..fe50ccb084a7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -26,13 +25,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} Implements + * BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl implements - MetricsThriftServerSource { +public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl + implements MetricsThriftServerSource { private MetricHistogram batchGetStat; private MetricHistogram batchMutateStat; @@ -51,17 +49,15 @@ public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl i private final MetricHistogram pausesWithGc; private final MetricHistogram pausesWithoutGc; - public MetricsThriftServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsThriftServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java index 3133472a8d33..5e920a25c121 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,17 +52,17 @@ public interface MetricsZooKeeperSource extends BaseSource { String EXCEPTION_CONNECTIONLOSS_DESC = "Number of failed ops due to a CONNECTIONLOSS exception."; String EXCEPTION_DATAINCONSISTENCY = "DATAINCONSISTENCY Exception"; String EXCEPTION_DATAINCONSISTENCY_DESC = - "Number of failed ops due to a DATAINCONSISTENCY exception."; + "Number of failed ops due to a DATAINCONSISTENCY exception."; String EXCEPTION_INVALIDACL = "INVALIDACL Exception"; String EXCEPTION_INVALIDACL_DESC = "Number of failed ops due to an INVALIDACL exception"; String EXCEPTION_NOAUTH = "NOAUTH Exception"; String EXCEPTION_NOAUTH_DESC = "Number of failed ops due to a NOAUTH exception."; String EXCEPTION_OPERATIONTIMEOUT = "OPERATIONTIMEOUT Exception"; String EXCEPTION_OPERATIONTIMEOUT_DESC = - "Number of failed ops due to an OPERATIONTIMEOUT exception."; + "Number of failed ops due to an OPERATIONTIMEOUT exception."; String EXCEPTION_RUNTIMEINCONSISTENCY = "RUNTIMEINCONSISTENCY Exception"; String EXCEPTION_RUNTIMEINCONSISTENCY_DESC = - "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; + "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; String EXCEPTION_SESSIONEXPIRED = "SESSIONEXPIRED Exception"; String EXCEPTION_SESSIONEXPIRED_DESC = "Number of failed ops due to a SESSIONEXPIRED exception."; String EXCEPTION_SYSTEMERROR = "SYSTEMERROR Exception"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java index 50ebd46b7166..9429428d300c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. Implements + * BaseSource through BaseSourceImpl, following the pattern. */ @InterfaceAudience.Private public class MetricsZooKeeperSourceImpl extends BaseSourceImpl implements MetricsZooKeeperSource { @@ -52,37 +50,37 @@ public MetricsZooKeeperSourceImpl() { } public MetricsZooKeeperSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. 
- authFailedFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); - connectionLossFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_CONNECTIONLOSS, EXCEPTION_CONNECTIONLOSS_DESC, 0L); - dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_DATAINCONSISTENCY, EXCEPTION_DATAINCONSISTENCY_DESC, 0L); - invalidACLFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); - noAuthFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); - operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_OPERATIONTIMEOUT, EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); - runtimeInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); - sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SESSIONEXPIRED, EXCEPTION_SESSIONEXPIRED_DESC, 0L); - systemErrorFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); - totalFailedZKCalls = this.getMetricsRegistry().newGauge( - TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); - - readOpLatency = this.getMetricsRegistry().newHistogram( - READ_OPERATION_LATENCY_NAME, READ_OPERATION_LATENCY_DESC); - writeOpLatency = this.getMetricsRegistry().newHistogram( - WRITE_OPERATION_LATENCY_NAME, WRITE_OPERATION_LATENCY_DESC); - syncOpLatency = this.getMetricsRegistry().newHistogram( - SYNC_OPERATION_LATENCY_NAME, SYNC_OPERATION_LATENCY_DESC); + // Create and store the metrics that will be used. + authFailedFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); + connectionLossFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_CONNECTIONLOSS, + EXCEPTION_CONNECTIONLOSS_DESC, 0L); + dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_DATAINCONSISTENCY, + EXCEPTION_DATAINCONSISTENCY_DESC, 0L); + invalidACLFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); + noAuthFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); + operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_OPERATIONTIMEOUT, + EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); + runtimeInconsistencyFailedOpCount = this.getMetricsRegistry() + .newGauge(EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); + sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_SESSIONEXPIRED, + EXCEPTION_SESSIONEXPIRED_DESC, 0L); + systemErrorFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); + totalFailedZKCalls = + this.getMetricsRegistry().newGauge(TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); + + readOpLatency = this.getMetricsRegistry().newHistogram(READ_OPERATION_LATENCY_NAME, + READ_OPERATION_LATENCY_DESC); + writeOpLatency = this.getMetricsRegistry().newHistogram(WRITE_OPERATION_LATENCY_NAME, + WRITE_OPERATION_LATENCY_DESC); + syncOpLatency = this.getMetricsRegistry().newHistogram(SYNC_OPERATION_LATENCY_NAME, + SYNC_OPERATION_LATENCY_DESC); } public void getMetrics(MetricsCollector metricsCollector, boolean all) { @@ -91,7 +89,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { } private void clearZKExceptionMetrics() { - //Reset the 
exception metrics. + // Reset the exception metrics. clearMetricIfNotNull(authFailedFailedOpCount); clearMetricIfNotNull(connectionLossFailedOpCount); clearMetricIfNotNull(dataInconsistencyFailedOpCount); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java index 9aa12bab5200..306c0c761aa6 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,19 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import org.apache.yetus.audience.InterfaceAudience; /** - * Metrics Histogram interface. Implementing classes will expose computed - * quartile values through the metrics system. + * Metrics Histogram interface. Implementing classes will expose computed quartile values through + * the metrics system. */ @InterfaceAudience.Private public interface MetricHistogram { - //Strings used to create metrics names. + // Strings used to create metrics names. String NUM_OPS_METRIC_NAME = "_num_ops"; String MIN_METRIC_NAME = "_min"; String MAX_METRIC_NAME = "_max"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java index 1366fd0b9205..33b6c0d9a934 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import java.util.concurrent.ScheduledExecutorService; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index 88b491ba3ea1..9b62cd898f61 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl; @@ -32,11 +31,9 @@ /** * JMX caches the beans that have been exported; even after the values are removed from hadoop's - * metrics system the keys and old values will still remain. 
This class stops and restarts the - * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. - * - * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used - * are package private. + * metrics system the keys and old values will still remain. This class stops and restarts the + * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. This class need to be + * in the o.a.h.metrics2.impl namespace as many of the variables/calls used are package private. */ @InterfaceAudience.Private public final class JmxCacheBuster { @@ -56,7 +53,7 @@ public static void clearJmxCache() { if (LOG.isTraceEnabled()) { LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception())); } - //If there are more then 100 ms before the executor will run then everything should be merged. + // If there are more then 100 ms before the executor will run then everything should be merged. ScheduledFuture future = fut.get(); if ((future != null && (!future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100))) { // BAIL OUT @@ -104,9 +101,9 @@ public void run() { Thread.sleep(500); DefaultMetricsSystem.instance().start(); } - } catch (Exception exception) { + } catch (Exception exception) { LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", - exception); + exception); } } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java index 723e6d34c1d7..09556707648e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,8 +77,8 @@ public boolean removeObjectName(final String name) { * so far as a Source, thus preventing further re-registration of the source with the same name. * In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would * like to be able to re-register and remove with the same name. Otherwise, it is resource leak. - * This ugly code manually removes the name from the UniqueNames map. - * TODO: May not be needed for Hadoop versions after YARN-5190. + * This ugly code manually removes the name from the UniqueNames map. TODO: May not be needed for + * Hadoop versions after YARN-5190. 
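A minimal sketch of the re-registration problem the helper above works around, assuming the stock Hadoop metrics2 registration call (DefaultMetricsSystem.instance().register(...)) together with the removeSourceName() method shown in this patch; the source name, class and flow are illustrative only, not HBase code.

import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystemHelper;

public class ReRegisterSketch {
  private static final DefaultMetricsSystemHelper HELPER = new DefaultMetricsSystemHelper();

  // Re-register a dynamic source under the same name, e.g. after a region is
  // closed and later reopened on the same region server. Without the helper
  // call, DefaultMetricsSystem still remembers the old name in its UniqueNames
  // map and either refuses the second register() or leaks the stale entry,
  // which is exactly what the javadoc above describes.
  public static <T> T reRegister(String name, String description, T source) {
    HELPER.removeSourceName(name);
    return DefaultMetricsSystem.instance().register(name, description, source);
  }
}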
*/ public void removeSourceName(String name) { if (sourceNamesField == null || mapField == null) { @@ -92,8 +92,9 @@ public void removeSourceName(String name) { } } catch (Exception ex) { if (LOG.isTraceEnabled()) { - LOG.trace("Received exception while trying to access Hadoop Metrics classes via " + - "reflection.", ex); + LOG.trace( + "Received exception while trying to access Hadoop Metrics classes via " + "reflection.", + ex); } } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index 7a791c92bc1e..7177f3222106 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.Collection; @@ -29,51 +28,41 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. - * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics - * dynamically. - * Another difference is that metricsMap implementation is substituted with - * thread-safe map, as we allow dynamic metrics additions/removals. + * An optional metrics registry class for creating and maintaining a collection of MetricsMutables, + * making writing metrics source easier. NOTE: this is a copy of + * org.apache.hadoop.metrics2.lib.MetricsRegistry with added one feature: metrics can be removed. + * When HADOOP-8313 is fixed, usages of this class should be substituted with + * org.apache.hadoop.metrics2.lib.MetricsRegistry. This implementation also provides handy methods + * for creating metrics dynamically. Another difference is that metricsMap implementation is + * substituted with thread-safe map, as we allow dynamic metrics additions/removals. 
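For reference, a brief usage sketch of the registry described above, using only methods that appear in this patch (newGauge, newCounter, newTimeHistogram, removeMetric, removeHistogramMetrics); the record and metric names are made up for illustration.

import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableFastCounter;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableTimeHistogram;

public class RegistrySketch {
  public static void main(String[] args) {
    DynamicMetricsRegistry registry = new DynamicMetricsRegistry("ExampleRecord");

    // Create metrics up front, the same way MetricsZooKeeperSourceImpl does above.
    MutableGaugeLong failedOps = registry.newGauge("exampleFailedOps", "Failed ops", 0L);
    MutableFastCounter requests = registry.newCounter("exampleRequests", "Requests seen", 0L);
    MutableTimeHistogram latency = registry.newTimeHistogram("exampleLatencyMs", "Latency in ms");

    requests.incr();
    failedOps.set(1L);
    latency.add(42L);

    // The feature this copy adds over Hadoop's MetricsRegistry: metrics can be
    // removed again, which is what makes short-lived, per-region metrics workable.
    registry.removeMetric("exampleFailedOps");
    registry.removeHistogramMetrics("exampleLatencyMs");
  }
}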
*/ @InterfaceAudience.Private public class DynamicMetricsRegistry { private static final Logger LOG = LoggerFactory.getLogger(DynamicMetricsRegistry.class); - private final ConcurrentMap metricsMap = - Maps.newConcurrentMap(); - private final ConcurrentMap tagsMap = - Maps.newConcurrentMap(); + private final ConcurrentMap metricsMap = Maps.newConcurrentMap(); + private final ConcurrentMap tagsMap = Maps.newConcurrentMap(); private final MetricsInfo metricsInfo; private final DefaultMetricsSystemHelper helper = new DefaultMetricsSystemHelper(); - private final static String[] histogramSuffixes = new String[]{ - "_num_ops", - "_min", - "_max", - "_median", - "_75th_percentile", - "_90th_percentile", - "_95th_percentile", - "_99th_percentile"}; + private final static String[] histogramSuffixes = new String[] { "_num_ops", "_min", "_max", + "_median", "_75th_percentile", "_90th_percentile", "_95th_percentile", "_99th_percentile" }; /** * Construct the registry with a record name - * @param name of the record of the metrics + * @param name of the record of the metrics */ public DynamicMetricsRegistry(String name) { - this(Interns.info(name,name)); + this(Interns.info(name, name)); } /** * Construct the registry with a metadata object - * @param info the info object for the metrics record/group + * @param info the info object for the metrics record/group */ public DynamicMetricsRegistry(MetricsInfo info) { metricsInfo = info; @@ -88,7 +77,7 @@ public MetricsInfo info() { /** * Get a metric by name - * @param name of the metric + * @param name of the metric * @return the metric object */ public MutableMetric get(String name) { @@ -97,7 +86,7 @@ public MutableMetric get(String name) { /** * Get a tag by name - * @param name of the tag + * @param name of the tag * @return the tag object */ public MetricsTag getTag(String name) { @@ -106,9 +95,9 @@ public MetricsTag getTag(String name) { /** * Create a mutable long integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(String name, String desc, long iVal) { @@ -117,8 +106,8 @@ public MutableFastCounter newCounter(String name, String desc, long iVal) { /** * Create a mutable long integer counter - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { @@ -128,9 +117,9 @@ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { /** * Create a mutable long integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(String name, String desc, long iVal) { @@ -139,8 +128,8 @@ public MutableGaugeLong newGauge(String name, String desc, long iVal) { /** * Create a mutable long integer gauge - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { @@ -150,36 +139,34 @@ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { /** * Create a mutable metric with stats 
- * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") - * @param extended produce extended stat (stdev, min/max etc.) if true. + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") + * @param extended produce extended stat (stdev, min/max etc.) if true. * @return a new mutable stat metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName, boolean extended) { - MutableStat ret = - new MutableStat(name, desc, sampleName, valueName, extended); + public MutableStat newStat(String name, String desc, String sampleName, String valueName, + boolean extended) { + MutableStat ret = new MutableStat(name, desc, sampleName, valueName, extended); return addNewMetricIfAbsent(name, ret, MutableStat.class); } /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") * @return a new mutable metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName) { + public MutableStat newStat(String name, String desc, String sampleName, String valueName) { return newStat(name, desc, sampleName, valueName, false); } /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @return a new mutable metric object */ public MutableRate newRate(String name) { @@ -188,7 +175,7 @@ public MutableRate newRate(String name) { /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @param description of the metric * @return a new mutable rate metric object */ @@ -198,9 +185,9 @@ public MutableRate newRate(String name, String description) { /** * Create a mutable rate metric (for throughput measurement) - * @param name of the metric - * @param desc description - * @param extended produce extended stat (stdev/min/max etc.) if true + * @param name of the metric + * @param desc description + * @param extended produce extended stat (stdev/min/max etc.) 
if true * @return a new mutable rate metric object */ public MutableRate newRate(String name, String desc, boolean extended) { @@ -208,8 +195,7 @@ public MutableRate newRate(String name, String desc, boolean extended) { } @InterfaceAudience.Private - public MutableRate newRate(String name, String desc, - boolean extended, boolean returnExisting) { + public MutableRate newRate(String name, String desc, boolean extended, boolean returnExisting) { if (returnExisting) { MutableMetric rate = metricsMap.get(name); if (rate != null) { @@ -217,8 +203,7 @@ public MutableRate newRate(String name, String desc, return (MutableRate) rate; } - throw new MetricsException("Unexpected metrics type "+ rate.getClass() - +" for "+ name); + throw new MetricsException("Unexpected metrics type " + rate.getClass() + " for " + name); } } MutableRate ret = new MutableRate(name, desc, extended); @@ -244,7 +229,7 @@ public MutableHistogram newHistogram(String name, String desc) { MutableHistogram histo = new MutableHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableHistogram.class); } - + /** * Create a new histogram with time range counts. * @param name Name of the histogram. @@ -264,7 +249,7 @@ public MutableTimeHistogram newTimeHistogram(String name, String desc) { MutableTimeHistogram histo = new MutableTimeHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class); } - + /** * Create a new histogram with size range counts. * @param name Name of the histogram. @@ -285,7 +270,6 @@ public MutableSizeHistogram newSizeHistogram(String name, String desc) { return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class); } - synchronized void add(String name, MutableMetric metric) { addNewMetricIfAbsent(name, metric, MutableMetric.class); } @@ -301,12 +285,10 @@ public void add(String name, long value) { if (m != null) { if (m instanceof MutableStat) { ((MutableStat) m).add(value); + } else { + throw new MetricsException("Unsupported add(value) for metric " + name); } - else { - throw new MetricsException("Unsupported add(value) for metric "+ name); - } - } - else { + } else { metricsMap.put(name, newRate(name)); // default is a rate metric add(name, value); } @@ -323,9 +305,9 @@ public DynamicMetricsRegistry setContext(String name) { /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag - * @param value of the tag + * @param value of the tag * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value) { @@ -334,21 +316,21 @@ public DynamicMetricsRegistry tag(String name, String description, String value) /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag - * @param value of the tag - * @param override existing tag if true + * @param value of the tag + * @param override existing tag if true * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { + boolean override) { return tag(new MetricsInfoImpl(name, description), value, override); } /** * Add a tag to the metrics - * @param info metadata of the tag - * @param value of the tag + * @param info metadata of the tag + * @param value of the tag * @param override existing tag if true * @return the registry (for keep adding tags etc.) 
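The tag() overloads above all return the registry, so calls chain; a small illustrative sketch of tagging and then snapshotting into a record builder obtained from a MetricsCollector, with the context and tag values invented for the example.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;

public class TagSketch {
  // Copies the registry's tags and metrics into a record obtained from the collector.
  static void describe(DynamicMetricsRegistry registry, MetricsCollector collector) {
    registry.setContext("regionserver")
        .tag("table", "Table this source describes", "exampleTable")
        .tag("table", "Table this source describes", "exampleTableRenamed", true); // override existing tag
    registry.snapshot(collector.addRecord(registry.info()).setContext("regionserver"), true);
  }
}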
*/ @@ -358,7 +340,7 @@ public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean overri if (!override) { MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag); if (existing != null) { - throw new MetricsException("Tag "+ info.name() +" already exists!"); + throw new MetricsException("Tag " + info.name() + " already exists!"); } return this; } @@ -383,7 +365,7 @@ Collection metrics() { /** * Sample all the mutable metrics and put the snapshot in the builder * @param builder to contain the metrics snapshot - * @param all get all the metrics even if the values are not changed. + * @param all get all the metrics even if the values are not changed. */ public void snapshot(MetricsRecordBuilder builder, boolean all) { for (MetricsTag tag : tags()) { @@ -394,10 +376,10 @@ public void snapshot(MetricsRecordBuilder builder, boolean all) { } } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("info", metricsInfo).add("tags", tags()) + .add("metrics", metrics()).toString(); } /** @@ -410,131 +392,125 @@ public void removeMetric(String name) { } public void removeHistogramMetrics(String baseName) { - for (String suffix:histogramSuffixes) { - removeMetric(baseName+suffix); + for (String suffix : histogramSuffixes) { + removeMetric(baseName + suffix); } } /** - * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. - * + * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeLong getGauge(String gaugeName, long potentialStartingValue) { - //Try and get the guage. + // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeLong newGauge = new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeLong newGauge = + new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeLong should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeLong should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeLong)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeLong"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeLong"); } return (MutableGaugeLong) metric; } /** - * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. - * + * Get a MetricMutableGaugeInt from the storage. 
If it is not there atomically put it. * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeInt getGaugeInt(String gaugeName, int potentialStartingValue) { - //Try and get the guage. + // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeInt newGauge = new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeInt newGauge = + new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeInt should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeInt should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeInt)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeInr"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeInr"); } return (MutableGaugeInt) metric; } /** - * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. - * + * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. * @param counterName Name of the counter to get * @param potentialStartingValue starting value if we have to create a new counter */ public MutableFastCounter getCounter(String counterName, long potentialStartingValue) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. MutableMetric counter = metricsMap.get(counterName); if (counter == null) { MutableFastCounter newCounter = - new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); + new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); counter = metricsMap.putIfAbsent(counterName, newCounter); if (counter == null) { return newCounter; } } - if (!(counter instanceof MutableCounter)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - counterName + " and not of type MutableCounter"); + throw new MetricsException("Metric already exists in registry for metric name: " + counterName + + " and not of type MutableCounter"); } return (MutableFastCounter) counter; } public MutableHistogram getHistogram(String histoName) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. 
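getGauge, getGaugeInt, getCounter and getHistogram above all follow the same lock-free create-or-get idiom; the following is a condensed, generic restatement of that idiom in plain JDK code (not part of this patch) for readers skimming the hunks.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CreateOrGetSketch {
  private final ConcurrentMap<String, Object> map = new ConcurrentHashMap<>();

  // Returns the value already registered under the key, or installs and returns
  // the freshly created candidate. putIfAbsent() returning null means our
  // candidate won the race; anything else is the previously installed value.
  <T> T createOrGet(String key, T candidate, Class<T> type) {
    Object existing = map.putIfAbsent(key, candidate);
    if (existing == null) {
      return candidate;
    }
    if (!type.isInstance(existing)) {
      throw new IllegalStateException(key + " is already registered as " + existing.getClass());
    }
    return type.cast(existing);
  }
}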
MutableMetric histo = metricsMap.get(histoName); if (histo == null) { - MutableHistogram newCounter = - new MutableHistogram(new MetricsInfoImpl(histoName, "")); + MutableHistogram newCounter = new MutableHistogram(new MetricsInfoImpl(histoName, "")); histo = metricsMap.putIfAbsent(histoName, newCounter); if (histo == null) { return newCounter; } } - if (!(histo instanceof MutableHistogram)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - histoName + " and not of type MutableHistogram"); + throw new MetricsException("Metric already exists in registry for metric name: " + histoName + + " and not of type MutableHistogram"); } return (MutableHistogram) histo; } - private T addNewMetricIfAbsent(String name, T ret, - Class metricClass) { - //If the value we get back is null then the put was successful and we will + private T addNewMetricIfAbsent(String name, T ret, + Class metricClass) { + // If the value we get back is null then the put was successful and we will // return that. Otherwise metric should contain the thing that was in // before the put could be completed. MutableMetric metric = metricsMap.putIfAbsent(name, ret); @@ -546,19 +522,17 @@ private T addNewMetricIfAbsent(String name, T ret, } @SuppressWarnings("unchecked") - private T returnExistingWithCast(MutableMetric metric, - Class metricClass, String name) { + private T returnExistingWithCast(MutableMetric metric, Class metricClass, String name) { if (!metricClass.isAssignableFrom(metric.getClass())) { - throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass + - " but instead of type " + metric.getClass()); + throw new MetricsException("Metric already exists in registry for metric name: " + name + + " and not of type " + metricClass + " but instead of type " + metric.getClass()); } return (T) metric; } public void clearMetrics() { - for (String name:metricsMap.keySet()) { + for (String name : metricsMap.keySet()) { helper.removeObjectName(name); } metricsMap.clear(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java index d24f23f7f359..1b4d9ecc8f11 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.yetus.audience.InterfaceAudience; /** - * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by - * MetricsRegionAggregateSourceImpl, and - * JmxCacheBuster + * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by + * MetricsRegionAggregateSourceImpl, and JmxCacheBuster */ @InterfaceAudience.Private public class MetricsExecutorImpl implements MetricsExecutor { @@ -48,8 +45,9 @@ public void stop() { private enum ExecutorSingleton { INSTANCE; - private final transient ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1, - new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); + + private final transient ScheduledExecutorService scheduler = + new ScheduledThreadPoolExecutor(1, new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); } private final static class ThreadPoolExecutorThreadFactory implements ThreadFactory { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java index 7b5ec024a508..f8f8aee35501 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java index dc86ebe8bf76..d5356aecda36 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.commons.lang3.StringUtils; @@ -51,7 +50,8 @@ public void add(final long val) { histogram.update(val); } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } @@ -65,7 +65,7 @@ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boo } public static void snapshot(String name, String desc, Histogram histogram, - MetricsRecordBuilder metricsRecordBuilder, boolean all) { + MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. Snapshot snapshot = histogram.snapshot(); if (snapshot != null) { @@ -74,29 +74,29 @@ public static void snapshot(String name, String desc, Histogram histogram, } protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram, - Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) { + Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) { metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), - histogram.getCount()); + histogram.getCount()); metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin()); metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax()); metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean()); metricsRecordBuilder.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get25thPercentile()); + snapshot.get25thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc), - snapshot.getMedian()); + snapshot.getMedian()); metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get75thPercentile()); + snapshot.get75thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get90thPercentile()); + snapshot.get90thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get95thPercentile()); + snapshot.get95thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get98thPercentile()); + snapshot.get98thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get99thPercentile()); + snapshot.get99thPercentile()); metricsRecordBuilder.addGauge( - Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get999thPercentile()); + Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), + snapshot.get999thPercentile()); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java index 507e95400264..a4d316fa9f46 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.hbase.metrics.Interns; @@ -40,16 +39,15 @@ public MutableRangeHistogram(String name, String description) { } /** - * Returns the type of range histogram size or time + * Returns the type of range histogram size or time */ public abstract String getRangeType(); - + /** - * Returns the ranges to be counted + * Returns the ranges to be counted */ public abstract long[] getRanges(); - @Override public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. @@ -61,7 +59,7 @@ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boo } public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder, - Snapshot snapshot) { + Snapshot snapshot) { long priorRange = 0; long cumNum = 0; @@ -71,8 +69,8 @@ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder long val = snapshot.getCountAtOrBelow(ranges[i]); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), + val - cumNum); } priorRange = ranges[i]; cumNum = val; @@ -80,12 +78,12 @@ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder long val = snapshot.getCount(); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), val - cumNum); } } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java index b02efb76f9d8..07c29ef636eb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; @@ -28,7 +27,8 @@ public class MutableSizeHistogram extends MutableRangeHistogram { private final static String RANGE_TYPE = "SizeRangeCount"; - private final static long[] RANGES = {10,100,1000,10000,100000,1000000,10000000,100000000}; + private final static long[] RANGES = + { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; public MutableSizeHistogram(MetricsInfo info) { this(info.name(), info.description()); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java index 7c6dfbbd5776..42418de944db 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java index 84a76edf72e9..1256dcc99991 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java @@ -20,8 +20,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies a quantile (with error bounds) to be watched by a - * {@link MetricSampleQuantiles} object. + * Specifies a quantile (with error bounds) to be watched by a {@link MetricSampleQuantiles} object. */ @InterfaceAudience.Private public class MetricQuantile { @@ -54,12 +53,11 @@ public boolean equals(Object aThat) { long ebits = Double.doubleToLongBits(error); return qbits == Double.doubleToLongBits(that.quantile) - && ebits == Double.doubleToLongBits(that.error); + && ebits == Double.doubleToLongBits(that.error); } @Override public int hashCode() { - return (int) (Double.doubleToLongBits(quantile) ^ Double - .doubleToLongBits(error)); + return (int) (Double.doubleToLongBits(quantile) ^ Double.doubleToLongBits(error)); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java index c1880f8203ba..cf01b099bb87 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.util; import java.io.IOException; @@ -24,24 +23,16 @@ import java.util.LinkedList; import java.util.ListIterator; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm - * for streaming calculation of targeted high-percentile epsilon-approximate - * quantiles. - * - * This is a generalization of the earlier work by Greenwald and Khanna (GK), - * which essentially allows different error bounds on the targeted quantiles, - * which allows for far more efficient calculation of high-percentiles. - * - * See: Cormode, Korn, Muthukrishnan, and Srivastava - * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 - * - * Greenwald and Khanna, - * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 - * + * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm for streaming + * calculation of targeted high-percentile epsilon-approximate quantiles. This is a generalization + * of the earlier work by Greenwald and Khanna (GK), which essentially allows different error bounds + * on the targeted quantiles, which allows for far more efficient calculation of high-percentiles. + * See: Cormode, Korn, Muthukrishnan, and Srivastava "Effective Computation of Biased Quantiles over + * Data Streams" in ICDE 2005 Greenwald and Khanna, "Space-efficient online computation of quantile + * summaries" in SIGMOD 2001 */ @InterfaceAudience.Private public class MetricSampleQuantiles { @@ -57,9 +48,8 @@ public class MetricSampleQuantiles { private LinkedList samples; /** - * Buffers incoming items to be inserted in batch. Items are inserted into - * the buffer linearly. When the buffer fills, it is flushed into the samples - * array in its entirety. + * Buffers incoming items to be inserted in batch. Items are inserted into the buffer linearly. + * When the buffer fills, it is flushed into the samples array in its entirety. */ private long[] buffer = new long[500]; private int bufferCount = 0; @@ -75,14 +65,9 @@ public MetricSampleQuantiles(MetricQuantile[] quantiles) { } /** - * Specifies the allowable error for this rank, depending on which quantiles - * are being targeted. - * - * This is the f(r_i, n) function from the CKMS paper. It's basically how wide - * the range of this rank can be. - * - * @param rank - * the index in the list of samples + * Specifies the allowable error for this rank, depending on which quantiles are being targeted. + * This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this + * rank can be. n * the index in the list of samples */ private double allowableError(int rank) { int size = samples.size(); @@ -104,7 +89,6 @@ private double allowableError(int rank) { /** * Add a new value from the stream. - * * @param v the value to insert */ synchronized public void insert(long v) { @@ -120,8 +104,8 @@ synchronized public void insert(long v) { } /** - * Merges items from buffer into the samples array in one pass. - * This is more efficient than doing an insert on every item. + * Merges items from buffer into the samples array in one pass. This is more efficient than doing + * an insert on every item. */ private void insertBatch() { if (bufferCount == 0) { @@ -166,9 +150,8 @@ private void insertBatch() { } /** - * Try to remove extraneous items from the set of sampled items. 
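A small usage sketch of the CKMS estimator documented above. Only insert(), snapshot(), getCount() and getSampleCount() are visible in this patch; the MetricQuantile(quantile, error) constructor arguments are an assumption inferred from the quantile/error fields shown earlier.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;

public class QuantilesSketch {
  public static void main(String[] args) throws IOException {
    // Track the median and the 99th percentile, each with its own error bound.
    MetricQuantile[] targets = {
      new MetricQuantile(0.50, 0.050), // assumed (quantile, allowed error) ordering
      new MetricQuantile(0.99, 0.001)
    };
    MetricSampleQuantiles estimator = new MetricSampleQuantiles(targets);

    for (long latencyMs = 1; latencyMs <= 10000; latencyMs++) {
      estimator.insert(latencyMs); // buffered, flushed into the sample list in batches
    }

    // snapshot() throws IOException if nothing has been inserted yet.
    Map<MetricQuantile, Long> snapshot = estimator.snapshot();
    System.out.println("processed " + estimator.getCount() + " items, kept "
        + estimator.getSampleCount() + " samples: " + snapshot);
  }
}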
This checks - * if an item is unnecessary based on the desired error bounds, and merges it - * with the adjacent item if it is. + * Try to remove extraneous items from the set of sampled items. This checks if an item is + * unnecessary based on the desired error bounds, and merges it with the adjacent item if it is. */ private void compress() { if (samples.size() < 2) { @@ -196,7 +179,6 @@ private void compress() { /** * Get the estimated value at the specified quantile. - * * @param quantile Queried quantile, e.g. 0.50 or 0.99. * @return Estimated value at that quantile. */ @@ -225,10 +207,7 @@ private long query(double quantile) throws IOException { /** * Get a snapshot of the current values of all the tracked quantiles. - * - * @return snapshot of the tracked quantiles - * @throws IOException - * if no items have been added to the estimator + * @return snapshot of the tracked quantiles n * if no items have been added to the estimator */ synchronized public Map snapshot() throws IOException { // flush the buffer first for best results @@ -243,7 +222,6 @@ synchronized public Map snapshot() throws IOException { /** * Returns the number of items that the estimator has processed - * * @return count total number of items processed */ synchronized public long getCount() { @@ -252,7 +230,6 @@ synchronized public long getCount() { /** * Returns the number of samples kept by the estimator - * * @return count current number of samples */ synchronized public int getSampleCount() { @@ -269,27 +246,24 @@ synchronized public void clear() { } /** - * Describes a measured value passed to the estimator, tracking additional - * metadata required by the CKMS algorithm. + * Describes a measured value passed to the estimator, tracking additional metadata required by + * the CKMS algorithm. */ private static class SampleItem { - + /** * Value of the sampled item (e.g. a measured latency value) */ private final long value; - + /** - * Difference between the lowest possible rank of the previous item, and - * the lowest possible rank of this item. - * - * The sum of the g of all previous items yields this item's lower bound. + * Difference between the lowest possible rank of the previous item, and the lowest possible + * rank of this item. The sum of the g of all previous items yields this item's lower bound. */ private int g; - + /** - * Difference between the item's greatest possible rank and lowest possible - * rank. + * Difference between the item's greatest possible rank and lowest possible rank. */ private final int delta; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java index 157327babb28..92442de2b3c6 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - /** * A compatibility shim layer for interacting with different versions of Hadoop. 
*/ -//NOTE: we can move this under src/main if main code wants to use this shim layer +// NOTE: we can move this under src/main if main code wants to use this shim layer public interface HadoopShims { /** * Returns a TaskAttemptContext instance created from the given parameters. - * @param job an instance of o.a.h.mapreduce.Job + * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by - * TaskAttemptId.forName() + * TaskAttemptId.forName() * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ - T createTestTaskAttemptContext(final J job, final String taskId); + T createTestTaskAttemptContext(final J job, final String taskId); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java index a022ef3e0183..533d2d6d1046 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.hadoop.mapreduce.Job; @@ -28,7 +27,7 @@ public class HadoopShimsImpl implements HadoopShims { /** * Returns a TaskAttemptContext instance created from the given parameters. - * @param job an instance of o.a.h.mapreduce.Job + * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by * {@link TaskAttemptID#forName(String)} * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext @@ -36,7 +35,7 @@ public class HadoopShimsImpl implements HadoopShims { @Override @SuppressWarnings("unchecked") public T createTestTaskAttemptContext(J job, String taskId) { - Job j = (Job)job; - return (T)new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); + Job j = (Job) job; + return (T) new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java index f72843cc4b01..8ea3da856af0 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; public interface RandomStringGenerator { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java index 91cd19ef009c..8f8fbd66f983 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -27,8 +25,8 @@ public class RandomStringGeneratorImpl implements RandomStringGenerator { private final String s; public RandomStringGeneratorImpl() { - s = new UUID(ThreadLocalRandom.current().nextLong(), - ThreadLocalRandom.current().nextLong()).toString(); + s = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()) + .toString(); } @Override diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java index 27888db0f6d2..e963a439efa7 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestCompatibilitySingletonFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class); + HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class); private static final int ITERATIONS = 100000; @@ -47,9 +47,8 @@ private class TestCompatibilitySingletonFactoryCallable implements Callable callables = new ArrayList<>(ITERATIONS); List resultStrings = new ArrayList<>(ITERATIONS); - // Create the callables. for (int i = 0; i < ITERATIONS; i++) { callables.add(new TestCompatibilitySingletonFactoryCallable()); @@ -77,7 +75,6 @@ public void testGetInstance() throws Exception { // Get the first string. String firstString = resultStrings.get(0); - // Assert that all the strings are equal to the fist. 
for (String s : resultStrings) { assertEquals(firstString, s); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java index d95c282ecf99..04f95bf2eb91 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,23 +29,23 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterProcSourceImpl + * Test for MetricsMasterProcSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterProcSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsMasterProcSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsMasterProcSourceImpl.class); @Test public void testGetInstance() throws Exception { - MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterProcSourceFactory.class); + MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class); MetricsMasterProcSource masterProcSource = metricsMasterProcSourceFactory.create(null); assertTrue(masterProcSource instanceof MetricsMasterProcSourceImpl); assertSame(metricsMasterProcSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java index 7d35e846b5c0..a30c6249a4a3 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java index 70ec90ab39a2..8dc1ae6e1636 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java @@ -29,21 +29,21 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterSourceImpl + * Test for MetricsMasterSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsMasterSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsMasterSourceImpl.class); @Test public void testGetInstance() { - MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterSourceFactory.class); + MetricsMasterSourceFactory metricsMasterSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); assertTrue(masterSource instanceof MetricsMasterSourceImpl); - assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance( - MetricsMasterSourceFactory.class)); + assertSame(metricsMasterSourceFactory, + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java index 063071b43173..63f22143a91f 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,14 @@ import org.junit.experimental.categories.Category; /** - * Test of default BaseSource for hadoop 2 + * Test of default BaseSource for hadoop 2 */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestBaseSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBaseSourceImpl.class); + HBaseClassTestRule.forClass(TestBaseSourceImpl.class); private static BaseSourceImpl bmsi; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index dbdc92da8ac4..029b75e52cf3 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.HashMap; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java index c7594b4ff3b3..425b37df59d2 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java index 86a94baf72fd..3cda99b134eb 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java @@ -28,24 +28,22 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionServerSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRegionServerSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsRegionServerSourceImpl.class); @Test public void testGetInstance() { MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsRegionServerSource serverSource = - metricsRegionServerSourceFactory.createServer(null); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSource serverSource = metricsRegionServerSourceFactory.createServer(null); assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); assertSame(metricsRegionServerSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); } - @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() { // This should throw an exception because MetricsRegionServerSourceImpl should only diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java index 598658a56ccc..46b6405eb46e 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; @@ -31,17 +30,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, 
SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRegionSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsRegionSourceImpl.class); @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() { - MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance( - MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java index 11177edcafb3..f1694801d7be 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java @@ -30,27 +30,26 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsTableSourceImpl + * Test for MetricsTableSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsTableSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsTableSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsTableSourceImpl.class); @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCode() throws Exception { MetricsRegionServerSourceFactory metricsFact = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsTableSource one = metricsFact.createTable( - "ONETABLE", new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource oneClone = metricsFact.createTable( - "ONETABLE", - new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource two = metricsFact.createTable( - "TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); + MetricsTableSource one = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource oneClone = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource two = + metricsFact.createTable("TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); assertEquals(0, one.compareTo(oneClone)); assertEquals(one.hashCode(), oneClone.hashCode()); @@ -72,7 +71,7 @@ public void testNoGetTableMetricsSourceImpl() { @Test public void testGetTableMetrics() { MetricsTableSource oneTbl = - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) .createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); assertEquals("ONETABLE", oneTbl.getTableName()); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java index 8a72961edadc..b339dd8cc522 100644 --- 
a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; @@ -30,18 +29,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsUserSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsUserSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsUserSourceImpl.class); @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() throws Exception { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); MetricsUserSource oneClone = fact.createUser("ONE"); @@ -57,8 +56,7 @@ public void testCompareToHashCodeEquals() throws Exception { assertTrue(two.compareTo(two) == 0); } - - @Test (expected = RuntimeException.class) + @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() throws Exception { // This should throw an exception because MetricsUserSourceImpl should only // be created by a factory. @@ -67,8 +65,8 @@ public void testNoGetRegionServerMetricsSourceImpl() throws Exception { @Test public void testGetUser() { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); assertEquals("ONE", one.getUser()); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java index e6ffbc98f321..deb9dfa3fe9a 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java index d8ec0af92bb0..300d536dc5f6 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,19 +28,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsWALSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsWALSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsWALSourceImpl.class); @Test public void testGetInstance() throws Exception { - MetricsWALSource walSource = - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); + MetricsWALSource walSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); assertTrue(walSource instanceof MetricsWALSourceImpl); - assertSame(walSource, - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); + assertSame(walSource, CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java index 68f6fda9ee80..b9198bac65f3 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java index 6cc26e2a4dd1..c825d01b1628 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java @@ -27,16 +27,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactoryImpl.class); + HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactoryImpl.class); @Test public void testGetInstance() { - MetricsReplicationSourceFactory rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class); + MetricsReplicationSourceFactory rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class); assertTrue(rms instanceof MetricsReplicationSourceFactoryImpl); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java index faff4b389176..6c2284131049 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java @@ -27,16 +27,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsReplicationSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsReplicationSourceImpl.class); @Test public void testGetInstance() throws Exception { - MetricsReplicationSource rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSource.class); + MetricsReplicationSource rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); assertTrue(rms instanceof MetricsReplicationSourceImpl); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java index 57acdcb4539f..aae15e8fec00 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java index 2ac7996485e4..d1901f68bccc 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java @@ -31,16 +31,16 @@ /** * Test for hadoop 2's version of {@link MetricsRESTSource}. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRESTSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRESTSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsRESTSourceImpl.class); @Test public void ensureCompatRegistered() { assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) - instanceof MetricsRESTSourceImpl); + assertTrue(CompatibilitySingletonFactory + .getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java index 49d25723b880..eccbecacb316 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -24,157 +23,141 @@ public interface MetricsAssertHelper { /** - * Init helper. This method will make sure that the metrics system is set - * up for tests. + * Init helper. This method will make sure that the metrics system is set up for tests. */ void init(); /** * Assert that a tag exists and has a given value. - * * @param name The name of the tag. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertTag(String name, String expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. 
*/ void assertGauge(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGauge(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, double expected, BaseSource source); /** * Assert that a counter exists and that it's value is equal to the expected value. - * * @param name The name of the counter. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounter(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is greater than the given value. - * * @param name The name of the counter. * @param expected The value the counter is expected to be greater than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterGt(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is less than the given value. - * * @param name The name of the counter. * @param expected The value the counter is expected to be less than. 
- * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterLt(String name, long expected, BaseSource source); /** * Get the value of a counter. - * * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the counter. */ long getCounter(String name, BaseSource source); /** * Check if a dynamic counter exists. - * * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if counter metric exists. */ boolean checkCounterExists(String name, BaseSource source); /** * Check if a gauge exists. - * * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if gauge metric exists. */ boolean checkGaugeExists(String name, BaseSource source); /** * Get the value of a gauge as a double. - * * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return double value of the gauge. */ double getGaugeDouble(String name, BaseSource source); /** * Get the value of a gauge as a long. - * * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the gauge. */ long getGaugeLong(String name, BaseSource source); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 83e25a636f07..7c7357c4f049 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import static org.junit.Assert.assertEquals; @@ -25,7 +24,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsCollector; @@ -36,7 +34,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; /** - * A helper class that will allow tests to get into hadoop2's metrics2 values. + * A helper class that will allow tests to get into hadoop2's metrics2 values. 
*/ public class MetricsAssertHelperImpl implements MetricsAssertHelper { private Map tags = new HashMap<>(); @@ -203,8 +201,8 @@ public void assertCounterLt(String name, long expected, BaseSource source) { public long getCounter(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get counter "+cName + " but did not",counters.get(cName)); - return counters.get(cName).longValue(); + assertNotNull("Should get counter " + cName + " but did not", counters.get(cName)); + return counters.get(cName).longValue(); } @Override @@ -225,8 +223,8 @@ public boolean checkGaugeExists(String name, BaseSource source) { public double getGaugeDouble(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get gauge "+cName + " but did not",gauges.get(cName)); - return gauges.get(cName).doubleValue(); + assertNotNull("Should get gauge " + cName + " but did not", gauges.get(cName)); + return gauges.get(cName).doubleValue(); } @Override diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java index 0b9c0f12e719..498a27bd9e87 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java index 7206810ab138..3bba46bcfa95 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java @@ -32,32 +32,32 @@ /** * Test for hadoop 2's version of MetricsThriftServerSourceFactory. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsThriftServerSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactoryImpl.class); + HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactoryImpl.class); @Test public void testCompatabilityRegistered() { - assertNotNull(CompatibilitySingletonFactory.getInstance( - MetricsThriftServerSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) - instanceof MetricsThriftServerSourceFactoryImpl); + assertNotNull( + CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); + assertTrue(CompatibilitySingletonFactory.getInstance( + MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); } @Test public void testCreateThriftOneSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. 
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); } @Test public void testCreateThriftTwoSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java index 11e984bbe4d1..0ce271d323dd 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java index a199a78938a8..bc200fd1e384 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java @@ -28,16 +28,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsZooKeeperSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsZooKeeperSourceImpl.class); + HBaseClassTestRule.forClass(TestMetricsZooKeeperSourceImpl.class); @Test public void testGetInstance() { MetricsZooKeeperSource zkSource = - CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); + CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); assertTrue(zkSource instanceof MetricsZooKeeperSourceImpl); assertSame(zkSource, CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class)); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java index 1cf8702b7b12..62595a96bbda 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -30,9 +31,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.ArrayList; -import java.util.List; - @Category({ MetricsTests.class, SmallTests.class }) public class TestMutableRangeHistogram { diff --git a/hbase-hbtop/pom.xml b/hbase-hbtop/pom.xml index 08f1c07fbde6..41ecdc536ac2 100644 --- a/hbase-hbtop/pom.xml +++ b/hbase-hbtop/pom.xml @@ -1,7 +1,5 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-hbtop Apache HBase - HBTop A real-time monitoring tool for HBase like Unix's top command - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.hbase @@ -107,4 +96,13 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java index 9c1a000831a2..7e6944f73e7e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; - /** * A real-time monitoring tool for HBase like Unix top command. */ @@ -212,15 +211,12 @@ public int run(String[] args) throws Exception { private Options getOptions() { Options opts = new Options(); - opts.addOption("h", "help", false, - "Print usage; for help while the tool is running press 'h'"); - opts.addOption("d", "delay", true, - "The refresh delay (in seconds); default is 3 seconds"); + opts.addOption("h", "help", false, "Print usage; for help while the tool is running press 'h'"); + opts.addOption("d", "delay", true, "The refresh delay (in seconds); default is 3 seconds"); opts.addOption("m", "mode", true, "The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)|u (User)" + "|c (Client), default is r"); - opts.addOption("n", "numberOfIterations", true, - "The number of iterations"); + opts.addOption("n", "numberOfIterations", true, "The number of iterations"); opts.addOption("s", "sortField", true, "The initial sort field. You can prepend a `+' or `-' to the field name to also override" + " the sort direction. A leading `+' will force sorting high to low, whereas a `-' will" diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java index 577172a38cb2..3331cd03550f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,7 +94,8 @@ public static Record ofEntries(Entry... 
entries) { } public static Record ofEntries(Stream entries) { - return entries.collect(Record::builder, Builder::put, (r1, r2) -> {}).build(); + return entries.collect(Record::builder, Builder::put, (r1, r2) -> { + }).build(); } private Record(ImmutableMap values) { @@ -165,12 +166,11 @@ public Set> entrySet() { } public Record combine(Record o) { - return ofEntries(values.keySet().stream() - .map(k -> { - if (k.getFieldValueType() == FieldValueType.STRING) { - return entry(k, values.get(k)); - } - return entry(k, values.get(k).plus(o.values.get(k))); - })); + return ofEntries(values.keySet().stream().map(k -> { + if (k.getFieldValueType() == FieldValueType.STRING) { + return entry(k, values.get(k)); + } + return entry(k, values.get(k).plus(o.values.get(k))); + })); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java index 78adf7cce009..7d14f5691dee 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldValue; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a filter that's filtering the metric {@link Record}s. */ @@ -67,8 +66,10 @@ public static RecordFilter parse(String filterString, List fields, boolea } StringBuilder fieldString = new StringBuilder(); - while (filterString.length() > index && filterString.charAt(index) != '<' - && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') { + while ( + filterString.length() > index && filterString.charAt(index) != '<' + && filterString.charAt(index) != '>' && filterString.charAt(index) != '=' + ) { fieldString.append(filterString.charAt(index++)); } @@ -82,8 +83,10 @@ public static RecordFilter parse(String filterString, List fields, boolea } StringBuilder operatorString = new StringBuilder(); - while (filterString.length() > index && (filterString.charAt(index) == '<' || - filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) { + while ( + filterString.length() > index && (filterString.charAt(index) == '<' + || filterString.charAt(index) == '>' || filterString.charAt(index) == '=') + ) { operatorString.append(filterString.charAt(index++)); } @@ -166,8 +169,7 @@ public boolean execute(Record record) { return not != ret; } - int compare = ignoreCase ? - fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); + int compare = ignoreCase ? fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); boolean ret; switch (operator) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java index df460dd31cf2..ab776cf03368 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents fields that are displayed in the top screen. */ @@ -34,8 +33,7 @@ public enum Field { REGION("REGION", "Encoded Region Name", false, true, FieldValueType.STRING), REGION_SERVER("RS", "Short Region Server Name", true, true, FieldValueType.STRING), LONG_REGION_SERVER("LRS", "Long Region Server Name", true, true, FieldValueType.STRING), - REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, - FieldValueType.LONG), + REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, FieldValueType.LONG), READ_REQUEST_COUNT_PER_SECOND("#READ/S", "Read Request Count per second", false, false, FieldValueType.LONG), FILTERED_READ_REQUEST_COUNT_PER_SECOND("#FREAD/S", "Filtered Read Request Count per second", @@ -49,8 +47,7 @@ public enum Field { MEM_STORE_SIZE("MEMSTORE", "MemStore Size", false, false, FieldValueType.SIZE), LOCALITY("LOCALITY", "Block Locality", false, false, FieldValueType.FLOAT), START_KEY("SKEY", "Start Key", true, true, FieldValueType.STRING), - COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, - FieldValueType.LONG), + COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, FieldValueType.LONG), COMPACTED_CELL_COUNT("#COMPedCELL", "Compacted Cell Count", false, false, FieldValueType.LONG), COMPACTION_PROGRESS("%COMP", "Compaction Progress", false, false, FieldValueType.PERCENT), LAST_MAJOR_COMPACTION_TIME("LASTMCOMP", "Last Major Compaction Time", false, true, diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java index 3f0e5f7ad1d3..ad153210dd9e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,11 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about a field. - * - * This has a {@link Field} itself and additional information (e.g. {@code defaultLength} and - * {@code displayByDefault}). This additional information is different between the - * {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. That's why the - * additional information is separated from {@link Field}. + * Information about a field. This has a {@link Field} itself and additional information (e.g. + * {@code defaultLength} and {@code displayByDefault}). This additional information is different + * between the {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. + * That's why the additional information is separated from {@link Field}. 
*/ @InterfaceAudience.Private public class FieldInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java index 086dadc3e290..4c4a29c0bb53 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +22,8 @@ import org.apache.hadoop.hbase.Size; import org.apache.yetus.audience.InterfaceAudience; - /** - * Represents a value of a field. - * - * The type of a value is defined by {@link FieldValue}. + * Represents a value of a field. The type of a value is defined by {@link FieldValue}. */ @InterfaceAudience.Private public final class FieldValue implements Comparable { @@ -103,23 +100,29 @@ public final class FieldValue implements Comparable { private Size optimizeSize(Size size) { if (size.get(Size.Unit.BYTE) < 1024d) { - return size.getUnit() == Size.Unit.BYTE ? - size : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); + return size.getUnit() == Size.Unit.BYTE + ? size + : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); } else if (size.get(Size.Unit.KILOBYTE) < 1024d) { - return size.getUnit() == Size.Unit.KILOBYTE ? - size : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); + return size.getUnit() == Size.Unit.KILOBYTE + ? size + : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); } else if (size.get(Size.Unit.MEGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.MEGABYTE ? - size : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); + return size.getUnit() == Size.Unit.MEGABYTE + ? size + : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); } else if (size.get(Size.Unit.GIGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.GIGABYTE ? - size : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); + return size.getUnit() == Size.Unit.GIGABYTE + ? size + : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); } else if (size.get(Size.Unit.TERABYTE) < 1024d) { - return size.getUnit() == Size.Unit.TERABYTE ? - size : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); + return size.getUnit() == Size.Unit.TERABYTE + ? size + : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); } - return size.getUnit() == Size.Unit.PETABYTE ? - size : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); + return size.getUnit() == Size.Unit.PETABYTE + ? 
size + : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); } private Size parseSizeString(String sizeString) { @@ -133,7 +136,7 @@ private Size parseSizeString(String sizeString) { } private Size.Unit convertToUnit(String unitSimpleName) { - for (Size.Unit unit: Size.Unit.values()) { + for (Size.Unit unit : Size.Unit.values()) { if (unitSimpleName.equals(unit.getSimpleName())) { return unit; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java index e2edae87b800..e9825d9206a4 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,15 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the type of a {@link FieldValue}. */ @InterfaceAudience.Private public enum FieldValueType { - STRING, INTEGER, LONG, FLOAT, SIZE, PERCENT + STRING, + INTEGER, + LONG, + FLOAT, + SIZE, + PERCENT } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java index fe3edd1b2544..8327b1425cfb 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.UserMetrics; @@ -41,51 +40,53 @@ /** * Implementation for {@link ModeStrategy} for client Mode. 
*/ -@InterfaceAudience.Private public final class ClientModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class ClientModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.CLIENT, 0, true), - new FieldInfo(Field.USER_COUNT, 5, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.CLIENT, 0, true), new FieldInfo(Field.USER_COUNT, 5, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final Map requestCountPerSecondMap = new HashMap<>(); ClientModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { List records = createRecords(clusterMetrics); return aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, - Field.USER_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, + Field.USER_COUNT); } List createRecords(ClusterMetrics clusterMetrics) { List ret = new ArrayList<>(); for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) { long lastReportTimestamp = serverMetrics.getLastReportTimestamp(); - serverMetrics.getUserMetrics().values().forEach(um -> um.getClientMetrics().values().forEach( - clientMetrics -> ret.add( - createRecord(um.getNameAsString(), clientMetrics, lastReportTimestamp, - serverMetrics.getServerName().getServerName())))); + serverMetrics.getUserMetrics().values() + .forEach(um -> um.getClientMetrics().values() + .forEach(clientMetrics -> ret.add(createRecord(um.getNameAsString(), clientMetrics, + lastReportTimestamp, serverMetrics.getServerName().getServerName())))); } return ret; } /** * Aggregate the records and count the unique values for the given distinctField - * * @param records records to be processed * @param groupBy Field on which group by needs to be done * @param distinctField Field whose unique values needs to be counted @@ -93,40 +94,39 @@ List createRecords(ClusterMetrics clusterMetrics) { * @return aggregated records */ List aggregateRecordsAndAddDistinct(List records, Field groupBy, - Field distinctField, Field uniqueCountAssignedTo) { + Field distinctField, Field uniqueCountAssignedTo) { List result = new ArrayList<>(); - records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values() - .forEach(val -> { - Set distinctValues = new HashSet<>(); - Map map = new HashMap<>(); - for (Record record : val) { - for (Map.Entry field : record.entrySet()) { - if (distinctField.equals(field.getKey())) { - //We will not be adding the field in the new record whose distinct count is required - distinctValues.add(record.get(distinctField)); + 
records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values().forEach(val -> { + Set distinctValues = new HashSet<>(); + Map map = new HashMap<>(); + for (Record record : val) { + for (Map.Entry field : record.entrySet()) { + if (distinctField.equals(field.getKey())) { + // We will not be adding the field in the new record whose distinct count is required + distinctValues.add(record.get(distinctField)); + } else { + if (field.getKey().getFieldValueType() == FieldValueType.STRING) { + map.put(field.getKey(), field.getValue()); + } else { + if (map.get(field.getKey()) == null) { + map.put(field.getKey(), field.getValue()); } else { - if (field.getKey().getFieldValueType() == FieldValueType.STRING) { - map.put(field.getKey(), field.getValue()); - } else { - if (map.get(field.getKey()) == null) { - map.put(field.getKey(), field.getValue()); - } else { - map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); - } - } + map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); } } } - // Add unique count field - map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); - result.add(Record.ofEntries(map.entrySet().stream() - .map(k -> Record.entry(k.getKey(), k.getValue())))); - }); + } + } + // Add unique count field + map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); + result.add( + Record.ofEntries(map.entrySet().stream().map(k -> Record.entry(k.getKey(), k.getValue())))); + }); return result; } Record createRecord(String user, UserMetrics.ClientMetrics clientMetrics, - long lastReportTimestamp, String server) { + long lastReportTimestamp, String server) { Record.Builder builder = Record.builder(); String client = clientMetrics.getHostName(); builder.put(Field.CLIENT, clientMetrics.getHostName()); @@ -137,21 +137,22 @@ Record createRecord(String user, UserMetrics.ClientMetrics clientMetrics, requestCountPerSecondMap.put(mapKey, requestCountPerSecond); } requestCountPerSecond.refresh(lastReportTimestamp, clientMetrics.getReadRequestsCount(), - clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); + clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getReadRequestCountPerSecond()); + requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getWriteRequestCountPerSecond()); + requestCountPerSecond.getWriteRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.USER, user); return builder.build(); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { + @Override + public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); + RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); return new DrillDownInfo(Mode.USER, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java index 
de3d582fb9f1..7061d5374e88 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +21,12 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about drilling down. - * - * When drilling down, going to next {@link Mode} with initial {@link RecordFilter}s. + * Information about drilling down. When drilling down, going to next {@link Mode} with initial + * {@link RecordFilter}s. */ @InterfaceAudience.Private public class DrillDownInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java index ffd98dfd6837..4ae1b4faf333 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a display mode in the top screen. */ @@ -45,7 +44,7 @@ public enum Mode { private final ModeStrategy modeStrategy; Mode(String header, String description, ModeStrategy modeStrategy) { - this.header = Objects.requireNonNull(header); + this.header = Objects.requireNonNull(header); this.description = Objects.requireNonNull(description); this.modeStrategy = Objects.requireNonNull(modeStrategy); } @@ -59,7 +58,7 @@ public String getDescription() { } public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + List pushDownFilters) { return modeStrategy.getRecords(clusterMetrics, pushDownFilters); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java index 021cee25810a..db58f1facae5 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,14 +26,17 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for strategy logic for {@link Mode}. 
*/ @InterfaceAudience.Private interface ModeStrategy { List getFieldInfos(); + Field getDefaultSortField(); + List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters); - @Nullable DrillDownInfo drillDown(Record selectedRecord); + + @Nullable + DrillDownInfo drillDown(Record selectedRecord); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java index 9175820e0cae..6b78be9e2067 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.hadoop.hbase.hbtop.field.Field; @@ -36,28 +35,26 @@ private ModeStrategyUtils() { * @param filters List of filters * @return filtered records */ - public static List applyFilterAndGet(List records, - List filters) { + public static List applyFilterAndGet(List records, List filters) { if (filters != null && !filters.isEmpty()) { return records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } return records; } - /** - * Group by records on the basis of supplied groupBy field and - * Aggregate records using {@link Record#combine(Record)} - * + * Group by records on the basis of supplied groupBy field and Aggregate records using + * {@link Record#combine(Record)} * @param records records needs to be processed * @param groupBy Field to be used for group by * @return aggregated records */ public static List aggregateRecords(List records, Field groupBy) { return records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).entrySet().stream() - .flatMap(e -> e.getValue().stream().reduce(Record::combine).map(Stream::of) - .orElse(Stream.empty())).collect(Collectors.toList()); + .flatMap( + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) + .collect(Collectors.toList()); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java index f74d8bf22ebc..a4a8a88aca38 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -28,15 +27,13 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Namespace Mode. 
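Note on the ModeStrategyUtils.aggregateRecords rewrap above: the pipeline is unchanged, it still groups records by the groupBy field, reduces each group with Record::combine, and flattens the per-group Optional results back into a list. A minimal, self-contained sketch of that stream shape, using an integer sum in place of Record::combine (all names below are illustrative, not hbtop types):

  import java.util.List;
  import java.util.Map;
  import java.util.stream.Collectors;
  import java.util.stream.Stream;

  public class AggregateSketch {
    public static void main(String[] args) {
      List<Map.Entry<String, Integer>> records = List.of(
          Map.entry("ns1", 3), Map.entry("ns1", 4), Map.entry("ns2", 5));

      // groupingBy + reduce + flatMap(Optional -> Stream), same shape as aggregateRecords
      List<Map.Entry<String, Integer>> aggregated = records.stream()
          .collect(Collectors.groupingBy(Map.Entry::getKey)).entrySet().stream()
          .flatMap(e -> e.getValue().stream()
              .reduce((a, b) -> Map.entry(a.getKey(), a.getValue() + b.getValue()))
              .map(Stream::of).orElse(Stream.empty()))
          .collect(Collectors.toList());

      System.out.println(aggregated); // e.g. [ns1=7, ns2=5]
    }
  }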
*/ @InterfaceAudience.Private public final class NamespaceModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), @@ -44,13 +41,11 @@ public final class NamespaceModeStrategy implements ModeStrategy { new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - NamespaceModeStrategy(){ + NamespaceModeStrategy() { } @Override @@ -63,11 +58,12 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field return ModeStrategyUtils.aggregateRecords(records, Field.NAMESPACE); @@ -75,9 +71,8 @@ public Field getDefaultSortField() { @Override public DrillDownInfo drillDown(Record selectedRecord) { - List initialFilters = - Collections.singletonList(RecordFilter.newBuilder(Field.NAMESPACE) - .doubleEquals(selectedRecord.get(Field.NAMESPACE))); + List initialFilters = Collections.singletonList( + RecordFilter.newBuilder(Field.NAMESPACE).doubleEquals(selectedRecord.get(Field.NAMESPACE))); return new DrillDownInfo(Mode.TABLE, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java index 0adbc823bf4c..9a70f61005a1 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.RegionMetrics; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Region Mode. 
*/ @@ -47,29 +45,22 @@ public final class RegionModeStrategy implements ModeStrategy { private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_NAME, 0, false), - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.START_CODE, 13, false), - new FieldInfo(Field.REPLICA_ID, 5, false), - new FieldInfo(Field.REGION, 32, true), - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), + new FieldInfo(Field.REGION_NAME, 0, false), new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.START_CODE, 13, false), + new FieldInfo(Field.REPLICA_ID, 5, false), new FieldInfo(Field.REGION, 32, true), + new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.STORE_FILE_SIZE, 10, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false), - new FieldInfo(Field.NUM_STORE_FILES,4, true), - new FieldInfo(Field.MEM_STORE_SIZE, 8, true), - new FieldInfo(Field.LOCALITY, 8, true), - new FieldInfo(Field.START_KEY, 0, false), + new FieldInfo(Field.NUM_STORE_FILES, 4, true), new FieldInfo(Field.MEM_STORE_SIZE, 8, true), + new FieldInfo(Field.LOCALITY, 8, true), new FieldInfo(Field.START_KEY, 0, false), new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTION_PROGRESS, 7, false), - new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false) - ); + new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false)); private final Map requestCountPerSecondMap = new HashMap<>(); @@ -86,8 +77,9 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { List ret = new ArrayList<>(); for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) { long lastReportTimestamp = sm.getLastReportTimestamp(); @@ -119,8 +111,8 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet tableName = tn.getQualifierAsString(); startKey = Bytes.toStringBinary(elements[1]); startCode = Bytes.toString(elements[2]); - replicaId = elements.length == 4 ? - Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; + replicaId = + elements.length == 4 ? 
Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; region = RegionInfo.encodeRegionName(regionMetrics.getRegionName()); } catch (IOException ignored) { } @@ -145,11 +137,10 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getWriteRequestCountPerSecond()); - builder.put(Field.REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getRequestCountPerSecond()); + builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize()); builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize()); @@ -160,7 +151,7 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet long compactingCellCount = regionMetrics.getCompactingCellCount(); long compactedCellCount = regionMetrics.getCompactedCellCount(); float compactionProgress = 0; - if (compactedCellCount > 0) { + if (compactedCellCount > 0) { compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount); } @@ -178,24 +169,22 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet } /** - * Form new record list with records formed by only fields provided through fieldInfo and - * add a count field for each record with value 1 - * We are doing two operation of selecting and adding new field - * because of saving some CPU cycles on rebuilding the record again - * + * Form new record list with records formed by only fields provided through fieldInfo and add a + * count field for each record with value 1 We are doing two operation of selecting and adding new + * field because of saving some CPU cycles on rebuilding the record again * @param fieldInfos List of FieldInfos required in the record * @param records List of records which needs to be processed * @param countField Field which needs to be added with value 1 for each record * @return records after selecting required fields and adding count field */ List selectModeFieldsAndAddCountField(List fieldInfos, List records, - Field countField) { + Field countField) { - return records.stream().map(record -> Record.ofEntries( - fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) - .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) - .map(record -> Record.builder().putAll(record).put(countField, 1).build()) - .collect(Collectors.toList()); + return records.stream().map( + record -> Record.ofEntries(fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) + .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) + .map(record -> Record.builder().putAll(record).put(countField, 1).build()) + .collect(Collectors.toList()); } @Nullable diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java index 44a9a2c82711..d06060bcc093 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java @@ 
-1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.hbtop.Record; @@ -32,32 +31,27 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for RegionServer Mode. */ @InterfaceAudience.Private public final class RegionServerModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), - new FieldInfo(Field.REGION_COUNT, 7, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.STORE_FILE_SIZE, 13, true), - new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true), - new FieldInfo(Field.USED_HEAP_SIZE, 11, true), - new FieldInfo(Field.MAX_HEAP_SIZE, 11, true) - ); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.REGION_SERVER, 0, true), + new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REGION_COUNT, 7, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.STORE_FILE_SIZE, 13, true), + new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true), + new FieldInfo(Field.USED_HEAP_SIZE, 11, true), new FieldInfo(Field.MAX_HEAP_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - RegionServerModeStrategy(){ + RegionServerModeStrategy() { } @Override @@ -70,15 +64,16 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by LONG_REGION_SERVER field Map retMap = - ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream() - .collect(Collectors.toMap(r -> r.get(Field.LONG_REGION_SERVER).asString(), r -> r)); + ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream() + .collect(Collectors.toMap(r -> r.get(Field.LONG_REGION_SERVER).asString(), r -> r)); // Add USED_HEAP_SIZE field and MAX_HEAP_SIZE field for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) { @@ -87,9 +82,9 @@ 
public Field getDefaultSortField() { continue; } - Record newRecord = Record.builder().putAll(record) - .put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) - .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); + Record newRecord = + Record.builder().putAll(record).put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) + .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); retMap.put(sm.getServerName().getServerName(), newRecord); } @@ -100,8 +95,7 @@ public Field getDefaultSortField() { @Override public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList(RecordFilter - .newBuilder(Field.REGION_SERVER) - .doubleEquals(selectedRecord.get(Field.REGION_SERVER))); + .newBuilder(Field.REGION_SERVER).doubleEquals(selectedRecord.get(Field.REGION_SERVER))); return new DrillDownInfo(Mode.REGION, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java index d546070db71d..72802569750d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for calculating request counts per second. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java index 4acc34412584..735dfdb4a4cd 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,26 +29,21 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Table Mode. 
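RequestCountPerSecond.java is only touched for its header comment here, but it is what the createRecord hunks above feed: refresh(...) is called with the last report timestamp and the cumulative request counters, and the per-second getters are read right after. Its body is not part of this diff, so the following is only an illustrative sketch of the general cumulative-counter-to-rate idea, not the actual class:

  public class RateSketch {
    private long previousTimestamp = -1;
    private long previousCount;
    private long countPerSecond;

    // Turn a cumulative counter plus report timestamps (millis) into a per-second rate.
    public void refresh(long reportTimestamp, long totalCount) {
      if (previousTimestamp >= 0 && reportTimestamp > previousTimestamp) {
        countPerSecond = (totalCount - previousCount) * 1000 / (reportTimestamp - previousTimestamp);
      }
      previousTimestamp = reportTimestamp;
      previousCount = totalCount;
    }

    public long getCountPerSecond() {
      return countPerSecond;
    }

    public static void main(String[] args) {
      RateSketch rate = new RateSketch();
      rate.refresh(1000, 0);
      rate.refresh(4000, 300); // 300 requests over 3 seconds
      System.out.println(rate.getCountPerSecond()); // 100
    }
  }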
*/ @InterfaceAudience.Private public final class TableModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.REGION_COUNT, 7, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); @@ -65,25 +60,21 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field and TABLE field - return records.stream() - .collect(Collectors.groupingBy(r -> { - String namespace = r.get(Field.NAMESPACE).asString(); - String table = r.get(Field.TABLE).asString(); - return TableName.valueOf(namespace, table); - })) - .entrySet().stream() + return records.stream().collect(Collectors.groupingBy(r -> { + String namespace = r.get(Field.NAMESPACE).asString(); + String table = r.get(Field.TABLE).asString(); + return TableName.valueOf(namespace, table); + })).entrySet().stream() .flatMap( - e -> e.getValue().stream() - .reduce(Record::combine) - .map(Stream::of) - .orElse(Stream.empty())) + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) .collect(Collectors.toList()); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java index 605376e12218..d2c9cf4c8ec0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -31,40 +30,44 @@ /** * Implementation for {@link ModeStrategy} for User Mode. 
*/ -@InterfaceAudience.Private public final class UserModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class UserModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.USER, 0, true), - new FieldInfo(Field.CLIENT_COUNT, 7, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.USER, 0, true), new FieldInfo(Field.CLIENT_COUNT, 7, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final ClientModeStrategy clientModeStrategy = new ClientModeStrategy(); UserModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, - List pushDownFilters) { + @Override + public List getRecords(ClusterMetrics clusterMetrics, + List pushDownFilters) { List records = clientModeStrategy.createRecords(clusterMetrics); return clientModeStrategy.aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, - Field.CLIENT_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, + Field.CLIENT_COUNT); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { - //Drill down to client and using selected USER as a filter + @Override + public DrillDownInfo drillDown(Record selectedRecord) { + // Drill down to client and using selected USER as a filter List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); + RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); return new DrillDownInfo(Mode.CLIENT, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java index 8b55d6ec0df3..4620d0896c2c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** * An abstract class for {@link ScreenView} that has the common useful methods and the default * implementations for the abstract methods. 
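For context on the UserModeStrategy.getRecords change above: it filters the per-client records, groups them by USER, and asks ClientModeStrategy to add a CLIENT_COUNT field holding the number of distinct clients per user. A tiny stand-alone sketch of that distinct-count-per-group idea on plain maps (the field names and record shape are made up for illustration):

  import java.util.List;
  import java.util.Map;
  import java.util.stream.Collectors;

  public class DistinctCountSketch {
    public static void main(String[] args) {
      // Each "record" maps a field name to a value; group by USER, count distinct CLIENT values.
      List<Map<String, String>> records = List.of(
          Map.of("USER", "alice", "CLIENT", "host1"),
          Map.of("USER", "alice", "CLIENT", "host2"),
          Map.of("USER", "bob", "CLIENT", "host1"));

      Map<String, Long> clientCountPerUser = records.stream()
          .collect(Collectors.groupingBy(r -> r.get("USER"),
              Collectors.mapping(r -> r.get("CLIENT"),
                  Collectors.collectingAndThen(Collectors.toSet(), s -> (long) s.size()))));

      System.out.println(clientCountPerUser); // {alice=2, bob=1} (map order may vary)
    }
  }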
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java index 2846c25d1cc4..da0bd1e97e64 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This dispatches key presses and timers to the current {@link ScreenView}. */ @@ -58,8 +57,7 @@ public class Screen implements Closeable { public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, @Nullable List initialFields, @Nullable Field initialSortField, @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, - long numberOfIterations, boolean batchMode) - throws IOException { + long numberOfIterations, boolean batchMode) throws IOException { connection = ConnectionFactory.createConnection(conf); admin = connection.getAdmin(); @@ -69,9 +67,8 @@ public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, } else { terminal = new TerminalImpl("hbtop"); } - currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, - initialMode, initialFields, initialSortField, initialAscendingSort, initialFilters, - numberOfIterations); + currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, initialMode, + initialFields, initialSortField, initialAscendingSort, initialFilters, numberOfIterations); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java index f061bff831d4..9291cedb7db4 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,16 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for a screen view that handles key presses and timers. */ @InterfaceAudience.Private public interface ScreenView { void init(); - @Nullable ScreenView handleKeyPress(KeyPress keyPress); - @Nullable ScreenView handleTimer(); + + @Nullable + ScreenView handleKeyPress(KeyPress keyPress); + + @Nullable + ScreenView handleTimer(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java index 45f5fd01efb7..16576475419c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,10 @@ import java.util.EnumMap; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the field screen. */ @@ -63,7 +61,7 @@ public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, Li int headerLength = 0; int descriptionLength = 0; - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); if (field == sortField) { @@ -86,8 +84,8 @@ public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, Li public void init() { fieldScreenView.hideCursor(); fieldScreenView.clearTerminal(); - fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, - currentPosition, headerMaxLength, descriptionMaxLength, moveMode); + fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, currentPosition, + headerMaxLength, descriptionMaxLength, moveMode); fieldScreenView.refreshTerminal(); } @@ -132,7 +130,7 @@ public void pageUp() { } public void pageDown() { - if (currentPosition < fields.size() - 1 && !moveMode) { + if (currentPosition < fields.size() - 1 && !moveMode) { int previousPosition = currentPosition; currentPosition = fields.size() - 1; showField(previousPosition); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java index 165850142247..954786b476af 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can change the displayed fields, the sort key and the order of the fields. 
*/ @@ -122,7 +121,7 @@ public void showFieldScreen(String sortFieldHeader, List fields, int descriptionMaxLength, boolean moveMode) { showScreenDescription(sortFieldHeader); - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); showField(i, field, fieldDisplayMap.get(field), i == currentPosition, headerMaxLength, descriptionMaxLength, moveMode); @@ -143,8 +142,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) { String fieldHeader = String.format("%-" + fieldHeaderMaxLength + "s", field.getHeader()); - String fieldDescription = String.format("%-" + fieldDescriptionMaxLength + "s", - field.getDescription()); + String fieldDescription = + String.format("%-" + fieldDescriptionMaxLength + "s", field.getDescription()); int row = FIELD_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); @@ -157,8 +156,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, printer.startBold(); } - printer.startHighlight() - .printFormat("%s = %s", fieldHeader, fieldDescription).stopHighlight(); + printer.startHighlight().printFormat("%s = %s", fieldHeader, fieldDescription) + .stopHighlight(); if (display) { printer.stopBold(); @@ -172,8 +171,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, printer.startBold(); } - printer.startHighlight().print(fieldHeader).stopHighlight() - .printFormat(" = %s", fieldDescription); + printer.startHighlight().print(fieldHeader).stopHighlight().printFormat(" = %s", + fieldDescription); if (display) { printer.stopBold(); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java index 5002ab8f6c18..218de676d4ec 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,8 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a description of a command that we can execute in the top screen. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java index f170fc57fde1..9534796dfccf 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,35 +19,32 @@ import java.util.Arrays; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the help screen. 
*/ @InterfaceAudience.Private public class HelpScreenPresenter { - private static final CommandDescription[] COMMAND_DESCRIPTIONS = new CommandDescription[] { - new CommandDescription("f", "Add/Remove/Order/Sort the fields"), - new CommandDescription("R", "Toggle the sort order (ascending/descending)"), - new CommandDescription("m", "Select mode"), - new CommandDescription("o", "Add a filter with ignoring case"), - new CommandDescription("O", "Add a filter with case sensitive"), - new CommandDescription("^o", "Show the current filters"), - new CommandDescription("=", "Clear the current filters"), - new CommandDescription("i", "Drill down"), - new CommandDescription( - Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), - "Scroll the metrics"), - new CommandDescription("d", "Change the refresh delay"), - new CommandDescription("X", "Adjust the field length"), - new CommandDescription("", "Refresh the display"), - new CommandDescription("h", "Display this screen"), - new CommandDescription(Arrays.asList("q", ""), "Quit") - }; + private static final CommandDescription[] COMMAND_DESCRIPTIONS = + new CommandDescription[] { new CommandDescription("f", "Add/Remove/Order/Sort the fields"), + new CommandDescription("R", "Toggle the sort order (ascending/descending)"), + new CommandDescription("m", "Select mode"), + new CommandDescription("o", "Add a filter with ignoring case"), + new CommandDescription("O", "Add a filter with case sensitive"), + new CommandDescription("^o", "Show the current filters"), + new CommandDescription("=", "Clear the current filters"), + new CommandDescription("i", "Drill down"), + new CommandDescription( + Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), + "Scroll the metrics"), + new CommandDescription("d", "Change the refresh delay"), + new CommandDescription("X", "Adjust the field length"), + new CommandDescription("", "Refresh the display"), + new CommandDescription("h", "Display this screen"), + new CommandDescription(Arrays.asList("q", ""), "Quit") }; private final HelpScreenView helpScreenView; private final long refreshDelay; diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java index ccdc15737d17..fc4f75dd9667 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The help screen. 
*/ @@ -68,8 +67,8 @@ public void showHelpScreen(long refreshDelay, CommandDescription[] commandDescri private void showScreenDescription(long refreshDelay) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Help for Interactive Commands").stopBold().endOfLine(); - printer.print("Refresh delay: ").startBold() - .print((double) refreshDelay / 1000).stopBold().endOfLine(); + printer.print("Refresh delay: ").startBold().print((double) refreshDelay / 1000).stopBold() + .endOfLine(); } private void showCommandDescription(TerminalPrinter terminalPrinter, diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java index 8cd9879b0ede..07f9dc7ee434 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the mode screen. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java index bda9853028b7..5aa404ef2ef9 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can choose the {@link Mode} in the top screen. 
*/ @@ -43,8 +42,8 @@ public class ModeScreenView extends AbstractScreenView { public ModeScreenView(Screen screen, Terminal terminal, Mode currentMode, Consumer resultListener, ScreenView nextScreenView) { super(screen, terminal); - this.modeScreenPresenter = new ModeScreenPresenter(this, currentMode, resultListener, - nextScreenView); + this.modeScreenPresenter = + new ModeScreenPresenter(this, currentMode, resultListener, nextScreenView); } @Override @@ -106,16 +105,16 @@ public void showModeScreen(Mode currentMode, List modes, int currentPositi showScreenDescription(currentMode); for (int i = 0; i < modes.size(); i++) { - showMode(i, modes.get(i), i == currentPosition, - modeHeaderMaxLength, modeDescriptionMaxLength); + showMode(i, modes.get(i), i == currentPosition, modeHeaderMaxLength, + modeDescriptionMaxLength); } } private void showScreenDescription(Mode currentMode) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Mode Management").stopBold().endOfLine(); - printer.print("Current mode: ") - .startBold().print(currentMode.getHeader()).stopBold().endOfLine(); + printer.print("Current mode: ").startBold().print(currentMode.getHeader()).stopBold() + .endOfLine(); printer.print("Select mode followed by ").endOfLine(); } @@ -123,8 +122,8 @@ public void showMode(int pos, Mode mode, boolean selected, int modeHeaderMaxLeng int modeDescriptionMaxLength) { String modeHeader = String.format("%-" + modeHeaderMaxLength + "s", mode.getHeader()); - String modeDescription = String.format("%-" + modeDescriptionMaxLength + "s", - mode.getDescription()); + String modeDescription = + String.format("%-" + modeDescriptionMaxLength + "s", mode.getDescription()); int row = MODE_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java index 6c6bf1c1b215..b123deff2435 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the filter display mode. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java index e85a4b7df42c..86585ad99280 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,8 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** - * The filter display mode in the top screen. 
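The Header.format() change above is whitespace only, but the method is worth a second look since the rest of the top screen relies on it: it builds a printf-style specifier such as "%-10s" (left-justified) or "%10s" (right-justified) from the field's justification flag and column width. A quick stand-alone illustration of the same construction:

  public class HeaderFormatSketch {
    // Same construction as Header.format(): "%" + ("-" if left-justified) + length + "s"
    static String format(boolean leftJustify, int length) {
      return "%" + (leftJustify ? "-" : "") + length + "s";
    }

    public static void main(String[] args) {
      System.out.println("[" + String.format(format(true, 10), "REGION") + "]");  // [REGION    ]
      System.out.println("[" + String.format(format(false, 10), "REGION") + "]"); // [    REGION]
    }
  }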
- * - * Exit if Enter key is pressed. + * The filter display mode in the top screen. Exit if Enter key is pressed. */ @InterfaceAudience.Private public class FilterDisplayModeScreenView extends AbstractScreenView { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java index df672e9695d9..98a059faacc7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents headers for the metrics in the top screen. */ @@ -36,7 +35,7 @@ public Header(Field field, int length) { } public String format() { - return "%" + (field.isLeftJustify() ? "-" : "") + length + "s"; + return "%" + (field.isLeftJustify() ? "-" : "") + length + "s"; } public Field getField() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java index 8ab858b995f3..e79c50f845f8 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the input mode. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java index ab64a8ade227..7c5cecc8f4a1 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The input mode in the top screen. 
*/ @@ -40,8 +39,8 @@ public InputModeScreenView(Screen screen, Terminal terminal, int row, String mes List histories, Function resultListener) { super(screen, terminal); this.row = row; - this.inputModeScreenPresenter = new InputModeScreenPresenter(this, message, histories, - resultListener); + this.inputModeScreenPresenter = + new InputModeScreenPresenter(this, message, histories, resultListener); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java index 174a15a48432..8a91891e2c62 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,8 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** - * The presentation logic for the message mode. - * - * Exit after 2 seconds or if any key is pressed. + * The presentation logic for the message mode. Exit after 2 seconds or if any key is pressed. */ @InterfaceAudience.Private public class MessageModeScreenPresenter { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java index 0dfa388fad0c..8e8dc35af6ab 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The message mode in the top screen. */ @@ -38,8 +37,7 @@ public MessageModeScreenView(Screen screen, Terminal terminal, int row, String m ScreenView nextScreenView) { super(screen, terminal); this.row = row; - this.messageModeScreenPresenter = - new MessageModeScreenPresenter(this, message, nextScreenView); + this.messageModeScreenPresenter = new MessageModeScreenPresenter(this, message, nextScreenView); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java index b95e6f480e6e..4f93dda8ec5f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for paging for the metrics. 
*/ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java index 03598f66fb48..635fe07a601c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the summary of the metrics. */ @@ -37,8 +36,8 @@ public class Summary { private final double averageLoad; private final long aggregateRequestPerSecond; - public Summary(String currentTime, String version, String clusterId, int servers, - int liveServers, int deadServers, int regionCount, int ritCount, double averageLoad, + public Summary(String currentTime, String version, String clusterId, int servers, int liveServers, + int deadServers, int regionCount, int ritCount, double averageLoad, long aggregateRequestPerSecond) { this.currentTime = Objects.requireNonNull(currentTime); this.version = Objects.requireNonNull(version); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java index 9cbcd18e885f..aca2d0f3a8f0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * The data and business logic for the top screen. 
*/ @@ -87,12 +86,11 @@ public void switchMode(Mode nextMode, Field initialSortField, if (initialFields != null) { List tmp = new ArrayList<>(initialFields); tmp.addAll(currentMode.getFieldInfos().stream().map(FieldInfo::getField) - .filter(f -> !initialFields.contains(f)) - .collect(Collectors.toList())); + .filter(f -> !initialFields.contains(f)).collect(Collectors.toList())); fields = Collections.unmodifiableList(tmp); } else { - fields = Collections.unmodifiableList(currentMode.getFieldInfos().stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + fields = Collections.unmodifiableList( + currentMode.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList())); } if (keepSortFieldAndSortOrderIfPossible) { @@ -146,8 +144,7 @@ public void refreshMetricsData() { } private void refreshSummary(ClusterMetrics clusterMetrics) { - String currentTime = ISO_8601_EXTENDED_TIME_FORMAT - .format(EnvironmentEdgeManager.currentTime()); + String currentTime = ISO_8601_EXTENDED_TIME_FORMAT.format(EnvironmentEdgeManager.currentTime()); String version = clusterMetrics.getHBaseVersion(); String clusterId = clusterMetrics.getClusterId(); int liveServers = clusterMetrics.getLiveServerMetrics().size(); @@ -158,16 +155,15 @@ private void refreshSummary(ClusterMetrics clusterMetrics) { long aggregateRequestPerSecond = clusterMetrics.getLiveServerMetrics().entrySet().stream() .mapToLong(e -> e.getValue().getRequestCountPerSecond()).sum(); - summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, - liveServers, deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); + summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, liveServers, + deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); } private void refreshRecords(ClusterMetrics clusterMetrics) { List records = currentMode.getRecords(clusterMetrics, pushDownFilters); // Filter and sort - records = records.stream() - .filter(r -> filters.stream().allMatch(f -> f.execute(r))) + records = records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) .sorted((recordLeft, recordRight) -> { FieldValue left = recordLeft.get(currentSortField); FieldValue right = recordRight.get(currentSortField); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java index e4e3caee5940..9912e35fd23c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the top screen. 
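The TopScreenModel.refreshRecords rewrap above keeps the same two-stage pipeline: a record survives only if every active filter accepts it (allMatch), and the survivors are then sorted on the current sort field with the sort order applied in the comparator. A compact analogue with plain predicates and integers (the predicate list and sort flag below are stand-ins for hbtop's RecordFilter and ascending-sort state):

  import java.util.List;
  import java.util.function.Predicate;
  import java.util.stream.Collectors;

  public class FilterAndSortSketch {
    public static void main(String[] args) {
      List<Integer> records = List.of(42, 7, 19, 3, 56);
      List<Predicate<Integer>> filters = List.of(v -> v > 5, v -> v < 50);
      boolean ascendingSort = false;

      List<Integer> shown = records.stream()
          // keep a record only if every push-down filter accepts it
          .filter(r -> filters.stream().allMatch(f -> f.test(r)))
          // sort on the "current field", descending unless ascendingSort is set
          .sorted((l, r) -> ascendingSort ? l.compareTo(r) : r.compareTo(l))
          .collect(Collectors.toList());

      System.out.println(shown); // [42, 19, 7]
    }
  }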
*/ @@ -132,8 +131,7 @@ private void adjustFieldLengthIfNeeded() { for (Field f : topScreenModel.getFields()) { if (f.isAutoAdjust()) { int maxLength = topScreenModel.getRecords().stream() - .map(r -> r.get(f).asString().length()) - .max(Integer::compareTo).orElse(0); + .map(r -> r.get(f).asString().length()).max(Integer::compareTo).orElse(0); fieldLengthMap.put(f, Math.max(maxLength, f.getHeader().length())); } } @@ -142,8 +140,7 @@ private void adjustFieldLengthIfNeeded() { private List
<Header>
    getDisplayedHeaders() { List displayFields = - topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).collect(Collectors.toList()); + topScreenModel.getFields().stream().filter(fieldDisplayMap::get).collect(Collectors.toList()); if (displayFields.isEmpty()) { horizontalScroll = 0; @@ -231,8 +228,7 @@ public void end() { } private int getHeaderSize() { - return (int) topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).count(); + return (int) topScreenModel.getFields().stream().filter(fieldDisplayMap::get).count(); } public void switchSortOrder() { @@ -250,10 +246,8 @@ public ScreenView transitionToModeScreen(Screen screen, Terminal terminal) { } public ScreenView transitionToFieldScreen(Screen screen, Terminal terminal) { - return new FieldScreenView(screen, terminal, - topScreenModel.getCurrentSortField(), topScreenModel.getFields(), - fieldDisplayMap, - (sortField, fields, fieldDisplayMap) -> { + return new FieldScreenView(screen, terminal, topScreenModel.getCurrentSortField(), + topScreenModel.getFields(), fieldDisplayMap, (sortField, fields, fieldDisplayMap) -> { topScreenModel.setSortFieldAndFields(sortField, fields); this.fieldDisplayMap.clear(); this.fieldDisplayMap.putAll(fieldDisplayMap); @@ -324,10 +318,9 @@ public ScreenView goToInputModeForRefreshDelay(Screen screen, Terminal terminal, public ScreenView goToInputModeForFilter(Screen screen, Terminal terminal, int row, boolean ignoreCase) { return new InputModeScreenView(screen, terminal, row, - "add filter #" + (topScreenModel.getFilters().size() + 1) + - " (" + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", - topScreenModel.getFilterHistories(), - (inputString) -> { + "add filter #" + (topScreenModel.getFilters().size() + 1) + " (" + + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", + topScreenModel.getFilterHistories(), (inputString) -> { if (inputString.isEmpty()) { return topScreenView; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java index da5c88360d19..467201fcc55d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,10 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** - * The screen that provides a dynamic real-time view for the HBase metrics. - * - * This shows the metric {@link Summary} and the metric {@link Record}s. The summary and the - * metrics are updated periodically (3 seconds by default). + * The screen that provides a dynamic real-time view for the HBase metrics. This shows the metric + * {@link Summary} and the metric {@link Record}s. The summary and the metrics are updated + * periodically (3 seconds by default). 
  */
 @InterfaceAudience.Private
 public class TopScreenView extends AbstractScreenView {
@@ -59,9 +57,11 @@ public TopScreenView(Screen screen, Terminal terminal, long initialRefreshDelay,
     @Nullable Boolean initialAscendingSort, @Nullable List<RecordFilter> initialFilters,
     long numberOfIterations) {
     super(screen, terminal);
-    this.topScreenPresenter = new TopScreenPresenter(this, initialRefreshDelay,
-      new TopScreenModel(admin, initialMode, initialFields, initialSortField,
-        initialAscendingSort, initialFilters), initialFields, numberOfIterations);
+    this.topScreenPresenter =
+      new TopScreenPresenter(
+        this, initialRefreshDelay, new TopScreenModel(admin, initialMode, initialFields,
+          initialSortField, initialAscendingSort, initialFilters),
+        initialFields, numberOfIterations);
   }
 
   @Override
@@ -235,23 +235,17 @@ private void showSummary(Summary summary) {
     printer.print(String.format("HBase hbtop - %s", summary.getCurrentTime())).endOfLine();
     printer.print(String.format("Version: %s", summary.getVersion())).endOfLine();
     printer.print(String.format("Cluster ID: %s", summary.getClusterId())).endOfLine();
-    printer.print("RegionServer(s): ")
-      .startBold().print(Integer.toString(summary.getServers())).stopBold()
-      .print(" total, ")
-      .startBold().print(Integer.toString(summary.getLiveServers())).stopBold()
-      .print(" live, ")
-      .startBold().print(Integer.toString(summary.getDeadServers())).stopBold()
-      .print(" dead").endOfLine();
-    printer.print("RegionCount: ")
-      .startBold().print(Integer.toString(summary.getRegionCount())).stopBold()
-      .print(" total, ")
-      .startBold().print(Integer.toString(summary.getRitCount())).stopBold()
-      .print(" rit").endOfLine();
-    printer.print("Average Cluster Load: ")
-      .startBold().print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine();
-    printer.print("Aggregate Request/s: ")
-      .startBold().print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold()
-      .endOfLine();
+    printer.print("RegionServer(s): ").startBold().print(Integer.toString(summary.getServers()))
+      .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getLiveServers()))
+      .stopBold().print(" live, ").startBold().print(Integer.toString(summary.getDeadServers()))
+      .stopBold().print(" dead").endOfLine();
+    printer.print("RegionCount: ").startBold().print(Integer.toString(summary.getRegionCount()))
+      .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getRitCount()))
+      .stopBold().print(" rit").endOfLine();
+    printer.print("Average Cluster Load: ").startBold()
+      .print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine();
+    printer.print("Aggregate Request/s: ").startBold()
+      .print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold().endOfLine();
   }
 
   private void showRecords(List<Header> headers, List<Record> records, Record selectedRecord) {
@@ -264,7 +258,7 @@ private void showRecords(List<Header> headers, List<Record> records, Record sele
     }
     List<String> buf = new ArrayList<>(headers.size());
     for (int i = 0; i < size; i++) {
-      if(i < records.size()) {
+      if (i < records.size()) {
         Record record = records.get(i);
         buf.clear();
         for (Header header : headers) {
@@ -293,8 +287,7 @@ private void showRecords(List<Header> headers, List<Record> records, Record sele
   }
 
   private void showHeaders(List<Header>
    headers) { - String header = headers.stream() - .map(h -> String.format(h.format(), h.getField().getHeader())) + String header = headers.stream().map(h -> String.format(h.format(), h.getField().getHeader())) .collect(Collectors.joining(" ")); if (!header.isEmpty()) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java index 9322aaa8157f..331128ba2d32 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * The attributes of text in the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java index 843a315ab716..8747de0c0cc7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,17 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal color definitions. */ @InterfaceAudience.Private public enum Color { - BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE + BLACK, + RED, + GREEN, + YELLOW, + BLUE, + MAGENTA, + CYAN, + WHITE } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java index 775ff3d72e6a..11da1b58c6e3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * A 2-d position in 'terminal space'. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java index d0be00c5868d..d85b3f05d3cc 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the user pressing a key on the keyboard. 
*/ @@ -97,13 +96,8 @@ public boolean isShift() { @Override public String toString() { - return "KeyPress{" + - "type=" + type + - ", character=" + escape(character) + - ", alt=" + alt + - ", ctrl=" + ctrl + - ", shift=" + shift + - '}'; + return "KeyPress{" + "type=" + type + ", character=" + escape(character) + ", alt=" + alt + + ", ctrl=" + ctrl + ", shift=" + shift + '}'; } private String escape(Character character) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java index c834b7515c24..f34cfc298c62 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,19 +21,29 @@ import java.io.Closeable; import org.apache.yetus.audience.InterfaceAudience; - /** * The terminal interface that is an abstraction of terminal screen. */ @InterfaceAudience.Private public interface Terminal extends Closeable { void clear(); + void refresh(); - @Nullable TerminalSize getSize(); - @Nullable TerminalSize doResizeIfNecessary(); - @Nullable KeyPress pollKeyPress(); + + @Nullable + TerminalSize getSize(); + + @Nullable + TerminalSize doResizeIfNecessary(); + + @Nullable + KeyPress pollKeyPress(); + CursorPosition getCursorPosition(); + void setCursorPosition(int column, int row); + void hideCursor(); + TerminalPrinter getTerminalPrinter(int startRow); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java index 66fb55875b0e..52818e42a7d3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * The interface responsible for printing to the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java index f7e55dde7b54..7aea3dac115b 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal dimensions in 2-d space, measured in number of rows and columns. 
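// A minimal sketch of driving the Terminal abstraction shown in this patch: poll key presses
// until 'q' is typed, refreshing the screen in between. The Terminal and KeyPress methods and
// the package paths are taken from the interfaces in the diffs above; the class name, the
// no-arg TerminalImpl constructor, and the 'q' key binding are assumptions for illustration.
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.impl.TerminalImpl;

public final class TerminalUsageSketch {
  public static void main(String[] args) throws Exception {
    try (Terminal terminal = new TerminalImpl()) { // Terminal extends Closeable
      terminal.clear();
      terminal.hideCursor();
      while (true) {
        // pollKeyPress() is @Nullable: it returns null when no key press is pending.
        KeyPress keyPress = terminal.pollKeyPress();
        if (keyPress != null && keyPress.getType() == KeyPress.Type.Character
          && keyPress.getCharacter() == 'q') {
          break; // stop polling on 'q'
        }
        terminal.refresh(); // flush the buffered screen content to the real terminal
      }
    }
  }
}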
*/ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java index de61477ce33a..6cd9475c6d0f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a single text cell of the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java index 52f8e374364e..28cc52e4d8aa 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for escape sequences. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java index a20222c3eb5b..15cbb3070c37 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** - * This generates {@link KeyPress} objects from the given input stream and offers them to the - * given queue. + * This generates {@link KeyPress} objects from the given input stream and offers them to the given + * queue. 
*/ @InterfaceAudience.Private public class KeyPressGenerator { @@ -48,7 +47,10 @@ public class KeyPressGenerator { private static final Logger LOGGER = LoggerFactory.getLogger(KeyPressGenerator.class); private enum ParseState { - START, ESCAPE, ESCAPE_SEQUENCE_PARAM1, ESCAPE_SEQUENCE_PARAM2 + START, + ESCAPE, + ESCAPE_SEQUENCE_PARAM1, + ESCAPE_SEQUENCE_PARAM2 } private final Queue keyPressQueue; @@ -67,9 +69,9 @@ public KeyPressGenerator(InputStream inputStream, Queue keyPressQueue) input = new InputStreamReader(inputStream, StandardCharsets.UTF_8); this.keyPressQueue = keyPressQueue; - executorService = Executors.newFixedThreadPool(2, new ThreadFactoryBuilder() - .setNameFormat("KeyPressGenerator-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + executorService = Executors.newFixedThreadPool(2, + new ThreadFactoryBuilder().setNameFormat("KeyPressGenerator-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); initState(); } @@ -469,8 +471,10 @@ private boolean isCtrl(int param) { private void offer(KeyPress keyPress) { // Handle ctrl + c - if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character && - keyPress.getCharacter() == 'c') { + if ( + keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character + && keyPress.getCharacter() == 'c' + ) { System.exit(0); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java index 8752c5fe689a..887851f36bb2 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.CursorPosition; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a buffer of the terminal screen for double-buffering. */ @@ -78,8 +77,10 @@ public void flush(PrintWriter output) { flushRow(row, sb, attributes); } - if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows && - cursorColumn < columns) { + if ( + cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows + && cursorColumn < columns + ) { sb.append(cursor(true)); sb.append(moveCursor(cursorColumn, cursorRow)); } else { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java index c6b74afcbfa5..e579b0ff057d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,9 @@ import org.slf4j.LoggerFactory; /** - * An implementation of the {@link Terminal} interface for normal display mode. - * - * This implementation produces output intended for human viewing. In particular, it only displays - * one screenful of data. 
The output contains some escape sequences for formatting. + * An implementation of the {@link Terminal} interface for normal display mode. This implementation + * produces output intended for human viewing. In particular, it only displays one screenful of + * data. The output contains some escape sequences for formatting. */ @InterfaceAudience.Private public class TerminalImpl implements Terminal { @@ -181,8 +180,8 @@ private TerminalSize queryTerminalSize() { } private void sttyRaw() { - doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + - "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); + doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + + "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); } private void sttyCooked() { @@ -190,7 +189,7 @@ private void sttyCooked() { } private String doStty(String sttyOptionsString) { - String [] cmd = {"/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty"}; + String[] cmd = { "/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty" }; try { Process process = Runtime.getRuntime().exec(cmd); @@ -198,14 +197,14 @@ private String doStty(String sttyOptionsString) { String ret; // stdout - try (BufferedReader stdout = new BufferedReader(new InputStreamReader( - process.getInputStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stdout = new BufferedReader( + new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { ret = stdout.readLine(); } // stderr - try (BufferedReader stderr = new BufferedReader(new InputStreamReader( - process.getErrorStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stderr = new BufferedReader( + new InputStreamReader(process.getErrorStream(), StandardCharsets.UTF_8))) { String line = stderr.readLine(); if ((line != null) && (line.length() > 0)) { LOGGER.error("Error output from stty: " + line); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java index 788d26799581..05e0b5611533 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java index 60f550289e26..ba7a5de40a5c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,11 +25,9 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; /** - * An implementation of the {@link Terminal} interface for batch mode. - * - * This implementation produces output that's more sensible for collecting to a log file or for - * parsing. 
There is no limit on the number of output lines, and the output doesn't contain any - * escape sequences for formatting. + * An implementation of the {@link Terminal} interface for batch mode. This implementation produces + * output that's more sensible for collecting to a log file or for parsing. There is no limit on the + * number of output lines, and the output doesn't contain any escape sequences for formatting. */ public class BatchTerminal implements Terminal { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java index 60316669daaf..ed216a164926 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index 339cc40847d3..3790af32ed64 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,20 +28,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecord { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecord.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRecord.class); @Test public void testBuilder() { - Record actual1 = Record.builder().put(Field.TABLE, "tableName") - .put(entry(Field.REGION_COUNT, 3)) - .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)) - .build(); + Record actual1 = + Record.builder().put(Field.TABLE, "tableName").put(entry(Field.REGION_COUNT, 3)) + .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)).build(); assertThat(actual1.size(), is(3)); assertThat(actual1.get(Field.TABLE).asString(), is("tableName")); @@ -58,11 +55,8 @@ public void testBuilder() { @Test public void testOfEntries() { - Record actual = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record actual = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); assertThat(actual.size(), is(3)); assertThat(actual.get(Field.TABLE).asString(), is("tableName")); @@ -72,17 +66,11 @@ public void testOfEntries() { @Test public void testCombine() { - Record record1 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record record1 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); - Record record2 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - 
entry(Field.REGION_COUNT, 5), - entry(Field.REQUEST_COUNT_PER_SECOND, 500L) - ); + Record record2 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 5), + entry(Field.REQUEST_COUNT_PER_SECOND, 500L)); Record actual = record1.combine(record2); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 2807fd8ef61e..155b7942a66d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecordFilter { @@ -49,8 +48,7 @@ public void testParseAndBuilder() { testParseAndBuilder("REGION=region1", false, RecordFilter.newBuilder(Field.REGION).equal("region1")); - testParseAndBuilder("REGION=", false, - RecordFilter.newBuilder(Field.REGION).equal("")); + testParseAndBuilder("REGION=", false, RecordFilter.newBuilder(Field.REGION).equal("")); testParseAndBuilder("!REGION=region1", false, RecordFilter.newBuilder(Field.REGION).notEqual("region1")); @@ -132,8 +130,8 @@ private void testToString(String filterString) { public void testFilters() { List records = createTestRecords(); - testFilter(records, "REGION=region", false, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=region", false, "region1", "region2", "region3", "region4", + "region5"); testFilter(records, "!REGION=region", false); testFilter(records, "REGION=Region", false); @@ -148,8 +146,7 @@ public void testFilters() { testFilter(records, "LOCALITY<0.5", false, "region5"); testFilter(records, "%COMP<=50%", false, "region2", "region3", "region4", "region5"); - testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, - "region2", "region5"); + testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, "region2", "region5"); testFilters(records, Arrays.asList("%COMP<=50%", "!#SF>=10"), false, "region4"); testFilters(records, Arrays.asList("!REGION==region1", "LOCALITY<0.5", "#REQ/S>100"), false, "region5"); @@ -159,10 +156,10 @@ public void testFilters() { public void testFiltersIgnoreCase() { List records = createTestRecords(); - testFilter(records, "REGION=Region", true, - "region1", "region2", "region3", "region4", "region5"); - testFilter(records, "REGION=REGION", true, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=Region", true, "region1", "region2", "region3", "region4", + "region5"); + testFilter(records, "REGION=REGION", true, "region1", "region2", "region3", "region4", + "region5"); } private List createTestRecords() { @@ -175,8 +172,8 @@ private List createTestRecords() { return ret; } - private Record createTestRecord(String region, long requestCountPerSecond, - Size storeFileSize, int numStoreFiles, float locality, float compactionProgress) { + private Record createTestRecord(String region, long requestCountPerSecond, Size storeFileSize, + int numStoreFiles, float locality, float compactionProgress) { Record.Builder builder = Record.builder(); builder.put(Field.REGION, region); builder.put(Field.REQUEST_COUNT_PER_SECOND, 
requestCountPerSecond); @@ -194,12 +191,10 @@ private void testFilter(List records, String filterString, boolean ignor private void testFilters(List records, List filterStrings, boolean ignoreCase, String... expectedRegions) { - List actual = - records.stream().filter(r -> filterStrings.stream() - .map(f -> RecordFilter.parse(f, ignoreCase)) + List actual = records.stream() + .filter(r -> filterStrings.stream().map(f -> RecordFilter.parse(f, ignoreCase)) .allMatch(f -> f.execute(r))) - .map(r -> r.get(Field.REGION).asString()) - .collect(Collectors.toList()); + .map(r -> r.get(Field.REGION).asString()).collect(Collectors.toList()); assertThat(actual, hasItems(expectedRegions)); assertThat(actual.size(), is(expectedRegions.length)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index c633e37825ea..0f6a02a27c11 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; - public final class TestUtils { private TestUtils() { @@ -57,81 +56,62 @@ public static ClusterMetrics createDummyClusterMetrics() { // host1 List regionMetricsList = new ArrayList<>(); List userMetricsList = new ArrayList<>(); - userMetricsList.add(createUserMetrics("FOO",1,2, 4)); - userMetricsList.add(createUserMetrics("BAR",2,3, 3)); - regionMetricsList.add(createRegionMetrics( - "table1,,1.00000000000000000000000000000000.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 1, 2, 4)); + userMetricsList.add(createUserMetrics("BAR", 2, 3, 3)); + regionMetricsList.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00")); - regionMetricsList.add(createRegionMetrics( - "table2,1,2.00000000000000000000000000000001.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,3_0001.00000000000000000000000000000002.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300, + 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1); - serverMetricsMap.put(host1, createServerMetrics(host1, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100, - regionMetricsList, 
userMetricsList)); + serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE), + new Size(200, Size.Unit.MEGABYTE), 100, regionMetricsList, userMetricsList)); // host2 regionMetricsList.clear(); userMetricsList.clear(); - userMetricsList.add(createUserMetrics("FOO",5,7, 3)); - userMetricsList.add(createUserMetrics("BAR",4,8, 4)); - regionMetricsList.add(createRegionMetrics( - "table1,1,4.00000000000000000000000000000003.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 5, 7, 3)); + userMetricsList.add(createUserMetrics("BAR", 4, 8, 4)); + regionMetricsList.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03")); - regionMetricsList.add(createRegionMetrics( - "table2,,5.00000000000000000000000000000004.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,6.00000000000000000000000000000005.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150, + 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2); - serverMetricsMap.put(host2, createServerMetrics(host2, 200, - new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200, - regionMetricsList, userMetricsList)); + serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE), + new Size(32, Size.Unit.GIGABYTE), 200, regionMetricsList, userMetricsList)); ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3); - return ClusterMetricsBuilder.newBuilder() - .setHBaseVersion("3.0.0-SNAPSHOT") - .setClusterId("01234567-89ab-cdef-0123-456789abcdef") - .setLiveServerMetrics(serverMetricsMap) + return ClusterMetricsBuilder.newBuilder().setHBaseVersion("3.0.0-SNAPSHOT") + .setClusterId("01234567-89ab-cdef-0123-456789abcdef").setLiveServerMetrics(serverMetricsMap) .setDeadServerNames(Collections.singletonList(host3)) - .setRegionsInTransition(Collections.singletonList( - new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) - .setStartKey(new byte [0]) - .setEndKey(new byte [0]) - .setOffline(true) - .setReplicaId(0) - .setRegionId(0) - .setSplit(false) - .build(), - RegionState.State.OFFLINE, host3))) + .setRegionsInTransition(Collections + .singletonList(new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) + .setStartKey(new byte[0]).setEndKey(new byte[0]).setOffline(true).setReplicaId(0) + .setRegionId(0).setSplit(false).build(), RegionState.State.OFFLINE, host3))) .build(); } private static UserMetrics createUserMetrics(String user, long readRequestCount, - long writeRequestCount, long 
filteredReadRequestsCount) { - return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)).addClientMetris( - new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).addClientMetris( - new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).build(); + long writeRequestCount, long filteredReadRequestsCount) { + return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .build(); } private static RegionMetrics createRegionMetrics(String regionName, long readRequestCount, @@ -142,8 +122,7 @@ private static RegionMetrics createRegionMetrics(String regionName, long readReq FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss"); try { return RegionMetricsBuilder.newBuilder(Bytes.toBytes(regionName)) - .setReadRequestCount(readRequestCount) - .setFilteredReadRequestCount(filteredReadRequestCount) + .setReadRequestCount(readRequestCount).setFilteredReadRequestCount(filteredReadRequestCount) .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize) .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount) .setMemStoreSize(memStoreSize).setDataLocality(locality) @@ -158,12 +137,9 @@ private static ServerMetrics createServerMetrics(ServerName serverName, long rep Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond, List regionMetricsList, List userMetricsList) { - return ServerMetricsBuilder.newBuilder(serverName) - .setReportTimestamp(reportTimestamp) - .setUsedHeapSize(usedHeapSize) - .setMaxHeapSize(maxHeapSize) - .setRequestCountPerSecond(requestCountPerSecond) - .setRegionMetrics(regionMetricsList) + return ServerMetricsBuilder.newBuilder(serverName).setReportTimestamp(reportTimestamp) + .setUsedHeapSize(usedHeapSize).setMaxHeapSize(maxHeapSize) + .setRequestCountPerSecond(requestCountPerSecond).setRegionMetrics(regionMetricsList) .setUserMetrics(userMetricsList).build(); } @@ -174,48 +150,44 @@ public static void assertRecordsInRegionMode(List records) { switch (record.get(Field.REGION_NAME).asString()) { case "table1,,1.00000000000000000000000000000000.": assertRecordInRegionMode(record, "default", "1", "", "table1", - "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, - "2019-07-22 00:00:00"); + "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, "2019-07-22 00:00:00"); break; case "table1,1,4.00000000000000000000000000000003.": assertRecordInRegionMode(record, "default", "4", "", "table1", - "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, - "2019-07-22 00:00:03"); + "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 
0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, "2019-07-22 00:00:03"); break; case "table2,,5.00000000000000000000000000000004.": assertRecordInRegionMode(record, "default", "5", "", "table2", - "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, - "2019-07-22 00:00:04"); + "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, "2019-07-22 00:00:04"); break; case "table2,1,2.00000000000000000000000000000001.": assertRecordInRegionMode(record, "default", "2", "", "table2", - "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, - "2019-07-22 00:00:01"); + "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, "2019-07-22 00:00:01"); break; case "namespace:table3,,6.00000000000000000000000000000005.": assertRecordInRegionMode(record, "namespace", "6", "", "table3", - "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f, "2019-07-22 00:00:05"); break; case "namespace:table3,,3_0001.00000000000000000000000000000002.": assertRecordInRegionMode(record, "namespace", "3", "1", "table3", - "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f, "2019-07-22 00:00:02"); break; @@ -229,10 +201,10 @@ public static void assertRecordsInRegionMode(List records) { private static void assertRecordInRegionMode(Record record, String namespace, String startCode, String replicaId, String table, String region, String regionServer, String longRegionServer, long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, float locality, String startKey, long compactingCellCount, - long compactedCellCount, float compactionProgress, String lastMajorCompactionTime) { + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, float locality, + String startKey, long compactingCellCount, long compactedCellCount, float compactionProgress, + String lastMajorCompactionTime) { 
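// A small sketch of the Record and RecordFilter APIs exercised by the tests in this patch
// (Record.ofEntries, entry(...), RecordFilter.parse and execute all appear in the diffs above).
// The class name and the concrete field values are illustrative only, and the package
// locations are inferred from the file paths in this patch.
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;

public final class RecordFilterSketch {
  public static void main(String[] args) {
    // Build a record for one region reporting 150 requests per second.
    Record record = Record.ofEntries(
      Record.entry(Field.REGION, "region1"),
      Record.entry(Field.REQUEST_COUNT_PER_SECOND, 150L));

    // "#REQ/S>100" uses the same [!]FLD?VAL filter syntax the tests above exercise;
    // the boolean flag toggles case-insensitive matching.
    RecordFilter filter = RecordFilter.parse("#REQ/S>100", false);

    System.out.println(filter.execute(record)); // true: 150 > 100
  }
}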
assertThat(record.size(), is(22)); assertThat(record.get(Field.NAMESPACE).asString(), is(namespace)); assertThat(record.get(Field.START_CODE).asString(), is(startCode)); @@ -241,8 +213,7 @@ private static void assertRecordInRegionMode(Record record, String namespace, St assertThat(record.get(Field.REGION).asString(), is(region)); assertThat(record.get(Field.REGION_SERVER).asString(), is(regionServer)); assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -289,8 +260,7 @@ private static void assertRecordInNamespaceMode(Record record, long requestCount long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(10)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -339,7 +309,7 @@ public static void assertRecordsInUserMode(List records) { for (Record record : records) { String user = record.get(Field.USER).asString(); switch (user) { - //readRequestPerSecond and writeRequestPerSecond will be zero + // readRequestPerSecond and writeRequestPerSecond will be zero // because there is no change or new metrics during refresh case "FOO": assertRecordInUserMode(record, 0L, 0L, 0L); @@ -358,8 +328,8 @@ public static void assertRecordsInClientMode(List records) { for (Record record : records) { String client = record.get(Field.CLIENT).asString(); switch (client) { - //readRequestPerSecond and writeRequestPerSecond will be zero - // because there is no change or new metrics during refresh + // readRequestPerSecond and writeRequestPerSecond will be zero + // because there is no change or new metrics during refresh case "CLIENT_A_FOO": assertRecordInClientMode(record, 0L, 0L, 0L); break; @@ -379,36 +349,35 @@ public static void assertRecordsInClientMode(List records) { } private static void assertRecordInUserMode(Record record, long readRequestCountPerSecond, - long writeCountRequestPerSecond, long filteredReadRequestsCount) { + long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.CLIENT_COUNT).asInt(), is(2)); } private static void assertRecordInClientMode(Record record, long readRequestCountPerSecond, - long writeCountRequestPerSecond, long filteredReadRequestsCount) { + long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); 
assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.USER_COUNT).asInt(), is(1)); } private static void assertRecordInTableMode(Record record, long requestCountPerSecond, - long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, - long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, - int numStoreFiles, Size memStoreSize, int regionCount) { + long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, + long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, + int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(11)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -450,14 +419,12 @@ public static void assertRecordsInRegionServerMode(List records) { private static void assertRecordInRegionServerMode(Record record, String longRegionServer, long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, int regionCount, Size usedHeapSize, Size maxHeapSize) { + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount, + Size usedHeapSize, Size maxHeapSize) { assertThat(record.size(), is(13)); - assertThat(record.get(Field.LONG_REGION_SERVER).asString(), - is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index dcbdb6b9b8ab..d2af864bdd02 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestFieldValue { @@ -122,8 +121,7 @@ public void testParseAndAsSomethingMethod() { } // Percent - FieldValue percentFieldValue = - new FieldValue(100f, FieldValueType.PERCENT); + FieldValue percentFieldValue = new FieldValue(100f, FieldValueType.PERCENT); assertThat(percentFieldValue.asString(), is("100.00%")); assertThat(percentFieldValue.asFloat(), is(100f)); @@ -255,44 +253,35 @@ public void testCompareToIgnoreCase() { @Test public void testOptimizeSize() { - FieldValue sizeFieldValue = - new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); + FieldValue sizeFieldValue = new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0B")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0MB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0GB")); sizeFieldValue = new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0TB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); - sizeFieldValue = - new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 4f0864838532..a58033851401 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,18 +33,22 @@ @Category(SmallTests.class) public class TestClientMode extends TestModeBase { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientMode.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestClientMode.class); - @Override protected Mode getMode() { + @Override + protected Mode getMode() { return Mode.CLIENT; } - @Override protected void assertRecords(List records) { + @Override + protected void assertRecords(List records) { TestUtils.assertRecordsInClientMode(records); } - @Override protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { + @Override + protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { assertThat(drillDownInfo.getNextMode(), is(Mode.USER)); assertThat(drillDownInfo.getInitialFilters().size(), is(1)); String client = currentRecord.get(Field.CLIENT).asString(); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java index a52b332265b0..2d29fc414605 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,23 +22,21 @@ import org.apache.hadoop.hbase.hbtop.TestUtils; import org.junit.Test; - public abstract class TestModeBase { @Test public void testGetRecords() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); assertRecords(records); } protected abstract Mode getMode(); + protected abstract void assertRecords(List records); @Test public void testDrillDown() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); for (Record record : records) { assertDrillDown(record, getMode().drillDown(record)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 6c498e94eb1d..ab439fd826d9 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestNamespaceMode extends TestModeBase { @@ -59,8 +58,7 @@ protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo break; case "namespace": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); break; default: diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index b705531475f3..f0756e48a952 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionMode extends TestModeBase { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index cbfc7283fc64..62cbeea5d139 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionServerMode extends TestModeBase { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index a73d54ea6bb9..25dca63d57ee 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRequestCountPerSecond { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index f718304671c4..0f05e484c1b2 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestTableMode extends TestModeBase { @@ -68,8 +67,7 @@ protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo break; case "namespace:table3": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); assertThat(drillDownInfo.getInitialFilters().get(1).toString(), is("TABLE==table3")); break; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index f094c85f5481..772e24a82c29 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index cbf740430b0a..18211ee2463a 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFieldScreenPresenter { @@ -71,17 +70,15 @@ public class TestFieldScreenPresenter { @Before public void setup() { Field sortField = Mode.REGION.getDefaultSortField(); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - fieldDisplayMap = Mode.REGION.getFieldInfos().stream() - .collect(() -> new EnumMap<>(Field.class), - (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> {}); + fieldDisplayMap = Mode.REGION.getFieldInfos().stream().collect(() -> new EnumMap<>(Field.class), + (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> { + }); - fieldScreenPresenter = - new FieldScreenPresenter(fieldScreenView, sortField, fields, fieldDisplayMap, resultListener, - topScreenView); + fieldScreenPresenter = new FieldScreenPresenter(fieldScreenView, sortField, fields, + fieldDisplayMap, resultListener, topScreenView); for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); @@ -122,8 +119,8 @@ public void testChangeSortField() { inOrder.verify(fieldScreenView).showScreenDescription(eq("LRS")); inOrder.verify(fieldScreenView).showScreenDescription(eq("#READ/S")); inOrder.verify(fieldScreenView).showScreenDescription(eq(fields.get(0).getHeader())); - inOrder.verify(fieldScreenView).showScreenDescription( - eq(fields.get(fields.size() - 1).getHeader())); + inOrder.verify(fieldScreenView) + .showScreenDescription(eq(fields.get(fields.size() - 1).getHeader())); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 245bf615e731..d6f217498978 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestHelpScreenPresenter { @@ -55,8 +54,8 @@ public class TestHelpScreenPresenter { @Before public void setup() { - helpScreenPresenter = new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, - topScreenView); + helpScreenPresenter = + new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index 1b7e12a6240f..c4984966c799 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestModeScreenPresenter { @@ -69,7 +68,7 @@ public void testInit() { int modeDescriptionMaxLength = Mode.REGION_SERVER.getDescription().length(); verify(modeScreenView).showModeScreen(eq(Mode.REGION), eq(Arrays.asList(Mode.values())), - eq(Mode.REGION.ordinal()) , eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); + eq(Mode.REGION.ordinal()), eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 414b5b0702c5..a79bcbd808f4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFilterDisplayModeScreenPresenter { @@ -58,24 +57,23 @@ public class TestFilterDisplayModeScreenPresenter { @Before public void setup() { - List fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + List fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - List filters = new ArrayList<>(); + List filters = new ArrayList<>(); filters.add(RecordFilter.parse("NAMESPACE==namespace", fields, true)); filters.add(RecordFilter.parse("TABLE==table", fields, true)); - filterDisplayModeScreenPresenter = new FilterDisplayModeScreenPresenter( - filterDisplayModeScreenView, filters, topScreenView); + filterDisplayModeScreenPresenter = + new FilterDisplayModeScreenPresenter(filterDisplayModeScreenView, filters, topScreenView); } @Test public void testInit() { filterDisplayModeScreenPresenter.init(); - verify(filterDisplayModeScreenView).showFilters(argThat(filters -> filters.size() == 2 - && filters.get(0).toString().equals("NAMESPACE==namespace") - && filters.get(1).toString().equals("TABLE==table"))); + verify(filterDisplayModeScreenView).showFilters(argThat( + filters -> filters.size() == 2 && filters.get(0).toString().equals("NAMESPACE==namespace") + && filters.get(1).toString().equals("TABLE==table"))); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index b5e9bb9f3ba6..e7abefd854a7 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestInputModeScreenPresenter { @@ -68,8 +67,8 @@ public void setup() { histories.add("history1"); histories.add("history2"); - inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, - TEST_INPUT_MESSAGE, histories, resultListener); + inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, TEST_INPUT_MESSAGE, + histories, resultListener); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index 0acd79c56d2d..4b4d10e83236 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestMessageModeScreenPresenter { @@ -53,8 +52,8 @@ public class TestMessageModeScreenPresenter { @Before public void setup() { - messageModeScreenPresenter = new MessageModeScreenPresenter(messageModeScreenView, - TEST_MESSAGE, topScreenView); + messageModeScreenPresenter = + new MessageModeScreenPresenter(messageModeScreenView, TEST_MESSAGE, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index e0c09dfe1673..f5a90cc6071f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestPaging { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPaging.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPaging.class); @Test public void testArrowUpAndArrowDown() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 44a8878407a0..177d64002c7f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenModel { @@ -65,9 +64,8 @@ public void setup() throws IOException { when(admin.getClusterMetrics()).thenReturn(TestUtils.createDummyClusterMetrics()); topScreenModel = new TopScreenModel(admin, Mode.REGION, null, null, null, null); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); } @Test @@ -172,9 +170,9 @@ public void testSwitchMode() { assertThat(topScreenModel.getCurrentMode(), is(Mode.TABLE)); // Test for initialFilters - List initialFilters = Arrays.asList( - RecordFilter.parse("TABLE==table1", fields, true), - RecordFilter.parse("TABLE==table2", fields, true)); + List initialFilters = + Arrays.asList(RecordFilter.parse("TABLE==table1", fields, true), + RecordFilter.parse("TABLE==table2", fields, true)); topScreenModel.switchMode(Mode.TABLE, false, initialFilters); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java index d218dd52950d..6a780b5ff85e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,6 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenPresenter { @@ -52,29 +51,19 @@ public class TestTopScreenPresenter { HBaseClassTestRule.forClass(TestTopScreenPresenter.class); private static final List TEST_FIELD_INFOS = Arrays.asList( - new FieldInfo(Field.REGION, 10, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.LOCALITY, 10, true) - ); + new FieldInfo(Field.REGION, 10, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.LOCALITY, 10, true)); private static final List TEST_RECORDS = Arrays.asList( - Record.ofEntries( - entry(Field.REGION, "region1"), - entry(Field.REQUEST_COUNT_PER_SECOND, 1L), + Record.ofEntries(entry(Field.REGION, "region1"), entry(Field.REQUEST_COUNT_PER_SECOND, 1L), entry(Field.LOCALITY, 0.3f)), - Record.ofEntries( - entry(Field.REGION, "region2"), - entry(Field.REQUEST_COUNT_PER_SECOND, 2L), + Record.ofEntries(entry(Field.REGION, "region2"), entry(Field.REQUEST_COUNT_PER_SECOND, 2L), entry(Field.LOCALITY, 0.2f)), - Record.ofEntries( - entry(Field.REGION, "region3"), - entry(Field.REQUEST_COUNT_PER_SECOND, 3L), - entry(Field.LOCALITY, 0.1f)) - ); + Record.ofEntries(entry(Field.REGION, "region3"), entry(Field.REQUEST_COUNT_PER_SECOND, 3L), + entry(Field.LOCALITY, 0.1f))); - private static final Summary TEST_SUMMARY = new Summary( - "00:00:01", "3.0.0-SNAPSHOT", "01234567-89ab-cdef-0123-456789abcdef", - 3, 2, 1, 6, 1, 3.0, 300); + private static final Summary TEST_SUMMARY = new Summary("00:00:01", "3.0.0-SNAPSHOT", + 
"01234567-89ab-cdef-0123-456789abcdef", 3, 2, 1, 6, 1, 3.0, 300); @Mock private TopScreenView topScreenView; @@ -90,13 +79,13 @@ public void setup() { when(topScreenView.getPageSize()).thenReturn(100); when(topScreenModel.getFieldInfos()).thenReturn(TEST_FIELD_INFOS); - when(topScreenModel.getFields()).thenReturn(TEST_FIELD_INFOS.stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + when(topScreenModel.getFields()) + .thenReturn(TEST_FIELD_INFOS.stream().map(FieldInfo::getField).collect(Collectors.toList())); when(topScreenModel.getRecords()).thenReturn(TEST_RECORDS); when(topScreenModel.getSummary()).thenReturn(TEST_SUMMARY); - topScreenPresenter = new TopScreenPresenter(topScreenView, 3000, topScreenModel, - null, Long.MAX_VALUE); + topScreenPresenter = + new TopScreenPresenter(topScreenView, 3000, topScreenModel, null, Long.MAX_VALUE); } @Test @@ -104,8 +93,8 @@ public void testRefresh() { topScreenPresenter.init(); topScreenPresenter.refresh(true); - verify(topScreenView).showTopScreen(argThat(this::assertSummary), - argThat(this::assertHeaders), argThat(this::assertRecords), + verify(topScreenView).showTopScreen(argThat(this::assertSummary), argThat(this::assertHeaders), + argThat(this::assertRecords), argThat(selectedRecord -> assertSelectedRecord(selectedRecord, 0))); } @@ -211,9 +200,8 @@ private boolean assertSummary(Summary actual) { } private boolean assertHeaders(List
<Header> actual) { - List<Header> expected = - TEST_FIELD_INFOS.stream().map(fi -> new Header(fi.getField(), fi.getDefaultLength())) - .collect(Collectors.toList()); + List<Header>
    expected = TEST_FIELD_INFOS.stream() + .map(fi -> new Header(fi.getField(), fi.getDefaultLength())).collect(Collectors.toList()); if (actual.size() != expected.size()) { return false; @@ -250,8 +238,9 @@ private boolean assertSelectedRecord(Record actual, int expectedSelectedRecodeIn } private boolean assertRecord(Record actual, Record expected) { - return actual.get(Field.REGION).equals(expected.get(Field.REGION)) && actual - .get(Field.REQUEST_COUNT_PER_SECOND).equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) + return actual.get(Field.REGION).equals(expected.get(Field.REGION)) + && actual.get(Field.REQUEST_COUNT_PER_SECOND) + .equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) && actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY)); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java index 304c92b8497e..3458e7ee31b4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestCursor { private TestCursor() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java index ebfe56981c49..6295cd0166aa 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestKeyPress { private TestKeyPress() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java index 212395fecaf5..6af4eef609cb 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; - public final class TestTerminalPrinter { private TestTerminalPrinter() { @@ -38,8 +36,8 @@ public static void main(String[] args) throws Exception { printer.print("Normal string").endOfLine(); printer.startHighlight().print("Highlighted string").stopHighlight().endOfLine(); printer.startBold().print("Bold string").stopBold().endOfLine(); - printer.startHighlight().startBold().print("Highlighted bold string") - .stopBold().stopHighlight().endOfLine(); + printer.startHighlight().startBold().print("Highlighted bold string").stopBold() + .stopHighlight().endOfLine(); printer.endOfLine(); printer.print("Press any key to finish").endOfLine(); diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index e36989ba83ef..34eb95d8da4a 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-http Apache HBase - HTTP HTTP functionality for HBase Servers - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - package - - jar - test-jar - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -262,6 +162,106 @@ test + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + jar + test-jar + + package + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + build-with-jdk11 @@ -286,10 +286,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -331,7 +331,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -341,6 +343,7 @@ org.apache.hadoop hadoop-minicluster + test com.google.guava @@ -355,7 +358,6 @@ jsr311-api - 
test com.fasterxml.jackson.core @@ -369,10 +371,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -431,7 +433,7 @@ - + diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java index 215ff37e3bf5..ba72af2e5f48 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; @@ -38,16 +36,17 @@ public class AdminAuthorizedFilter implements Filter { private Configuration conf; private AccessControlList adminsAcl; - @Override public void init(FilterConfig filterConfig) throws ServletException { - adminsAcl = (AccessControlList) filterConfig.getServletContext().getAttribute( - HttpServer.ADMINS_ACL); - conf = (Configuration) filterConfig.getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + @Override + public void init(FilterConfig filterConfig) throws ServletException { + adminsAcl = + (AccessControlList) filterConfig.getServletContext().getAttribute(HttpServer.ADMINS_ACL); + conf = (Configuration) filterConfig.getServletContext() + .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); } @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)) { throw new UnsupportedOperationException("Only accepts HTTP"); } @@ -61,5 +60,7 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha chain.doFilter(request, response); } - @Override public void destroy() {} + @Override + public void destroy() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java index 10156f43b445..2ad09b5ae5c7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,7 @@ public class AdminAuthorizedServlet extends DefaultServlet { protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { // Do the authorization - if (HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { // Authorization is done. Just call super. 
super.doGet(request, response); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java index 0f0c7150c417..3cc58e8cf446 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -28,10 +27,8 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -46,7 +43,7 @@ public void init(FilterConfig filterConfig) throws ServletException { @Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.addHeader("X-Frame-Options", filterConfig.getInitParameter("xframeoptions")); chain.doFilter(req, res); @@ -58,8 +55,8 @@ public void destroy() { public static Map getDefaultParameters(Configuration conf) { Map params = new HashMap<>(); - params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode", - DEFAULT_XFRAMEOPTIONS)); + params.put("xframeoptions", + conf.get("hbase.http.filter.xframeoptions.mode", DEFAULT_XFRAMEOPTIONS)); return params; } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java index 5869ce3f92e8..7c98352cf222 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,16 +27,17 @@ public interface FilterContainer { /** * Add a filter to the container. - * @param name Filter name - * @param classname Filter class name + * @param name Filter name + * @param classname Filter class name * @param parameters a map from parameter names to initial values */ void addFilter(String name, String classname, Map parameters); + /** - * Add a global filter to the container - This global filter will be - * applied to all available web contexts. - * @param name filter name - * @param classname filter class name + * Add a global filter to the container - This global filter will be applied to all available web + * contexts. 
+ * @param name filter name + * @param classname filter class name * @param parameters a map from parameter names to initial values */ void addGlobalFilter(String name, String classname, Map parameters); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java index 7e8595e7d043..135115acb1bf 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public abstract class FilterInitializer { /** * Initialize a Filter to a FilterContainer. * @param container The filter container - * @param conf Configuration for run-time parameters + * @param conf Configuration for run-time parameters */ public abstract void initFilter(FilterContainer container, Configuration conf); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java index ad584c9d1166..23d4bd0a51ec 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -38,17 +37,17 @@ public final class HtmlQuoting { /** * Does the given string need to be quoted? * @param data the string to check - * @param off the starting position - * @param len the number of bytes to check + * @param off the starting position + * @param len the number of bytes to check * @return does the string contain any of the active html characters? */ public static boolean needsQuoting(byte[] data, int off, int len) { - if (off+len > data.length) { - throw new IllegalStateException("off+len=" + off+len + " should be lower" - + " than data length=" + data.length); + if (off + len > data.length) { + throw new IllegalStateException( + "off+len=" + off + len + " should be lower" + " than data length=" + data.length); } - for(int i=off; i< off+len; ++i) { - switch(data[i]) { + for (int i = off; i < off + len; ++i) { + switch (data[i]) { case '&': case '<': case '>': @@ -72,20 +71,19 @@ public static boolean needsQuoting(String str) { return false; } byte[] bytes = Bytes.toBytes(str); - return needsQuoting(bytes, 0 , bytes.length); + return needsQuoting(bytes, 0, bytes.length); } /** - * Quote all of the active HTML characters in the given string as they - * are added to the buffer. + * Quote all of the active HTML characters in the given string as they are added to the buffer. 
* @param output the stream to write the output to * @param buffer the byte array to take the characters from - * @param off the index of the first byte to quote - * @param len the number of bytes to quote + * @param off the index of the first byte to quote + * @param len the number of bytes to quote */ public static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len) - throws IOException { - for(int i=off; i < off+len; i++) { + throws IOException { + for (int i = off; i < off + len; i++) { switch (buffer[i]) { case '&': output.write(ampBytes); @@ -140,6 +138,7 @@ public static String quoteHtmlChars(String item) { public static OutputStream quoteOutputStream(final OutputStream out) { return new OutputStream() { private byte[] data = new byte[1]; + @Override public void write(byte[] data, int off, int len) throws IOException { quoteHtmlChars(out, data, off, len); @@ -198,12 +197,11 @@ public static String unquoteHtmlChars(String item) { buffer.append('"'); next += 6; } else { - int end = item.indexOf(';', next)+1; + int end = item.indexOf(';', next) + 1; if (end == 0) { end = len; } - throw new IllegalArgumentException("Bad HTML quoting for " + - item.substring(next,end)); + throw new IllegalArgumentException("Bad HTML quoting for " + item.substring(next, end)); } posn = next; next = item.indexOf('&', posn); @@ -216,15 +214,16 @@ public static void main(String[] args) { if (args.length == 0) { throw new IllegalArgumentException("Please provide some arguments"); } - for(String arg:args) { + for (String arg : args) { System.out.println("Original: " + arg); String quoted = quoteHtmlChars(arg); - System.out.println("Quoted: "+ quoted); + System.out.println("Quoted: " + quoted); String unquoted = unquoteHtmlChars(quoted); System.out.println("Unquoted: " + unquoted); System.out.println(); } } - private HtmlQuoting() {} + private HtmlQuoting() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java index 52c9133dcf63..09de376ea188 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -29,6 +28,7 @@ @InterfaceStability.Unstable public class HttpConfig { private Policy policy; + public enum Policy { HTTP_ONLY, HTTPS_ONLY, @@ -53,8 +53,7 @@ public boolean isHttpsEnabled() { } public HttpConfig(final Configuration conf) { - boolean sslEnabled = conf.getBoolean( - ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, + boolean sslEnabled = conf.getBoolean(ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, ServerConfigurationKeys.HBASE_SSL_ENABLED_DEFAULT); policy = sslEnabled ? 
Policy.HTTPS_ONLY : Policy.HTTP_ONLY; if (sslEnabled) { diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java index d3e8005eb9c0..5f63dda9f3e0 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index f8c04bac9715..de2e59befb47 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,6 +68,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion; @@ -96,12 +97,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/logs/" -> points to the log directory - * "/static/" -> points to common static files (src/webapps/static) - * "/" -> the jsp server code from (src/webapps/<name>) + * Create a Jetty embedded server to answer http requests. The primary goal is to serve up status + * information for the server. 
There are three contexts: "/logs/" -> points to the log directory + * "/static/" -> points to common static files (src/webapps/static) "/" -> the jsp server code + * from (src/webapps/<name>) */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -111,37 +110,35 @@ public class HttpServer implements FilterContainer { private static final int DEFAULT_MAX_HEADER_SIZE = 64 * 1024; // 64K - static final String FILTER_INITIALIZERS_PROPERTY - = "hbase.http.filter.initializers"; + static final String FILTER_INITIALIZERS_PROPERTY = "hbase.http.filter.initializers"; static final String HTTP_MAX_THREADS = "hbase.http.max.threads"; public static final String HTTP_UI_AUTHENTICATION = "hbase.security.authentication.ui"; static final String HTTP_AUTHENTICATION_PREFIX = "hbase.security.authentication."; - static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX - + "spnego."; + static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX + "spnego."; static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX = "kerberos.principal"; public static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX = "kerberos.keytab"; public static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX; static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules"; public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; - static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; + static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = + "kerberos.proxyuser.enable"; public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; - public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; - static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = - "signature.secret.file"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; + public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; + static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = "signature.secret.file"; public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY = - HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; + HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.users"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.users"; public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY = - HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.groups"; + HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.groups"; public static final String HTTP_PRIVILEGED_CONF_KEY = - "hbase.security.authentication.ui.config.protected"; + 
"hbase.security.authentication.ui.config.protected"; public static final boolean HTTP_PRIVILEGED_CONF_DEFAULT = false; // The ServletContext attribute where the daemon Configuration @@ -162,11 +159,11 @@ public class HttpServer implements FilterContainer { private static final class ListenerInfo { /** - * Boolean flag to determine whether the HTTP server should clean up the - * listener in stop(). + * Boolean flag to determine whether the HTTP server should clean up the listener in stop(). */ private final boolean isManaged; private final ServerConnector listener; + private ListenerInfo(boolean isManaged, ServerConnector listener) { this.isManaged = isManaged; this.listener = listener; @@ -240,14 +237,10 @@ public static class Builder { private int port = -1; /** - * Add an endpoint that the HTTP server should listen to. - * - * @param endpoint - * the endpoint of that the HTTP server should listen to. The - * scheme specifies the protocol (i.e. HTTP / HTTPS), the host - * specifies the binding address, and the port specifies the - * listening port. Unspecified or zero port means that the server - * can listen to any port. + * Add an endpoint that the HTTP server should listen to. n * the endpoint of that the HTTP + * server should listen to. The scheme specifies the protocol (i.e. HTTP / HTTPS), the host + * specifies the binding address, and the port specifies the listening port. Unspecified or zero + * port means that the server can listen to any port. */ public Builder addEndpoint(URI endpoint) { endpoints.add(endpoint); @@ -255,9 +248,9 @@ public Builder addEndpoint(URI endpoint) { } /** - * Set the hostname of the http server. The host name is used to resolve the - * _HOST field in Kerberos principals. The hostname of the first listener - * will be used if the name is unspecified. + * Set the hostname of the http server. The host name is used to resolve the _HOST field in + * Kerberos principals. The hostname of the first listener will be used if the name is + * unspecified. */ public Builder hostName(String hostName) { this.hostName = hostName; @@ -284,8 +277,7 @@ public Builder keyPassword(String password) { } /** - * Specify whether the server should authorize the client in SSL - * connections. + * Specify whether the server should authorize the client in SSL connections. */ public Builder needsClientAuth(boolean value) { this.needsClientAuth = value; @@ -297,7 +289,7 @@ public Builder needsClientAuth(boolean value) { * @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead. */ @Deprecated - public Builder setName(String name){ + public Builder setName(String name) { this.name = name; return this; } @@ -307,7 +299,7 @@ public Builder setName(String name){ * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. 
*/ @Deprecated - public Builder setBindAddress(String bindAddress){ + public Builder setBindAddress(String bindAddress) { this.bindAddress = bindAddress; return this; } @@ -393,7 +385,7 @@ public HttpServer build() throws IOException { try { endpoints.add(0, new URI("http", "", bindAddress, port, "", "", "")); } catch (URISyntaxException e) { - throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e); + throw new HadoopIllegalArgumentException("Invalid endpoint: " + e); } } @@ -447,11 +439,11 @@ public HttpServer build() throws IOException { LOG.debug("Excluded SSL Cipher List:" + excludeCiphers); } - listener = new ServerConnector(server.webServer, new SslConnectionFactory(sslCtxFactory, - HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig)); + listener = new ServerConnector(server.webServer, + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); } else { - throw new HadoopIllegalArgumentException( - "unknown scheme for endpoint:" + ep); + throw new HadoopIllegalArgumentException("unknown scheme for endpoint:" + ep); } // default settings for connector @@ -482,90 +474,83 @@ public HttpServer build() throws IOException { */ @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort) - throws IOException { + throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** - * Create a status server on the given port. Allows you to specify the - * path specifications that this server will be serving so that they will be - * added to the filters properly. - * - * @param name The name of the server + * Create a status server on the given port. Allows you to specify the path specifications that + * this server will be serving so that they will be added to the filters properly. + * @param name The name of the server * @param bindAddress The address for this server - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. - * @param conf Configuration - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port. + * @param conf Configuration + * @param pathSpecs Path specifications that this httpserver will be serving. These will be + * added to any filters. * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + String[] pathSpecs) throws IOException { this(name, bindAddress, port, findPort, conf, null, pathSpecs); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. - * @param name The name of the server - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. - * @param conf Configuration + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. 
+ * @param name The name of the server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. + * @param conf Configuration * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) + throws IOException { this(name, bindAddress, port, findPort, conf, null, null); } /** - * Creates a status server on the given port. The JSP scripts are taken - * from src/webapp<name>. - * - * @param name the name of the server + * Creates a status server on the given port. The JSP scripts are taken from + * src/webapp<name>. + * @param name the name of the server * @param bindAddress the address for this server - * @param port the port to use on the server - * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port - * @param conf the configuration to use - * @param adminsAcl {@link AccessControlList} of the admins + * @param port the port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port + * @param conf the configuration to use + * @param adminsAcl {@link AccessControlList} of the admins * @throws IOException when creating the server fails * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl) - throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl) throws IOException { this(name, bindAddress, port, findPort, conf, adminsAcl, null); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. - * @param name The name of the server + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. + * @param name The name of the server * @param bindAddress The address for this server - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. - * @param conf Configuration - * @param adminsAcl {@link AccessControlList} of the admins - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port. + * @param conf Configuration + * @param adminsAcl {@link AccessControlList} of the admins + * @param pathSpecs Path specifications that this httpserver will be serving. These will be + * added to any filters. 
* @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl, - String[] pathSpecs) throws IOException { - this(new Builder().setName(name) - .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) - .setFindPort(findPort).setConf(conf).setACL(adminsAcl) - .setPathSpec(pathSpecs)); + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl, String[] pathSpecs) throws IOException { + this(new Builder().setName(name).addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(findPort).setConf(conf).setACL(adminsAcl).setPathSpec(pathSpecs)); } private HttpServer(final Builder b) throws IOException { @@ -573,12 +558,11 @@ private HttpServer(final Builder b) throws IOException { this.logDir = b.logDir; final String appDir = getWebAppsPath(b.name); - int maxThreads = b.conf.getInt(HTTP_MAX_THREADS, 16); // If HTTP_MAX_THREADS is less than or equal to 0, QueueThreadPool() will use the // default value (currently 200). - QueuedThreadPool threadPool = maxThreads <= 0 ? new QueuedThreadPool() - : new QueuedThreadPool(maxThreads); + QueuedThreadPool threadPool = + maxThreads <= 0 ? new QueuedThreadPool() : new QueuedThreadPool(maxThreads); threadPool.setDaemon(true); this.webServer = new Server(threadPool); @@ -590,9 +574,8 @@ private HttpServer(final Builder b) throws IOException { this.webServer.setHandler(buildGzipHandler(this.webServer.getHandler())); } - private void initializeWebServer(String name, String hostName, - Configuration conf, String[] pathSpecs, HttpServer.Builder b) - throws FileNotFoundException, IOException { + private void initializeWebServer(String name, String hostName, Configuration conf, + String[] pathSpecs, HttpServer.Builder b) throws FileNotFoundException, IOException { Preconditions.checkNotNull(webAppContext); @@ -623,20 +606,18 @@ private void initializeWebServer(String name, String hostName, addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); - addGlobalFilter("clickjackingprevention", - ClickjackingPreventionFilter.class.getName(), - ClickjackingPreventionFilter.getDefaultParameters(conf)); + addGlobalFilter("clickjackingprevention", ClickjackingPreventionFilter.class.getName(), + ClickjackingPreventionFilter.getDefaultParameters(conf)); HttpConfig httpConfig = new HttpConfig(conf); - addGlobalFilter("securityheaders", - SecurityHeadersFilter.class.getName(), - SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); + addGlobalFilter("securityheaders", SecurityHeadersFilter.class.getName(), + SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); // But security needs to be enabled prior to adding the other servlets if (authenticationEnabled) { initSpnego(conf, hostName, b.usernameConfKey, b.keytabConfKey, b.kerberosNameRulesKey, - b.signatureSecretFileKey); + b.signatureSecretFileKey); } final FilterInitializer[] initializers = getFilterInitializers(conf); @@ -662,16 +643,16 @@ private void addManagedListener(ServerConnector connector) { listeners.add(new ListenerInfo(true, connector)); } - private static WebAppContext createWebAppContext(String name, - Configuration conf, AccessControlList adminsAcl, final String appDir) { + private static WebAppContext createWebAppContext(String name, Configuration conf, + AccessControlList adminsAcl, final String appDir) { WebAppContext ctx = new WebAppContext(); 
ctx.setDisplayName(name); ctx.setContextPath("/"); ctx.setWar(appDir + "/" + name); ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); // for org.apache.hadoop.metrics.MetricsServlet - ctx.getServletContext().setAttribute( - org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf); + ctx.getServletContext().setAttribute(org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, + conf); ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); addNoCacheFilter(ctx); return ctx; @@ -681,11 +662,12 @@ private static WebAppContext createWebAppContext(String name, * Construct and configure an instance of {@link GzipHandler}. With complex * multi-{@link WebAppContext} configurations, it's easiest to apply this handler directly to the * instance of {@link Server} near the end of its configuration, something like + * *
    -   *    Server server = new Server();
    -   *    //...
    -   *    server.setHandler(buildGzipHandler(server.getHandler()));
    -   *    server.start();
    +   * Server server = new Server();
    +   * // ...
    +   * server.setHandler(buildGzipHandler(server.getHandler()));
    +   * server.start();
        * </pre>
    */ public static GzipHandler buildGzipHandler(final Handler wrapped) { @@ -696,7 +678,7 @@ public static GzipHandler buildGzipHandler(final Handler wrapped) { private static void addNoCacheFilter(WebAppContext ctxt) { defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(), - Collections. emptyMap(), new String[] { "/*" }); + Collections. emptyMap(), new String[] { "/*" }); } /** Get an array of FilterConfiguration specified in the conf */ @@ -711,8 +693,8 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { } FilterInitializer[] initializers = new FilterInitializer[classes.length]; - for(int i = 0; i < classes.length; i++) { - initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(classes[i]); + for (int i = 0; i < classes.length; i++) { + initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]); } return initializers; } @@ -721,8 +703,8 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { * Add default apps. * @param appDir The application directory */ - protected void addDefaultApps(ContextHandlerCollection parent, - final String appDir, Configuration conf) { + protected void addDefaultApps(ContextHandlerCollection parent, final String appDir, + Configuration conf) { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = this.logDir; if (logDir == null) { @@ -733,12 +715,12 @@ protected void addDefaultApps(ContextHandlerCollection parent, logContext.addServlet(AdminAuthorizedServlet.class, "/*"); logContext.setResourceBase(logDir); - if (conf.getBoolean( - ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, - ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { + if ( + conf.getBoolean(ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, + ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES) + ) { Map params = logContext.getInitParams(); - params.put( - "org.mortbay.jetty.servlet.Default.aliases", "true"); + params.put("org.mortbay.jetty.servlet.Default.aliases", "true"); } logContext.setDisplayName("logs"); setContextAttributes(logContext, conf); @@ -761,13 +743,13 @@ private void setContextAttributes(ServletContextHandler context, Configuration c /** * Add default servlets. */ - protected void addDefaultServlets( - ContextHandlerCollection contexts, Configuration conf) throws IOException { + protected void addDefaultServlets(ContextHandlerCollection contexts, Configuration conf) + throws IOException { // set up default servlets addPrivilegedServlet("stacks", "/stacks", StackServlet.class); addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class); - // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's - // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. + // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's + // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. // Remove when we drop support for hbase on hadoop2.x. try { Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet"); @@ -796,15 +778,15 @@ protected void addDefaultServlets( genCtx.setDisplayName("prof-output-hbase"); } else { addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class); - LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + - "not specified. 
Disabling /prof endpoint."); + LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + + "not specified. Disabling /prof endpoint."); } } /** - * Set a value in the webapp context. These values are available to the jsp - * pages as "application.getAttribute(name)". - * @param name The name of the attribute + * Set a value in the webapp context. These values are available to the jsp pages as + * "application.getAttribute(name)". + * @param name The name of the attribute * @param value The value of the attribute */ public void setAttribute(String name, Object value) { @@ -814,12 +796,10 @@ public void setAttribute(String name, Object value) { /** * Add a Jersey resource package. * @param packageName The Java package name containing the Jersey resource. - * @param pathSpec The path spec for the servlet + * @param pathSpec The path spec for the servlet */ - public void addJerseyResourcePackage(final String packageName, - final String pathSpec) { - LOG.info("addJerseyResourcePackage: packageName=" + packageName - + ", pathSpec=" + pathSpec); + public void addJerseyResourcePackage(final String packageName, final String pathSpec) { + LOG.info("addJerseyResourcePackage: packageName=" + packageName + ", pathSpec=" + pathSpec); ResourceConfig application = new ResourceConfig().packages(packageName); final ServletHolder sh = new ServletHolder(new ServletContainer(application)); @@ -828,23 +808,23 @@ public void addJerseyResourcePackage(final String packageName, /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user - * can interact with the servlet added by this method. - * @param name The name of the servlet (can be passed as null) + * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user can + * interact with the servlet added by this method. + * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet - * @param clazz The servlet class + * @param clazz The servlet class */ public void addUnprivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { addServletWithAuth(name, pathSpec, clazz, false); } /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user - * can interact with the servlet added by this method. + * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user can + * interact with the servlet added by this method. * @param pathSpec The path spec for the servlet - * @param holder The servlet holder + * @param holder The servlet holder */ public void addUnprivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, false); @@ -856,15 +836,14 @@ public void addUnprivilegedServlet(String pathSpec, ServletHolder holder) { * who are identified as administrators can interact with the servlet added by this method. */ public void addPrivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { addServletWithAuth(name, pathSpec, clazz, true); } /** * Adds a servlet in the server that only administrators can access. This method differs from - * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those - * authenticated user who are identified as administrators can interact with the servlet added by - * this method. 
+ * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those authenticated user + * who are identified as administrators can interact with the servlet added by this method. */ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, true); @@ -875,8 +854,8 @@ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { * directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or * {@link #addPrivilegedServlet(String, String, Class)}. */ - void addServletWithAuth(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addServletWithAuth(String name, String pathSpec, Class clazz, + boolean requireAuthz) { addInternalServlet(name, pathSpec, clazz, requireAuthz); addFilterPathMapping(pathSpec, webAppContext); } @@ -892,20 +871,17 @@ void addServletWithAuth(String pathSpec, ServletHolder holder, boolean requireAu } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * - * @param name The name of the {@link Servlet} (can be passed as null) - * @param pathSpec The path spec for the {@link Servlet} - * @param clazz The {@link Servlet} class + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. + * @param name The name of the {@link Servlet} (can be passed as null) + * @param pathSpec The path spec for the {@link Servlet} + * @param clazz The {@link Servlet} class * @param requireAuthz Require Kerberos authenticate to access servlet */ - void addInternalServlet(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addInternalServlet(String name, String pathSpec, Class clazz, + boolean requireAuthz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); @@ -914,15 +890,12 @@ void addInternalServlet(String name, String pathSpec, } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * - * @param pathSpec The path spec for the {@link Servlet} - * @param holder The object providing the {@link Servlet} instance + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. 
+ * @param pathSpec The path spec for the {@link Servlet} + * @param holder The object providing the {@link Servlet} instance * @param requireAuthz Require Kerberos authenticate to access servlet */ void addInternalServlet(String pathSpec, ServletHolder holder, boolean requireAuthz) { @@ -944,15 +917,15 @@ void addInternalServlet(String pathSpec, ServletHolder holder, boolean requireAu public void addFilter(String name, String classname, Map parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + webAppContext.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + webAppContext.getDisplayName()); final String[] ALL_URLS = { "/*" }; for (Map.Entry e : defaultContexts.entrySet()) { if (e.getValue()) { ServletContextHandler handler = e.getKey(); defineFilter(handler, name, classname, parameters, ALL_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + handler.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + handler.getDisplayName()); } } filterNames.add(name); @@ -971,8 +944,8 @@ public void addGlobalFilter(String name, String classname, Map p /** * Define a filter for a context and set up default url mappings. */ - public static void defineFilter(ServletContextHandler handler, String name, - String classname, Map parameters, String[] urls) { + public static void defineFilter(ServletContextHandler handler, String name, String classname, + Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); @@ -988,12 +961,11 @@ public static void defineFilter(ServletContextHandler handler, String name, /** * Add the path spec to the filter path mapping. - * @param pathSpec The path spec + * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ - protected void addFilterPathMapping(String pathSpec, - WebAppContext webAppCtx) { - for(String name : filterNames) { + protected void addFilterPathMapping(String pathSpec, WebAppContext webAppCtx) { + for (String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); @@ -1011,7 +983,7 @@ public Object getAttribute(String name) { return webAppContext.getAttribute(name); } - public WebAppContext getWebAppContext(){ + public WebAppContext getWebAppContext() { return this.webAppContext; } @@ -1029,8 +1001,7 @@ protected String getWebAppsPath(String webapps, String appName) throws FileNotFo URL url = getClass().getClassLoader().getResource(webapps + "/" + appName); if (url == null) { - throw new FileNotFoundException(webapps + "/" + appName - + " not found in CLASSPATH"); + throw new FileNotFoundException(webapps + "/" + appName + " not found in CLASSPATH"); } String urlString = url.toString(); @@ -1044,14 +1015,13 @@ protected String getWebAppsPath(String webapps, String appName) throws FileNotFo */ @Deprecated public int getPort() { - return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort(); + return ((ServerConnector) webServer.getConnectors()[0]).getLocalPort(); } /** * Get the address that corresponds to a particular connector. - * - * @return the corresponding address for the connector, or null if there's no - * such connector or the connector is not bounded. 
+ * @return the corresponding address for the connector, or null if there's no such connector or + * the connector is not bounded. */ public InetSocketAddress getConnectorAddress(int index) { Preconditions.checkArgument(index >= 0); @@ -1060,7 +1030,7 @@ public InetSocketAddress getConnectorAddress(int index) { return null; } - ServerConnector c = (ServerConnector)webServer.getConnectors()[index]; + ServerConnector c = (ServerConnector) webServer.getConnectors()[index]; if (c.getLocalPort() == -1 || c.getLocalPort() == -2) { // -1 if the connector has not been opened // -2 if it has been closed @@ -1079,14 +1049,14 @@ public void setThreads(int min, int max) { pool.setMaxThreads(max); } - private void initSpnego(Configuration conf, String hostName, - String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey, - String signatureSecretKeyFileKey) throws IOException { + private void initSpnego(Configuration conf, String hostName, String usernameConfKey, + String keytabConfKey, String kerberosNameRuleKey, String signatureSecretKeyFileKey) + throws IOException { Map params = new HashMap<>(); String principalInConf = getOrEmptyString(conf, usernameConfKey); if (!principalInConf.isEmpty()) { - params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal( - principalInConf, hostName)); + params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, + SecurityUtil.getServerPrincipal(principalInConf, hostName)); } String httpKeytab = getOrEmptyString(conf, keytabConfKey); if (!httpKeytab.isEmpty()) { @@ -1098,30 +1068,34 @@ private void initSpnego(Configuration conf, String hostName, } String signatureSecretKeyFile = getOrEmptyString(conf, signatureSecretKeyFileKey); if (!signatureSecretKeyFile.isEmpty()) { - params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, - signatureSecretKeyFile); + params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, signatureSecretKeyFile); } params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); // Verify that the required options were provided - if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) || - isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) { - throw new IllegalArgumentException(usernameConfKey + " and " - + keytabConfKey + " are both required in the configuration " + if ( + isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) + || isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX)) + ) { + throw new IllegalArgumentException( + usernameConfKey + " and " + keytabConfKey + " are both required in the configuration " + "to enable SPNEGO/Kerberos authentication for the Web UI"); } - if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY, - HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) { - //Copy/rename standard hadoop proxyuser settings to filter - for(Map.Entry proxyEntry : - conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { - params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), - proxyEntry.getValue()); - } - addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), params); + if ( + conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY, + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT) + ) { + // Copy/rename standard hadoop proxyuser settings to filter + for (Map.Entry proxyEntry : conf + .getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { + params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), + 
proxyEntry.getValue()); + } + addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), + params); } else { - addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); + addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); } } @@ -1136,8 +1110,7 @@ private boolean isMissing(String value) { } /** - * Extracts the value for the given key from the configuration of returns a string of - * zero length. + * Extracts the value for the given key from the configuration of returns a string of zero length. */ private String getOrEmptyString(Configuration conf, String key) { if (null == key) { @@ -1166,8 +1139,7 @@ public void start() throws IOException { Handler[] handlers = webServer.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i].isFailed()) { - throw new IOException( - "Problem in starting http server. Server handlers failed"); + throw new IOException("Problem in starting http server. Server handlers failed"); } } // Make sure there are no errors initializing the context. @@ -1176,14 +1148,13 @@ public void start() throws IOException { // Have to stop the webserver, or else its non-daemon threads // will hang forever. webServer.stop(); - throw new IOException("Unable to initialize WebAppContext", - unavailableException); + throw new IOException("Unable to initialize WebAppContext", unavailableException); } } catch (IOException e) { throw e; } catch (InterruptedException e) { - throw (IOException) new InterruptedIOException( - "Interrupted while starting HTTP server").initCause(e); + throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server") + .initCause(e); } catch (Exception e) { throw new IOException("Problem starting http server", e); } @@ -1216,12 +1187,12 @@ void openListeners() throws Exception { LOG.info("Jetty bound to port " + listener.getLocalPort()); break; } catch (IOException ex) { - if(!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { + if (!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { throw ex; } if (port == 0 || !findPort) { - BindException be = new BindException("Port in use: " - + listener.getHost() + ":" + listener.getPort()); + BindException be = + new BindException("Port in use: " + listener.getHost() + ":" + listener.getPort()); be.initCause(ex); throw be; } @@ -1246,9 +1217,7 @@ public void stop() throws Exception { try { li.listener.close(); } catch (Exception e) { - LOG.error( - "Error while stopping listener for webapp" - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping listener for webapp" + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } } @@ -1258,16 +1227,15 @@ public void stop() throws Exception { webAppContext.clearAttributes(); webAppContext.stop(); } catch (Exception e) { - LOG.error("Error while stopping web app context for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), + e); exception = addMultiException(exception, e); } try { webServer.stop(); } catch (Exception e) { - LOG.error("Error while stopping web server for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } @@ -1278,7 +1246,7 @@ public void stop() throws Exception { } private MultiException 
addMultiException(MultiException exception, Exception e) { - if(exception == null){ + if (exception == null) { exception = new MultiException(); } exception.add(e); @@ -1307,8 +1275,8 @@ public String toString() { return "Inactive HttpServer"; } else { StringBuilder sb = new StringBuilder("HttpServer (") - .append(isAlive() ? STATE_DESCRIPTION_ALIVE : - STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); + .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE) + .append("), listening at:"); for (ListenerInfo li : listeners) { ServerConnector l = li.listener; sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); @@ -1320,29 +1288,26 @@ public String toString() { /** * Checks the user has privileges to access to instrumentation servlets. *

    - * If hadoop.security.instrumentation.requires.admin is set to FALSE - * (default value) it always returns TRUE. - *
    - * If hadoop.security.instrumentation.requires.admin is set to TRUE - * it will check that if the current user is in the admin ACLS. If the user is - * in the admin ACLs it returns TRUE, otherwise it returns FALSE. + * If hadoop.security.instrumentation.requires.admin is set to FALSE (default value) + * it always returns TRUE. + *
    + *
    + * If hadoop.security.instrumentation.requires.admin is set to TRUE it will check + * that if the current user is in the admin ACLS. If the user is in the admin ACLs it returns + * TRUE, otherwise it returns FALSE. *
    - * * @param servletContext the servlet context. - * @param request the servlet request. - * @param response the servlet response. + * @param request the servlet request. + * @param response the servlet response. * @return TRUE/FALSE based on the logic decribed above. */ - public static boolean isInstrumentationAccessAllowed( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); boolean access = true; - boolean adminAccess = conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - false); + boolean adminAccess = conf + .getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false); if (adminAccess) { access = hasAdministratorAccess(servletContext, request, response); } @@ -1350,44 +1315,39 @@ public static boolean isInstrumentationAccessAllowed( } /** - * Does the user sending the HttpServletRequest has the administrator ACLs? If - * it isn't the case, response will be modified to send an error to the user. - * + * Does the user sending the HttpServletRequest has the administrator ACLs? If it isn't the case, + * response will be modified to send an error to the user. * @param servletContext the {@link ServletContext} to use - * @param request the {@link HttpServletRequest} to check - * @param response used to send the error response if user does not have admin access. + * @param request the {@link HttpServletRequest} to check + * @param response used to send the error response if user does not have admin access. * @return true if admin-authorized, false otherwise * @throws IOException if an unauthenticated or unauthorized user tries to access the page */ - public static boolean hasAdministratorAccess( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean hasAdministratorAccess(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return hasAdministratorAccess(conf, acl, request, response); } public static boolean hasAdministratorAccess(Configuration conf, AccessControlList acl, - HttpServletRequest request, HttpServletResponse response) throws IOException { + HttpServletRequest request, HttpServletResponse response) throws IOException { // If there is no authorization, anybody has administrator access. 
- if (!conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + if (!conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { return true; } String remoteUser = request.getRemoteUser(); if (remoteUser == null) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, - "Unauthenticated users are not " + - "authorized to access this page."); + "Unauthenticated users are not " + "authorized to access this page."); return false; } if (acl != null && !userHasAdministratorAccess(acl, remoteUser)) { - response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " - + remoteUser + " is unauthorized to access this page."); + response.sendError(HttpServletResponse.SC_FORBIDDEN, + "User " + remoteUser + " is unauthorized to access this page."); return false; } @@ -1395,32 +1355,27 @@ public static boolean hasAdministratorAccess(Configuration conf, AccessControlLi } /** - * Get the admin ACLs from the given ServletContext and check if the given - * user is in the ACL. - * + * Get the admin ACLs from the given ServletContext and check if the given user is in the ACL. * @param servletContext the context containing the admin ACL. - * @param remoteUser the remote user to check for. - * @return true if the user is present in the ACL, false if no ACL is set or - * the user is not present + * @param remoteUser the remote user to check for. + * @return true if the user is present in the ACL, false if no ACL is set or the user is not + * present */ public static boolean userHasAdministratorAccess(ServletContext servletContext, - String remoteUser) { - AccessControlList adminsAcl = (AccessControlList) servletContext - .getAttribute(ADMINS_ACL); + String remoteUser) { + AccessControlList adminsAcl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return userHasAdministratorAccess(adminsAcl, remoteUser); } public static boolean userHasAdministratorAccess(AccessControlList acl, String remoteUser) { - UserGroupInformation remoteUserUGI = - UserGroupInformation.createRemoteUser(remoteUser); + UserGroupInformation remoteUserUGI = UserGroupInformation.createRemoteUser(remoteUser); return acl != null && acl.isUserAllowed(remoteUserUGI); } /** - * A very simple servlet to serve up a text representation of the current - * stack traces. It both returns the stacks to the caller and logs them. - * Currently the stack traces are done sequentially rather than exactly the - * same data. + * A very simple servlet to serve up a text representation of the current stack traces. It both + * returns the stacks to the caller and logs them. Currently the stack traces are done + * sequentially rather than exactly the same data. 
*/ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @@ -1428,13 +1383,11 @@ public static class StackServlet extends HttpServlet { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } response.setContentType("text/plain; charset=UTF-8"); - try (PrintStream out = new PrintStream( - response.getOutputStream(), false, "UTF-8")) { + try (PrintStream out = new PrintStream(response.getOutputStream(), false, "UTF-8")) { Threads.printThreadInfo(out, ""); out.flush(); } @@ -1443,9 +1396,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) } /** - * A Servlet input filter that quotes all HTML active characters in the - * parameter names and values. The goal is to quote the characters to make - * all of the servlets resistant to cross-site scripting attacks. + * A Servlet input filter that quotes all HTML active characters in the parameter names and + * values. The goal is to quote the characters to make all of the servlets resistant to cross-site + * scripting attacks. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static class QuotingInputFilter implements Filter { @@ -1453,6 +1406,7 @@ public static class QuotingInputFilter implements Filter { public static class RequestQuoter extends HttpServletRequestWrapper { private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { super(rawRequest); this.rawRequest = rawRequest; @@ -1464,8 +1418,8 @@ public RequestQuoter(HttpServletRequest rawRequest) { @Override public Enumeration getParameterNames() { return new Enumeration() { - private Enumeration rawIterator = - rawRequest.getParameterNames(); + private Enumeration rawIterator = rawRequest.getParameterNames(); + @Override public boolean hasMoreElements() { return rawIterator.hasMoreElements(); @@ -1483,8 +1437,8 @@ public String nextElement() { */ @Override public String getParameter(String name) { - return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter( - HtmlQuoting.unquoteHtmlChars(name))); + return HtmlQuoting + .quoteHtmlChars(rawRequest.getParameter(HtmlQuoting.unquoteHtmlChars(name))); } @Override @@ -1495,7 +1449,7 @@ public String[] getParameterValues(String name) { return null; } String[] result = new String[unquoteValue.length]; - for(int i=0; i < result.length; ++i) { + for (int i = 0; i < result.length; ++i) { result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]); } return result; @@ -1505,10 +1459,10 @@ public String[] getParameterValues(String name) { public Map getParameterMap() { Map result = new HashMap<>(); Map raw = rawRequest.getParameterMap(); - for (Map.Entry item: raw.entrySet()) { + for (Map.Entry item : raw.entrySet()) { String[] rawValue = item.getValue(); String[] cookedValue = new String[rawValue.length]; - for(int i=0; i< rawValue.length; ++i) { + for (int i = 0; i < rawValue.length; ++i) { cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]); } result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue); @@ -1517,18 +1471,16 @@ public Map getParameterMap() { } /** - * Quote the url so that users specifying the HOST HTTP header - * can't inject attacks. 
+ * Quote the url so that users specifying the HOST HTTP header can't inject attacks. */ @Override - public StringBuffer getRequestURL(){ + public StringBuffer getRequestURL() { String url = rawRequest.getRequestURL().toString(); return new StringBuffer(HtmlQuoting.quoteHtmlChars(url)); } /** - * Quote the server name so that users specifying the HOST HTTP header - * can't inject attacks. + * Quote the server name so that users specifying the HOST HTTP header can't inject attacks. */ @Override public String getServerName() { @@ -1546,12 +1498,9 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, - ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { - HttpServletRequestWrapper quoted = - new RequestQuoter((HttpServletRequest) request); + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + HttpServletRequestWrapper quoted = new RequestQuoter((HttpServletRequest) request); HttpServletResponse httpResponse = (HttpServletResponse) response; String mime = inferMimeType(request); @@ -1570,11 +1519,11 @@ public void doFilter(ServletRequest request, } /** - * Infer the mime type for the response based on the extension of the request - * URI. Returns null if unknown. + * Infer the mime type for the response based on the extension of the request URI. Returns null + * if unknown. */ private String inferMimeType(ServletRequest request) { - String path = ((HttpServletRequest)request).getRequestURI(); + String path = ((HttpServletRequest) request).getRequestURI(); ServletContext context = config.getServletContext(); return context.getMimeType(path); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java index 94269719aa42..686f0861f25a 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,11 +31,11 @@ public final class HttpServerUtil { /** * Add constraints to a Jetty Context to disallow undesirable Http methods. 
- * @param ctxHandler The context to modify + * @param ctxHandler The context to modify * @param allowOptionsMethod if true then OPTIONS method will not be set in constraint mapping */ public static void constrainHttpMethods(ServletContextHandler ctxHandler, - boolean allowOptionsMethod) { + boolean allowOptionsMethod) { Constraint c = new Constraint(); c.setAuthenticate(true); @@ -59,5 +59,6 @@ public static void constrainHttpMethods(ServletContextHandler ctxHandler, ctxHandler.setSecurityHandler(securityHandler); } - private HttpServerUtil() {} + private HttpServerUtil() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 8b13e2b22053..c44222b83342 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -27,16 +27,15 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/stacks/" -> points to stack trace - * "/static/" -> points to common static files (src/hbase-webapps/static) - * "/" -> the jsp server code from (src/hbase-webapps/<name>) + * Create a Jetty embedded server to answer http requests. The primary goal is to serve up status + * information for the server. There are three contexts: "/stacks/" -> points to stack trace + * "/static/" -> points to common static files (src/hbase-webapps/static) "/" -> the jsp + * server code from (src/hbase-webapps/<name>) */ @InterfaceAudience.Private public class InfoServer { @@ -44,38 +43,38 @@ public class InfoServer { private final org.apache.hadoop.hbase.http.HttpServer httpServer; /** - * Create a status server on the given port. - * The jsp scripts are taken from src/hbase-webapps/name. - * @param name The name of the server + * Create a status server on the given port. The jsp scripts are taken from + * src/hbase-webapps/name. + * @param name The name of the server * @param bindAddress address to bind to - * @param port The port to use on the server - * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port. - * @param c the {@link Configuration} to build the server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until + * it finds a free port. + * @param c the {@link Configuration} to build the server * @throws IOException if getting one of the password fails or the server cannot be created */ public InfoServer(String name, String bindAddress, int port, boolean findPort, - final Configuration c) throws IOException { + final Configuration c) throws IOException { HttpConfig httpConfig = new HttpConfig(c); - HttpServer.Builder builder = - new org.apache.hadoop.hbase.http.HttpServer.Builder(); + HttpServer.Builder builder = new org.apache.hadoop.hbase.http.HttpServer.Builder(); - builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() + - HostAndPort.fromParts(bindAddress,port).toString())). 
- setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); + builder.setName(name) + .addEndpoint(URI + .create(httpConfig.getSchemePrefix() + HostAndPort.fromParts(bindAddress, port).toString())) + .setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); String logDir = System.getProperty("hbase.log.dir"); if (logDir != null) { builder.setLogDir(logDir); } if (httpConfig.isSecure()) { - builder.keyPassword(HBaseConfiguration - .getPassword(c, "ssl.server.keystore.keypassword", null)) + builder + .keyPassword(HBaseConfiguration.getPassword(c, "ssl.server.keystore.keypassword", null)) .keyStore(c.get("ssl.server.keystore.location"), - HBaseConfiguration.getPassword(c,"ssl.server.keystore.password", null), - c.get("ssl.server.keystore.type", "jks")) + HBaseConfiguration.getPassword(c, "ssl.server.keystore.password", null), + c.get("ssl.server.keystore.type", "jks")) .trustStore(c.get("ssl.server.truststore.location"), - HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), - c.get("ssl.server.truststore.type", "jks")); + HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), + c.get("ssl.server.truststore.type", "jks")); builder.excludeCiphers(c.get("ssl.server.exclude.cipher.list")); } // Enable SPNEGO authentication @@ -83,8 +82,7 @@ public InfoServer(String name, String bindAddress, int port, boolean findPort, builder.setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) - .setSignatureSecretFileKey( - HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) + .setSignatureSecretFileKey(HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) .setSecurityEnabled(true); // Set an admin ACL on sensitive webUI endpoints @@ -95,13 +93,13 @@ public InfoServer(String name, String bindAddress, int port, boolean findPort, } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. return new AccessControlList("*", null); @@ -111,17 +109,14 @@ AccessControlList buildAdminAcl(Configuration conf) { /** * Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or - * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. - * This method will add a servlet which any authenticated user can access. - * + * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. This method will + * add a servlet which any authenticated user can access. * @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or - * {@link #addPrivilegedServlet(String, String, Class)} instead of this - * method which does not state outwardly what kind of authz rules will - * be applied to this servlet. 
+ * {@link #addPrivilegedServlet(String, String, Class)} instead of this method which + * does not state outwardly what kind of authz rules will be applied to this servlet. */ @Deprecated - public void addServlet(String name, String pathSpec, - Class clazz) { + public void addServlet(String name, String pathSpec, Class clazz) { addUnprivilegedServlet(name, pathSpec, clazz); } @@ -130,7 +125,7 @@ public void addServlet(String name, String pathSpec, * @see HttpServer#addUnprivilegedServlet(String, String, Class) */ public void addUnprivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addUnprivilegedServlet(name, pathSpec, clazz); } @@ -150,7 +145,7 @@ public void addUnprivilegedServlet(String name, String pathSpec, ServletHolder h * @see HttpServer#addPrivilegedServlet(String, String, Class) */ public void addPrivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addPrivilegedServlet(name, pathSpec, clazz); } @@ -175,21 +170,22 @@ public void stop() throws Exception { this.httpServer.stop(); } - /** * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled, * and the requesting user is defined as an administrator. If the UI is set to readonly, this * method always returns false. */ - public static boolean canUserModifyUI( - HttpServletRequest req, ServletContext ctx, Configuration conf) { + public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, + Configuration conf) { if (conf.getBoolean("hbase.master.ui.readonly", false)) { return false; } String remoteUser = req.getRemoteUser(); - if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && - conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && - remoteUser != null) { + if ( + "kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) + && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) + && remoteUser != null + ) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); } return false; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java index cd49f7e16baf..0c6aaa05079b 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -26,7 +25,6 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -38,8 +36,7 @@ public void init(FilterConfig filterConfig) throws ServletException { } @Override - public void doFilter(ServletRequest req, ServletResponse res, - FilterChain chain) + public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.setHeader("Cache-Control", "no-cache"); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java index d77ea9b14cec..d92e7d009f68 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java @@ -49,7 +49,7 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res // running which gets replaced by final output. If final output is not ready yet, the file size // will be <100 bytes (in all modes). if (requestedFile.length() < 100) { - LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); + LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); String refreshUrl = req.getRequestURI(); // Rebuild the query string (if we have one) if (req.getQueryString() != null) { @@ -57,8 +57,8 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } ProfileServlet.setResponseHeader(resp); resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl); - resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + - " seconds until the output file is ready. Redirecting to " + refreshUrl); + resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + + " seconds until the output file is ready. Redirecting to " + refreshUrl); } else { super.doGet(req, resp); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java index 0fbe31ae4c99..2d87a2b3f26a 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java @@ -25,63 +25,35 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.util.ProcessUtils; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Servlet that runs async-profiler as web-endpoint. - * Following options from async-profiler can be specified as query paramater. 
- * // -e event profiling event: cpu|alloc|lock|cache-misses etc. - * // -d duration run profiling for 'duration' seconds (integer) - * // -i interval sampling interval in nanoseconds (long) - * // -j jstackdepth maximum Java stack depth (integer) - * // -b bufsize frame buffer size (long) - * // -t profile different threads separately - * // -s simple class names instead of FQN - * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html - * // --width px SVG width pixels (integer) - * // --height px SVG frame height pixels (integer) - * // --minwidth px skip frames smaller than px (double) - * // --reverse generate stack-reversed FlameGraph / Call tree - * Example: - * - To collect 30 second CPU profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof" - * - To collect 1 minute CPU profile of current process and output in tree format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" - * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=alloc" - * - To collect lock contention profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=lock" - * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events) - * // Perf events: - * // cpu - * // page-faults - * // context-switches - * // cycles - * // instructions - * // cache-references - * // cache-misses - * // branches - * // branch-misses - * // bus-cycles - * // L1-dcache-load-misses - * // LLC-load-misses - * // dTLB-load-misses - * // mem:breakpoint - * // trace:tracepoint - * // Java events: - * // alloc - * // lock + * Servlet that runs async-profiler as web-endpoint. Following options from async-profiler can be + * specified as query paramater. // -e event profiling event: cpu|alloc|lock|cache-misses etc. // -d + * duration run profiling for 'duration' seconds (integer) // -i interval sampling interval in + * nanoseconds (long) // -j jstackdepth maximum Java stack depth (integer) // -b bufsize frame + * buffer size (long) // -t profile different threads separately // -s simple class names instead of + * FQN // -o fmt[,fmt...] 
output format: summary|traces|flat|collapsed|svg|tree|jfr|html // --width + * px SVG width pixels (integer) // --height px SVG frame height pixels (integer) // --minwidth px + * skip frames smaller than px (double) // --reverse generate stack-reversed FlameGraph / Call tree + * Example: - To collect 30 second CPU profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof" - To collect 1 minute CPU profile of current process and output in + * tree format (html) curl "http://localhost:10002/prof?output=tree&duration=60" - To collect 30 + * second heap allocation profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof?event=alloc" - To collect lock contention profile of current process + * (returns FlameGraph svg) curl "http://localhost:10002/prof?event=lock" Following event types are + * supported (default is 'cpu') (NOTE: not all OS'es support all events) // Perf events: // cpu // + * page-faults // context-switches // cycles // instructions // cache-references // cache-misses // + * branches // branch-misses // bus-cycles // L1-dcache-load-misses // LLC-load-misses // + * dTLB-load-misses // mem:breakpoint // trace:tracepoint // Java events: // alloc // lock */ @InterfaceAudience.Private public class ProfileServlet extends HttpServlet { @@ -154,7 +126,7 @@ enum Output { } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED", - justification = "This class is never serialized nor restored.") + justification = "This class is never serialized nor restored.") private transient Lock profilerLock = new ReentrantLock(); private transient volatile Process process; private String asyncProfilerHome; @@ -168,7 +140,7 @@ public ProfileServlet() { @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) - throws IOException { + throws IOException { if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), req, resp)) { resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED); setResponseHeader(resp); @@ -180,10 +152,11 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + - "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter() + .write("ASYNC_PROFILER_HOME env is not set.\n\n" + + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. 
For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } @@ -194,8 +167,8 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res if (pid == null) { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write( - "'pid' query parameter unspecified or unable to determine PID of current process."); + resp.getWriter() + .write("'pid' query parameter unspecified or unable to determine PID of current process."); return; } @@ -217,9 +190,9 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res int lockTimeoutSecs = 3; if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) { try { - File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + - event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." + - output.name().toLowerCase()); + File outputFile = + new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + event.name().toLowerCase() + "-" + + ID_GEN.incrementAndGet() + "." + output.name().toLowerCase()); List cmd = new ArrayList<>(); cmd.add(asyncProfilerHome + PROFILER_SCRIPT); cmd.add("-e"); @@ -270,14 +243,13 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_ACCEPTED); String relativeUrl = "/prof-output-hbase/" + outputFile.getName(); - resp.getWriter().write( - "Started [" + event.getInternalName() + - "] profiling. This page will automatically redirect to " + - relativeUrl + " after " + duration + " seconds. " + - "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " + - "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." + - "\n\nCommand:\n" + - Joiner.on(" ").join(cmd)); + resp.getWriter() + .write("Started [" + event.getInternalName() + + "] profiling. This page will automatically redirect to " + relativeUrl + " after " + + duration + " seconds. " + + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " + + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." + + "\n\nCommand:\n" + Joiner.on(" ").join(cmd)); // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified // via url param @@ -293,10 +265,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } else { setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - resp.getWriter().write( - "Unable to acquire lock. Another instance of profiler might be running."); - LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + - " seconds. Another instance of profiler might be running."); + resp.getWriter() + .write("Unable to acquire lock. Another instance of profiler might be running."); + LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + + " seconds. 
Another instance of profiler might be running."); } } catch (InterruptedException e) { LOG.warn("Interrupted while acquiring profile lock.", e); @@ -310,7 +282,7 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } private Integer getInteger(final HttpServletRequest req, final String param, - final Integer defaultValue) { + final Integer defaultValue) { final String value = req.getParameter(param); if (value != null) { try { @@ -389,13 +361,14 @@ public static class DisabledServlet extends HttpServlet { @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) - throws IOException { + throws IOException { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + - "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter() + .write("The profiler servlet was disabled at startup.\n\n" + + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java index 182a4e10996d..c8456a461bb8 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java @@ -17,18 +17,6 @@ */ package org.apache.hadoop.hbase.http; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.util.HttpExceptionUtils; -import org.apache.hadoop.util.StringUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.security.Principal; import java.util.ArrayList; @@ -43,30 +31,32 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This file has been copied directly (changing only the package name and and the ASF license - * text format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase - * depends on doesn't have it yet - * (as of 2020 Apr 24, there is no Hadoop release that has it 
either). - * - * Hadoop version: - * unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 - * - * Haddop path: + * This file has been copied directly (changing only the package name and and the ASF license text + * format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase depends + * on doesn't have it yet (as of 2020 Apr 24, there is no Hadoop release that has it either). Hadoop + * version: unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 Haddop path: * hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/ - * server/ProxyUserAuthenticationFilter.java - * - * AuthenticationFilter which adds support to perform operations - * using end user instead of proxy user. Fetches the end user from - * doAs Query Parameter. + * server/ProxyUserAuthenticationFilter.java AuthenticationFilter which adds support to perform + * operations using end user instead of proxy user. Fetches the end user from doAs Query Parameter. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ProxyUserAuthenticationFilter extends AuthenticationFilter { - private static final Logger LOG = LoggerFactory.getLogger( - ProxyUserAuthenticationFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(ProxyUserAuthenticationFilter.class); private static final String DO_AS = "doas"; public static final String PROXYUSER_PREFIX = "proxyuser"; @@ -80,19 +70,18 @@ public void init(FilterConfig filterConfig) throws ServletException { @Override protected void doFilter(FilterChain filterChain, HttpServletRequest request, - HttpServletResponse response) throws IOException, ServletException { + HttpServletResponse response) throws IOException, ServletException { final HttpServletRequest lowerCaseRequest = toLowerCase(request); String doAsUser = lowerCaseRequest.getParameter(DO_AS); if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { - LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", - doAsUser, request.getRemoteUser(), request.getRemoteAddr()); - UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ? - UserGroupInformation.createRemoteUser(request.getRemoteUser()) - : null; + LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", doAsUser, + request.getRemoteUser(), request.getRemoteAddr()); + UserGroupInformation requestUgi = (request.getUserPrincipal() != null) + ? 
UserGroupInformation.createRemoteUser(request.getRemoteUser()) + : null; if (requestUgi != null) { - requestUgi = UserGroupInformation.createProxyUser(doAsUser, - requestUgi); + requestUgi = UserGroupInformation.createProxyUser(doAsUser, requestUgi); try { ProxyUsers.authorize(requestUgi, request.getRemoteAddr()); @@ -116,7 +105,7 @@ public String getName() { LOG.debug("Proxy user Authentication successful"); } catch (AuthorizationException ex) { HttpExceptionUtils.createServletExceptionResponse(response, - HttpServletResponse.SC_FORBIDDEN, ex); + HttpServletResponse.SC_FORBIDDEN, ex); LOG.warn("Proxy user Authentication exception", ex); return; } @@ -126,7 +115,7 @@ public String getName() { } protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) - throws ServletException { + throws ServletException { Configuration conf = new Configuration(false); Enumeration names = filterConfig.getInitParameterNames(); while (names.hasMoreElements()) { @@ -140,8 +129,8 @@ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) } static boolean containsUpperCase(final Iterable strings) { - for(String s : strings) { - for(int i = 0; i < s.length(); i++) { + for (String s : strings) { + for (int i = 0; i < s.length(); i++) { if (Character.isUpperCase(s.charAt(i))) { return true; } @@ -151,17 +140,15 @@ static boolean containsUpperCase(final Iterable strings) { } /** - * The purpose of this function is to get the doAs parameter of a http request - * case insensitively - * @param request - * @return doAs parameter if exists or null otherwise + * The purpose of this function is to get the doAs parameter of a http request case insensitively + * n * @return doAs parameter if exists or null otherwise */ - public static String getDoasFromHeader(final HttpServletRequest request) { + public static String getDoasFromHeader(final HttpServletRequest request) { String doas = null; final Enumeration headers = request.getHeaderNames(); - while (headers.hasMoreElements()){ + while (headers.hasMoreElements()) { String header = headers.nextElement(); - if (header.toLowerCase().equals("doas")){ + if (header.toLowerCase().equals("doas")) { doas = request.getHeader(header); break; } @@ -169,11 +156,9 @@ public static String getDoasFromHeader(final HttpServletRequest request) { return doas; } - public static HttpServletRequest toLowerCase( - final HttpServletRequest request) { + public static HttpServletRequest toLowerCase(final HttpServletRequest request) { @SuppressWarnings("unchecked") - final Map original = (Map) - request.getParameterMap(); + final Map original = (Map) request.getParameterMap(); if (!containsUpperCase(original.keySet())) { return request; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java index f00f2a195af0..01c8a32c62a0 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,16 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.IOException; @@ -37,10 +36,10 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecurityHeadersFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(SecurityHeadersFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(SecurityHeadersFilter.class); private static final String DEFAULT_HSTS = "max-age=63072000;includeSubDomains;preload"; - private static final String DEFAULT_CSP = "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; + private static final String DEFAULT_CSP = + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; private FilterConfig filterConfig; @Override @@ -51,7 +50,7 @@ public void init(FilterConfig filterConfig) throws ServletException { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletResponse httpResponse = (HttpServletResponse) response; httpResponse.addHeader("X-Content-Type-Options", "nosniff"); httpResponse.addHeader("X-XSS-Protection", "1; mode=block"); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java index 8f338a7af68a..9c99b0ab8dc7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceStability; /** - * This interface contains constants for configuration keys used - * in the hbase http server code. + * This interface contains constants for configuration keys used in the hbase http server code. 
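For context on the SecurityHeadersFilter hunk above (a formatting-only change), a minimal client sketch that prints the headers the filter attaches to responses; the localhost:16010 endpoint is an assumption, not something taken from this patch:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SecurityHeadersCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; any UI served behind SecurityHeadersFilter will do.
        HttpURLConnection conn =
          (HttpURLConnection) new URL("http://localhost:16010/").openConnection();
        conn.connect();
        // These two headers are added unconditionally in doFilter() above.
        System.out.println("X-Content-Type-Options: " + conn.getHeaderField("X-Content-Type-Options"));
        System.out.println("X-XSS-Protection: " + conn.getHeaderField("X-XSS-Protection"));
        // Content-Security-Policy is also managed by the filter; see the note below.
        System.out.println("Content-Security-Policy: " + conn.getHeaderField("Content-Security-Policy"));
        conn.disconnect();
      }
    }

Whether Strict-Transport-Security and Content-Security-Policy appear depends on how the filter is configured; the DEFAULT_HSTS and DEFAULT_CSP constants above are only the fallback values.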
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -34,11 +33,9 @@ public interface ServerConfigurationKeys { public static final boolean HBASE_SSL_ENABLED_DEFAULT = false; /** Enable/Disable aliases serving from jetty */ - public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = - "hbase.jetty.logs.serve.aliases"; + public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = "hbase.jetty.logs.serve.aliases"; - public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = - true; + public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = true; public static final String HBASE_HTTP_STATIC_USER = "hbase.http.staticuser.user"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java index 05ca9a3abd19..992b09191c44 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,22 +19,19 @@ import java.io.IOException; import java.io.Writer; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.http.HttpServer; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A servlet to print out the running configuration data. */ -@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceAudience.LimitedPrivate({ "HBase" }) @InterfaceStability.Unstable public class ConfServlet extends HttpServlet { private static final long serialVersionUID = 1L; @@ -44,21 +41,20 @@ public class ConfServlet extends HttpServlet { private static final String FORMAT_PARAM = "format"; /** - * Return the Configuration of the daemon hosting this servlet. - * This is populated when the HttpServer starts. + * Return the Configuration of the daemon hosting this servlet. This is populated when the + * HttpServer starts. 
*/ private Configuration getConfFromContext() { - Configuration conf = (Configuration)getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; } @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java index fdcd34783c04..f501e1648599 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java @@ -20,6 +20,7 @@ import java.lang.reflect.Type; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.gson.JsonElement; import org.apache.hbase.thirdparty.com.google.gson.JsonPrimitive; import org.apache.hbase.thirdparty.com.google.gson.JsonSerializationContext; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java index c75113ded730..114a9aa99047 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java @@ -32,6 +32,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.javax.ws.rs.Produces; import org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException; @@ -61,15 +62,9 @@ public boolean isWriteable(Class type, Type genericType, Annotation[] annotat } @Override - public void writeTo( - T t, - Class type, - Type genericType, - Annotation[] annotations, - MediaType mediaType, - MultivaluedMap httpHeaders, - OutputStream entityStream - ) throws IOException, WebApplicationException { + public void writeTo(T t, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream entityStream) + throws IOException, WebApplicationException { final Charset outputCharset = requestedCharset(mediaType); try (Writer writer = new OutputStreamWriter(entityStream, outputCharset)) { gson.toJson(t, writer); @@ -77,10 +72,8 @@ public void writeTo( } private static Charset requestedCharset(MediaType mediaType) { - return Optional.ofNullable(mediaType) - .map(MediaType::getParameters) - .map(params -> params.get("charset")) - .map(c -> { + return Optional.ofNullable(mediaType).map(MediaType::getParameters) + .map(params -> params.get("charset")).map(c -> { try { return Charset.forName(c); } catch (IllegalCharsetNameException e) { @@ -93,7 +86,6 @@ private static Charset requestedCharset(MediaType mediaType) { logger.debug("Error while resolving Charset '{}'", c, e); return null; } - }) - .orElse(StandardCharsets.UTF_8); + }).orElse(StandardCharsets.UTF_8); } } diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java index dc3f8a7bf430..e617fd7a41a6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java @@ -19,6 +19,7 @@ import java.io.IOException; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerRequestContext; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerResponseContext; @@ -34,10 +35,8 @@ public class ResponseEntityMapper implements ContainerResponseFilter { @Override - public void filter( - ContainerRequestContext requestContext, - ContainerResponseContext responseContext - ) throws IOException { + public void filter(ContainerRequestContext requestContext, + ContainerResponseContext responseContext) throws IOException { /* * Follows very loosely the top-level document specification described in by JSON API. Only * handles 200 response codes; leaves room for errors and other response types. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java index 57a7e930905b..0c7b869fece5 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java @@ -19,6 +19,7 @@ import java.util.function.Supplier; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.org.glassfish.hk2.api.Factory; /** @@ -34,9 +35,12 @@ public SupplierFactoryAdapter(Supplier supplier) { this.supplier = supplier; } - @Override public T provide() { + @Override + public T provide() { return supplier.get(); } - @Override public void dispose(T instance) { } + @Override + public void dispose(T instance) { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java index a61e61684dac..5e45957ce1e9 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.jmx; import java.io.IOException; @@ -43,26 +43,21 @@ /** * Provides Read only web access to JMX. *

- * This servlet generally will be placed under the /jmx URL for each
- * HttpServer. It provides read only
- * access to JMX metrics. The optional qry parameter
- * may be used to query only a subset of the JMX Beans. This query
- * functionality is provided through the
- * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
- * method.
+ * This servlet generally will be placed under the /jmx URL for each HttpServer. It provides read
+ * only access to JMX metrics. The optional qry parameter may be used to query only a
+ * subset of the JMX Beans. This query functionality is provided through the
+ * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} method.
 *
 *
- * For example http://.../jmx?qry=Hadoop:* will return
- * all hadoop metrics exposed through JMX.
+ * For example http://.../jmx?qry=Hadoop:* will return all hadoop metrics exposed
+ * through JMX.
 *
 *
- * The optional get parameter is used to query an specific
- * attribute of a JMX bean. The format of the URL is
- * http://.../jmx?get=MXBeanName::AttributeName
+ * The optional get parameter is used to query an specific attribute of a JMX bean. The
+ * format of the URL is http://.../jmx?get=MXBeanName::AttributeName
 *
 *
- * For example
- *
+ * For example
 * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
 * will return the cluster id of the namenode mxbean.
 *
@@ -72,8 +67,7 @@
 * http://.../jmx?get=MXBeanName::*[RegExp1],*[RegExp2]
 *
 *
- * For example
- *
+ * For example
 *
 * http://../jmx?get=Hadoop:service=HBase,name=RegionServer,sub=Tables::[a-zA-z_0-9]*memStoreSize
 *
@@ -82,17 +76,19 @@
 *
 *
 *
- * If the qry or the get parameter is not formatted
- * correctly then a 400 BAD REQUEST http response code will be returned.
+ * If the qry or the get parameter is not formatted correctly then a 400
+ * BAD REQUEST http response code will be returned.
 *
 *
- * If a resouce such as a mbean or attribute can not be found,
- * a 404 SC_NOT_FOUND http response code will be returned.
+ * If a resouce such as a mbean or attribute can not be found, a 404 SC_NOT_FOUND http response code
+ * will be returned.
 *
 *
 * The return format is JSON and in the form
 *
- *
+ *
+ *
+ *
 *  {
 *    "beans" : [
 *      {
@@ -101,28 +97,18 @@
 *      }
 *    ]
 *  }
- *
- *
- * The servlet attempts to convert the the JMXBeans into JSON. Each
- * bean's attributes will be converted to a JSON object member.
- *
- * If the attribute is a boolean, a number, a string, or an array
- * it will be converted to the JSON equivalent.
- *
- * If the value is a {@link CompositeData} then it will be converted
- * to a JSON object with the keys as the name of the JSON member and
- * the value is converted following these same rules.
- *
- * If the value is a {@link TabularData} then it will be converted
- * to an array of the {@link CompositeData} elements that it contains.
- *
- * All other objects will be converted to a string and output as such.
- *
- * The bean's name and modelerType will be returned for all beans.
- *
- * Optional paramater "callback" should be used to deliver JSONP response.
+ *
+ *
+ *
+ * The servlet attempts to convert the the JMXBeans into JSON. Each bean's attributes will be
+ * converted to a JSON object member. If the attribute is a boolean, a number, a string, or an array
+ * it will be converted to the JSON equivalent. If the value is a {@link CompositeData} then it will
+ * be converted to a JSON object with the keys as the name of the JSON member and the value is
+ * converted following these same rules. If the value is a {@link TabularData} then it will be
+ * converted to an array of the {@link CompositeData} elements that it contains. All other objects
+ * will be converted to a string and output as such. The bean's name and modelerType will be
+ * returned for all beans. Optional paramater "callback" should be used to deliver JSONP response.
 *
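Since the javadoc above describes the qry parameter as a pass-through to MBeanServer#queryNames, here is a minimal in-process sketch of that same lookup; the ObjectName pattern and attribute name are illustrative and not part of the patch:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class QueryNamesSketch {
      public static void main(String[] args) throws Exception {
        // The servlet resolves "qry" with MBeanServer#queryNames and then serializes each
        // matching bean; this runs the same query against the local platform MBean server.
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        for (ObjectName name : server.queryNames(new ObjectName("java.lang:type=Memory"), null)) {
          System.out.println(name + " -> " + server.getAttribute(name, "HeapMemoryUsage"));
        }
      }
    }

The servlet performs the equivalent query and then serializes each matching bean into the JSON shape shown above.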
    - * */ @InterfaceAudience.Private public class JMXJsonServlet extends HttpServlet { @@ -156,12 +142,8 @@ public void init() throws ServletException { } /** - * Process a GET request for the specified resource. - * - * @param request - * The servlet request we are processing - * @param response - * The servlet response we are creating + * Process a GET request for the specified resource. n * The servlet request we are processing n * + * The servlet response we are creating */ @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { @@ -199,8 +181,10 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro response.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } - if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), - splitStrings[1], description) != 0) { + if ( + beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), splitStrings[1], + description) != 0 + ) { beanWriter.flush(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } @@ -215,8 +199,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro String excl = request.getParameter("excl"); ObjectName excluded = excl == null ? null : new ObjectName(excl); - if (beanWriter.write(this.mBeanServer, new ObjectName(qry), - null, description, excluded) != 0) { + if ( + beanWriter.write(this.mBeanServer, new ObjectName(qry), null, description, excluded) != 0 + ) { beanWriter.flush(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } @@ -241,10 +226,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro } /** - * Verifies that the callback property, if provided, is purely alphanumeric. - * This prevents a malicious callback name (that is javascript code) from being - * returned by the UI to an unsuspecting user. - * + * Verifies that the callback property, if provided, is purely alphanumeric. This prevents a + * malicious callback name (that is javascript code) from being returned by the UI to an + * unsuspecting user. * @param callbackName The callback name, can be null. * @return The callback name * @throws IOException If the name is disallowed. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java index 72cedddd686b..a11ad268ec1f 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.security.Principal; import java.util.HashMap; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -32,7 +31,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.http.FilterContainer; @@ -42,8 +40,8 @@ import org.slf4j.LoggerFactory; /** - * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) - * so that the web UI is usable for a secure cluster without authentication. 
+ * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) so that the web UI + * is usable for a secure cluster without authentication. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class StaticUserWebFilter extends FilterInitializer { @@ -53,17 +51,21 @@ public class StaticUserWebFilter extends FilterInitializer { static class User implements Principal { private final String name; + public User(String name) { this.name = name; } + @Override public String getName() { return name; } + @Override public int hashCode() { return name.hashCode(); } + @Override public boolean equals(Object other) { if (other == this) { @@ -73,6 +75,7 @@ public boolean equals(Object other) { } return ((User) other).name.equals(name); } + @Override public String toString() { return name; @@ -90,20 +93,19 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) request; // if the user is already authenticated, don't override it if (httpRequest.getRemoteUser() != null) { chain.doFilter(request, response); } else { - HttpServletRequestWrapper wrapper = - new HttpServletRequestWrapper(httpRequest) { + HttpServletRequestWrapper wrapper = new HttpServletRequestWrapper(httpRequest) { @Override public Principal getUserPrincipal() { return user; } + @Override public String getRemoteUser() { return username; @@ -128,9 +130,7 @@ public void initFilter(FilterContainer container, Configuration conf) { String username = getUsernameFromConf(conf); options.put(HBASE_HTTP_STATIC_USER, username); - container.addFilter("static_user_filter", - StaticUserFilter.class.getName(), - options); + container.addFilter("static_user_filter", StaticUserFilter.class.getName(), options); } /** @@ -141,13 +141,12 @@ static String getUsernameFromConf(Configuration conf) { if (oldStyleUgi != null) { // We can't use the normal configuration deprecation mechanism here // since we need to split out the username from the configured UGI. - LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " + - HBASE_HTTP_STATIC_USER + "."); + LOG.warn( + DEPRECATED_UGI_KEY + " should not be used. Instead, use " + HBASE_HTTP_STATIC_USER + "."); String[] parts = oldStyleUgi.split(","); return parts[0]; } else { - return conf.get(HBASE_HTTP_STATIC_USER, - DEFAULT_HBASE_HTTP_STATIC_USER); + return conf.get(HBASE_HTTP_STATIC_USER, DEFAULT_HBASE_HTTP_STATIC_USER); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java index 611316d9ec67..cec05f53bbca 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
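Relating to the StaticUserWebFilter hunks above: a small sketch of the configuration it consults, assuming the hbase.http.staticuser.user key from ServerConfigurationKeys earlier in this patch; the user names are placeholders:

    import org.apache.hadoop.conf.Configuration;

    public class StaticUserConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Preferred key (HBASE_HTTP_STATIC_USER in ServerConfigurationKeys).
        conf.set("hbase.http.staticuser.user", "hbase-ui-reader");
        // The deprecated UGI-style key handled by getUsernameFromConf() above carries
        // "user,group"; only the part before the comma is used.
        String oldStyleUgi = "hbase-ui-reader,users";
        String fromDeprecatedKey = oldStyleUgi.split(",")[0];
        System.out.println(conf.get("hbase.http.staticuser.user") + " / " + fromDeprecatedKey);
      }
    }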
See the NOTICE file * distributed with this work for additional information @@ -54,8 +54,8 @@ @InterfaceAudience.Private public final class LogLevel { private static final String USAGES = "\nUsage: General options are:\n" - + "\t[-getlevel [-protocol (http|https)]\n" - + "\t[-setlevel [-protocol (http|https)]"; + + "\t[-getlevel [-protocol (http|https)]\n" + + "\t[-setlevel [-protocol (http|https)]"; public static final String PROTOCOL_HTTP = "http"; public static final String PROTOCOL_HTTPS = "https"; @@ -85,8 +85,7 @@ private static void printUsage() { } public static boolean isValidProtocol(String protocol) { - return ((protocol.equals(PROTOCOL_HTTP) || - protocol.equals(PROTOCOL_HTTPS))); + return ((protocol.equals(PROTOCOL_HTTP) || protocol.equals(PROTOCOL_HTTPS))); } static class CLI extends Configured implements Tool { @@ -114,10 +113,9 @@ public int run(String[] args) throws Exception { /** * Send HTTP request to the daemon. * @throws HadoopIllegalArgumentException if arguments are invalid. - * @throws Exception if unable to connect + * @throws Exception if unable to connect */ - private void sendLogLevelRequest() - throws HadoopIllegalArgumentException, Exception { + private void sendLogLevelRequest() throws HadoopIllegalArgumentException, Exception { switch (operation) { case GETLEVEL: doGetLevel(); @@ -126,13 +124,11 @@ private void sendLogLevelRequest() doSetLevel(); break; default: - throw new HadoopIllegalArgumentException( - "Expect either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Expect either -getlevel or -setlevel"); } } - public void parseArguments(String[] args) throws - HadoopIllegalArgumentException { + public void parseArguments(String[] args) throws HadoopIllegalArgumentException { if (args.length == 0) { throw new HadoopIllegalArgumentException("No arguments specified"); } @@ -149,15 +145,13 @@ public void parseArguments(String[] args) throws nextArgIndex = parseProtocolArgs(args, nextArgIndex); break; default: - throw new HadoopIllegalArgumentException( - "Unexpected argument " + args[nextArgIndex]); + throw new HadoopIllegalArgumentException("Unexpected argument " + args[nextArgIndex]); } } // if operation is never specified in the arguments if (operation == Operations.UNKNOWN) { - throw new HadoopIllegalArgumentException( - "Must specify either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Must specify either -getlevel or -setlevel"); } // if protocol is unspecified, set it as http. 
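The argument shapes the LogLevel CLI accepts can be read off parseGetLevelArgs, parseSetLevelArgs and parseProtocolArgs above; a short sketch with placeholder host, logger and level values:

    public class LogLevelArgsSketch {
      public static void main(String[] args) {
        // -getlevel consumes a host:port and a logger name; -setlevel additionally takes a
        // level; -protocol is optional and defaults to http. All values below are placeholders.
        String[] getLevel = { "-getlevel", "localhost:16010", "org.apache.hadoop.hbase" };
        String[] setLevel = { "-setlevel", "localhost:16010", "org.apache.hadoop.hbase", "DEBUG",
          "-protocol", "https" };
        System.out.println(String.join(" ", getLevel));
        System.out.println(String.join(" ", setLevel));
      }
    }

Internally these turn into the GET requests built by doGetLevel() and doSetLevel() below (/logLevel?log=...&level=...).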
@@ -166,8 +160,7 @@ public void parseArguments(String[] args) throws } } - private int parseGetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseGetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -getlevel command"); @@ -182,8 +175,7 @@ private int parseGetLevelArgs(String[] args, int index) throws return index + 3; } - private int parseSetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseSetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -setlevel command"); @@ -199,32 +191,27 @@ private int parseSetLevelArgs(String[] args, int index) throws return index + 4; } - private int parseProtocolArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseProtocolArgs(String[] args, int index) throws HadoopIllegalArgumentException { // make sure only -protocol is specified if (protocol != null) { - throw new HadoopIllegalArgumentException( - "Redundant -protocol command"); + throw new HadoopIllegalArgumentException("Redundant -protocol command"); } // check number of arguments is sufficient if (index + 1 >= args.length) { - throw new HadoopIllegalArgumentException( - "-protocol needs one parameter"); + throw new HadoopIllegalArgumentException("-protocol needs one parameter"); } // check protocol is valid protocol = args[index + 1]; if (!isValidProtocol(protocol)) { - throw new HadoopIllegalArgumentException( - "Invalid protocol: " + protocol); + throw new HadoopIllegalArgumentException("Invalid protocol: " + protocol); } return index + 2; } /** * Send HTTP request to get log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. - * @throws Exception if unable to connect + * @throws Exception if unable to connect */ private void doGetLevel() throws Exception { process(protocol + "://" + hostName + "/logLevel?log=" + className); @@ -232,20 +219,16 @@ private void doGetLevel() throws Exception { /** * Send HTTP request to set log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. - * @throws Exception if unable to connect + * @throws Exception if unable to connect */ private void doSetLevel() throws Exception { - process(protocol + "://" + hostName + "/logLevel?log=" + className - + "&level=" + level); + process(protocol + "://" + hostName + "/logLevel?log=" + className + "&level=" + level); } /** - * Connect to the URL. Supports HTTP and supports SPNEGO - * authentication. It falls back to simple authentication if it fails to - * initiate SPNEGO. - * + * Connect to the URL. Supports HTTP and supports SPNEGO authentication. It falls back to simple + * authentication if it fails to initiate SPNEGO. * @param url the URL address of the daemon servlet * @return a connected connection * @throws Exception if it can not establish a connection. @@ -274,8 +257,7 @@ private HttpURLConnection connect(URL url) throws Exception { } /** - * Configures the client to send HTTP request to the URL. - * Supports SPENGO for authentication. + * Configures the client to send HTTP request to the URL. Supports SPENGO for authentication. 
* @param urlString URL and query string to the daemon's web UI * @throws Exception if unable to connect */ @@ -289,11 +271,12 @@ private void process(String urlString) throws Exception { // read from the servlet - try (InputStreamReader streamReader = - new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); - BufferedReader bufferedReader = new BufferedReader(streamReader)) { + try ( + InputStreamReader streamReader = + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); + BufferedReader bufferedReader = new BufferedReader(streamReader)) { bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) - .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); + .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); } catch (IOException ioe) { System.err.println("" + ioe); } @@ -312,19 +295,16 @@ public static class Servlet extends HttpServlet { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { return; } // Disallow modification of the LogLevel if explicitly set to readonly - Configuration conf = (Configuration) getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); if (conf.getBoolean("hbase.master.ui.readonly", false)) { - sendError( - response, - HttpServletResponse.SC_FORBIDDEN, + sendError(response, HttpServletResponse.SC_FORBIDDEN, "Modification of HBase via the UI is disallowed in configuration."); return; } @@ -347,17 +327,13 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) if (logName != null) { out.println("

    Results:

    "); - out.println(MARKER - + "Submitted Log Name: " + logName + "
    "); + out.println(MARKER + "Submitted Log Name: " + logName + "
    "); Logger log = LoggerFactory.getLogger(logName); - out.println(MARKER - + "Log Class: " + log.getClass().getName() +"
    "); + out.println(MARKER + "Log Class: " + log.getClass().getName() + "
    "); if (level != null) { if (!isLogLevelChangeAllowed(logName, readOnlyLogLevels)) { - sendError( - response, - HttpServletResponse.SC_PRECONDITION_FAILED, + sendError(response, HttpServletResponse.SC_PRECONDITION_FAILED, "Modification of logger " + logName + " is disallowed in configuration."); return; } @@ -396,41 +372,41 @@ private void sendError(HttpServletResponse response, int code, String message) } static final String FORMS = "
    \n" - + "
    \n" + "\n" + "
    \n" + "Actions:" + "

    " - + "

    \n" + "\n" + "\n" - + "\n" + "\n" + "\n" + "\n" + "\n" - + "\n" + "\n" + "\n" + "\n" - + "\n" + "\n" + "\n" - + "\n" + "
    \n" - + "\n" + "\n" - + "\n" + "" - + "Get the current log level for the specified log name." + "
    \n" - + "\n" + "\n" - + "\n" - + "\n" + "" - + "Set the specified log level for the specified log name." + "
    \n" + "
    \n" + "

    \n" + "
    \n"; + + "
    \n" + "\n" + "
    \n" + "Actions:" + "

    " + "

    \n" + + "\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + + "\n" + "
    \n" + + "\n" + "\n" + + "\n" + "" + + "Get the current log level for the specified log name." + "
    \n" + + "\n" + "\n" + + "\n" + + "\n" + "" + + "Set the specified log level for the specified log name." + "
    \n" + "
    \n" + "

    \n" + "
    \n"; private static void process(Logger logger, String levelName, PrintWriter out) { if (levelName != null) { try { Log4jUtils.setLogLevel(logger.getName(), levelName); - out.println(MARKER + "
    " + "Setting Level to " + - levelName + " ...
    " + "
    "); + out.println(MARKER + "
    " + "Setting Level to " + + levelName + " ...
    " + "
    "); } catch (IllegalArgumentException e) { - out.println(MARKER + "
    " + "Bad level : " + levelName + - "
    " + "
    "); + out.println(MARKER + "
    " + "Bad level : " + levelName + + "
    " + "
    "); } } - out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + - "
    "); + out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + + "
    "); } } - private LogLevel() {} + private LogLevel() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java index 560985b73e09..c28b3525a053 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +27,6 @@ import java.util.Iterator; import java.util.Set; import java.util.regex.Pattern; - import javax.management.AttributeNotFoundException; import javax.management.InstanceNotFoundException; import javax.management.IntrospectionException; @@ -42,13 +42,13 @@ import javax.management.openmbean.CompositeData; import javax.management.openmbean.CompositeType; import javax.management.openmbean.TabularData; - -import org.apache.hbase.thirdparty.com.google.gson.Gson; -import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; + /** * Utility for doing JSON and MBeans. */ @@ -67,12 +67,12 @@ public interface Writer extends Closeable { void write(String key, String value) throws IOException; default int write(MBeanServer mBeanServer, ObjectName qry, String attribute, - boolean description) throws IOException { + boolean description) throws IOException { return write(mBeanServer, qry, attribute, description, null); } int write(MBeanServer mBeanServer, ObjectName qry, String attribute, boolean description, - ObjectName excluded) throws IOException; + ObjectName excluded) throws IOException; void flush() throws IOException; } @@ -123,7 +123,7 @@ public void write(String key, String value) throws IOException { @Override public int write(MBeanServer mBeanServer, ObjectName qry, String attribute, - boolean description, ObjectName excluded) throws IOException { + boolean description, ObjectName excluded) throws IOException { return JSONBean.write(jsonWriter, mBeanServer, qry, attribute, description, excluded); } }; @@ -133,7 +133,7 @@ public int write(MBeanServer mBeanServer, ObjectName qry, String attribute, * @return Return non-zero if failed to find bean. 
0 */ private static int write(JsonWriter writer, MBeanServer mBeanServer, ObjectName qry, - String attribute, boolean description, ObjectName excluded) throws IOException { + String attribute, boolean description, ObjectName excluded) throws IOException { LOG.debug("Listing beans for {}", qry); Set names = null; names = mBeanServer.queryNames(qry, null); @@ -255,7 +255,7 @@ private static int write(JsonWriter writer, MBeanServer mBeanServer, ObjectName } private static void writeAttribute(JsonWriter writer, MBeanServer mBeanServer, ObjectName oname, - boolean description, Pattern pattern[], MBeanAttributeInfo attr) throws IOException { + boolean description, Pattern pattern[], MBeanAttributeInfo attr) throws IOException { if (!attr.isReadable()) { return; } @@ -332,7 +332,7 @@ private static void writeAttribute(JsonWriter writer, MBeanServer mBeanServer, O } private static void writeAttribute(JsonWriter writer, String attName, String descriptionStr, - Object value) throws IOException { + Object value) throws IOException { if (descriptionStr != null && descriptionStr.length() > 0 && !attName.equals(descriptionStr)) { writer.name(attName); writer.beginObject(); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java index 6e155ae39616..760f4c0a2b0c 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -14,7 +14,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + */ package org.apache.hadoop.hbase.util; import java.beans.IntrospectionException; @@ -67,8 +67,8 @@ private JSONMetricUtil() { } public static MBeanAttributeInfo[] getMBeanAttributeInfo(ObjectName bean) - throws IntrospectionException, InstanceNotFoundException, ReflectionException, - IntrospectionException, javax.management.IntrospectionException { + throws IntrospectionException, InstanceNotFoundException, ReflectionException, + IntrospectionException, javax.management.IntrospectionException { MBeanInfo mbinfo = mbServer.getMBeanInfo(bean); return mbinfo.getAttributes(); } @@ -78,8 +78,8 @@ public static Object getValueFromMBean(ObjectName bean, String attribute) { try { value = mbServer.getAttribute(bean, attribute); } catch (Exception e) { - LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + - attribute + " " + e.getMessage()); + LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + attribute + + " " + e.getMessage()); } return value; } @@ -88,11 +88,11 @@ public static Object getValueFromMBean(ObjectName bean, String attribute) { * Returns a subset of mbeans defined by qry. Modeled after DumpRegionServerMetrics#dumpMetrics. * Example: String qry= "java.lang:type=Memory" * @throws MalformedObjectNameException if json have bad format - * @throws IOException / + * @throws IOException / * @return String representation of json array. 
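A minimal caller sketch for dumpBeanToString using the same example query given in its javadoc; it assumes hbase-http (and its relocated Gson) is on the classpath:

    import org.apache.hadoop.hbase.util.JSONMetricUtil;

    public class DumpMemoryBean {
      public static void main(String[] args) throws Exception {
        // Same query string as the javadoc example; the result is a JSON document describing
        // the java.lang:type=Memory bean of this JVM.
        System.out.println(JSONMetricUtil.dumpBeanToString("java.lang:type=Memory"));
      }
    }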
*/ public static String dumpBeanToString(String qry) - throws MalformedObjectNameException, IOException { + throws MalformedObjectNameException, IOException { StringWriter sw = new StringWriter(1024 * 100); // Guess this size try (PrintWriter writer = new PrintWriter(sw)) { JSONBean dumper = new JSONBean(); @@ -107,7 +107,7 @@ public static String dumpBeanToString(String qry) /** * Method for building map used for constructing ObjectName. Mapping is done with arrays indices - * @param keys Map keys + * @param keys Map keys * @param values Map values * @return Map or null if arrays are empty * or have different number of elements */ @@ -132,7 +132,7 @@ public static ObjectName buildObjectName(String pattern) throws MalformedObjectN } public static ObjectName buildObjectName(String domain, Hashtable keyValueTable) - throws MalformedObjectNameException { + throws MalformedObjectNameException { return new ObjectName(domain, keyValueTable); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java index 92dc20d35b59..ddcd4b54d5fa 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +35,7 @@ @InterfaceAudience.Private public abstract class LogMonitoring { - public static void dumpTailOfLogs( - PrintWriter out, long tailKb) throws IOException { + public static void dumpTailOfLogs(PrintWriter out, long tailKb) throws IOException { Set logs = Log4jUtils.getActiveLogFiles(); for (File f : logs) { out.println("+++++++++++++++++++++++++++++++"); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java index 7ed09468cb67..fc1d523b0ef1 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +31,8 @@ public final class ProcessUtils { private static Logger LOG = LoggerFactory.getLogger(ProcessUtils.class); - private ProcessUtils() { } + private ProcessUtils() { + } public static Integer getPid() { // JVM_PID is exported by bin/hbase run script diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java index 7f1223980e3d..a17dbcb3d489 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.http; import java.io.BufferedReader; @@ -38,8 +37,8 @@ import org.slf4j.LoggerFactory; /** - * This is a base class for functional tests of the {@link HttpServer}. - * The methods are static for other classes to import statically. + * This is a base class for functional tests of the {@link HttpServer}. The methods are static for + * other classes to import statically. */ public class HttpServerFunctionalTest extends Assert { private static final Logger LOG = LoggerFactory.getLogger(HttpServerFunctionalTest.class); @@ -52,12 +51,10 @@ public class HttpServerFunctionalTest extends Assert { private static final String TEST = "test"; /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. - * + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @return the server instance - * - * @throws IOException if a problem occurs + * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ public static HttpServer createTestServer() throws IOException { @@ -66,76 +63,69 @@ public static HttpServer createTestServer() throws IOException { } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @param conf the server configuration to use * @return the server instance - * - * @throws IOException if a problem occurs + * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf) - throws IOException { + public static HttpServer createTestServer(Configuration conf) throws IOException { prepareTestWebapp(); return createServer(TEST, conf); } public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl) - throws IOException { + throws IOException { prepareTestWebapp(); return createServer(TEST, conf, adminsAcl); } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. 
* @param conf the server configuration to use * @return the server instance - * - * @throws IOException if a problem occurs + * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createTestServer(Configuration conf, String[] pathSpecs) + throws IOException { prepareTestWebapp(); return createServer(TEST, conf, pathSpecs); } public static HttpServer createTestServerWithSecurity(Configuration conf) throws IOException { - prepareTestWebapp(); - return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) - // InfoServer normally sets these for us - .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .build(); - } + prepareTestWebapp(); + return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) + // InfoServer normally sets these for us + .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).build(); + } - public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, AccessControlList acl) throws IOException { + public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, + AccessControlList acl) throws IOException { prepareTestWebapp(); return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) - // InfoServer normally sets these for us - .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .setSecurityEnabled(true) - .setACL(acl) - .build(); + // InfoServer normally sets these for us + .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).setSecurityEnabled(true) + .setACL(acl).build(); } /** - * Prepare the test webapp by creating the directory from the test properties - * fail if the directory cannot be created. + * Prepare the test webapp by creating the directory from the test properties fail if the + * directory cannot be created. 
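The test helpers in this file all funnel into HttpServer.Builder; a standalone sketch of that builder outside the test harness, assuming a "test" webapp directory is resolvable on the classpath:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.http.HttpServer;

    public class EmbeddedHttpServerSketch {
      public static void main(String[] args) throws Exception {
        // Same builder calls the helpers here use: bind to an ephemeral localhost port and
        // resolve the "test" webapp from the classpath (its presence is assumed).
        HttpServer server = new HttpServer.Builder().setName("test")
          .addEndpoint(URI.create("http://localhost:0")).setFindPort(true)
          .setConf(new Configuration()).build();
        server.start();
        System.out.println("Listening at http://" + server.getConnectorAddress(0));
        server.stop();
      }
    }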
* @throws AssertionError if a condition was not met */ protected static void prepareTestWebapp() { String webapps = System.getProperty(TEST_BUILD_WEBAPPS, BUILD_WEBAPPS_DIR); - File testWebappDir = new File(webapps + - File.separatorChar + TEST); + File testWebappDir = new File(webapps + File.separatorChar + TEST); try { if (!testWebappDir.exists()) { fail("Test webapp dir " + testWebappDir.getCanonicalPath() + " missing"); } - } - catch (IOException e) { + } catch (IOException e) { } } @@ -146,12 +136,10 @@ protected static void prepareTestWebapp() { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String host, int port) - throws IOException { + public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); return new HttpServer.Builder().setName(TEST) - .addEndpoint(URI.create("http://" + host + ":" + port)) - .setFindPort(true).build(); + .addEndpoint(URI.create("http://" + host + ":" + port)).setFindPort(true).build(); } /** @@ -163,48 +151,45 @@ public static HttpServer createServer(String host, int port) public static HttpServer createServer(String webapp) throws IOException { return localServerBuilder(webapp).setFindPort(true).build(); } + /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with - * @param conf the configuration to use for the server + * @param conf the configuration to use for the server * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf) - throws IOException { + public static HttpServer createServer(String webapp, Configuration conf) throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).build(); } public static HttpServer createServer(String webapp, Configuration conf, - AccessControlList adminsAcl) throws IOException { + AccessControlList adminsAcl) throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build(); } private static Builder localServerBuilder(String webapp) { - return new HttpServer.Builder().setName(webapp).addEndpoint( - URI.create("http://localhost:0")); + return new HttpServer.Builder().setName(webapp).addEndpoint(URI.create("http://localhost:0")); } /** * Create an HttpServer instance for the given webapp - * @param webapp the webapp to work with - * @param conf the configuration to use for the server + * @param webapp the webapp to work with + * @param conf the configuration to use for the server * @param pathSpecs the paths specifications the server will service * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) + throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs) - .build(); + .build(); } /** * Create and start a server with the test webapp - * * @return the newly started server - * - * @throws IOException on any failure + * @throws IOException on any failure * @throws AssertionError if a condition was not met */ public static HttpServer createAndStartTestServer() throws IOException { @@ -230,11 +215,9 @@ public static void stop(HttpServer server) throws Exception { * @return a URL bonded to the base of the server * @throws 
MalformedURLException if the URL cannot be created. */ - public static URL getServerURL(HttpServer server) - throws MalformedURLException { + public static URL getServerURL(HttpServer server) throws MalformedURLException { assertNotNull("No server", server); - return new URL("http://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + return new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); } /** @@ -297,15 +280,14 @@ public static void access(String urlstring) throws IOException { URLConnection connection = url.openConnection(); connection.connect(); - try (BufferedReader in = new BufferedReader(new InputStreamReader( - connection.getInputStream(), StandardCharsets.UTF_8))){ - for(; in.readLine() != null;) { + try (BufferedReader in = new BufferedReader( + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) { + for (; in.readLine() != null;) { continue; } - } catch(IOException ioe) { + } catch (IOException ioe) { LOG.info("Got exception: ", ioe); } } - } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java index 1917655d3426..06c62f03fcaa 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestGlobalFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGlobalFilter.class); + HBaseClassTestRule.forClass(TestGlobalFilter.class); private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); private static final Set RECORDS = new TreeSet<>(); @@ -63,12 +63,12 @@ public void destroy() { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -106,14 +106,11 @@ public void testServletFilter() throws 
Exception { final String outURL = "/static/a.out"; final String logURL = "/logs/a.log"; - final String[] urls = { - fsckURL, stacksURL, ajspURL, listPathsURL, dataURL, streamFile, rootURL, allURL, - outURL, logURL - }; + final String[] urls = { fsckURL, stacksURL, ajspURL, listPathsURL, dataURL, streamFile, rootURL, + allURL, outURL, logURL }; - //access the urls - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String url : urls) { access(prefix + url); @@ -124,7 +121,7 @@ public void testServletFilter() throws Exception { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String url : urls) { assertTrue(RECORDS.remove(url)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java index 0f4c4d5d2a14..e5835fd65b61 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,13 +31,14 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHtmlQuoting { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHtmlQuoting.class); + HBaseClassTestRule.forClass(TestHtmlQuoting.class); - @Test public void testNeedsQuoting() throws Exception { + @Test + public void testNeedsQuoting() throws Exception { assertTrue(HtmlQuoting.needsQuoting("abcde>")); assertTrue(HtmlQuoting.needsQuoting("")); assertEquals("&&&", HtmlQuoting.quoteHtmlChars("&&&")); @@ -58,18 +60,18 @@ public class TestHtmlQuoting { } private void runRoundTrip(String str) throws Exception { - assertEquals(str, - HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); + assertEquals(str, HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); } - @Test public void testRoundtrip() throws Exception { + @Test + public void testRoundtrip() throws Exception { runRoundTrip(""); runRoundTrip("<>&'\""); runRoundTrip("ab>cd params = request.getParameterMap(); SortedSet keys = new TreeSet<>(params.keySet()); - for(String key: keys) { + for (String key : keys) { out.print(key); out.print(':'); String[] values = params.get(key); if (values.length > 0) { out.print(values[0]); - for(int i=1; i < values.length; ++i) { + for (int i = 1; i < values.length; ++i) { out.print(','); out.print(values[i]); } @@ -120,15 +122,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro @SuppressWarnings("serial") public static class EchoServlet extends HttpServlet { @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { PrintWriter out = response.getWriter(); SortedSet sortedKeys = new TreeSet<>(); Enumeration keys = request.getParameterNames(); - while(keys.hasMoreElements()) { + while (keys.hasMoreElements()) { sortedKeys.add(keys.nextElement()); } - for(String key: 
sortedKeys) { + for (String key : sortedKeys) { out.print(key); out.print(':'); out.print(request.getParameter(key)); @@ -158,7 +159,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro } } - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { Configuration conf = new Configuration(); conf.setInt(HttpServer.HTTP_MAX_THREADS, MAX_THREADS); server = createTestServer(conf); @@ -166,14 +168,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro server.addUnprivilegedServlet("echomap", "/echomap", EchoMapServlet.class); server.addUnprivilegedServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class); server.addUnprivilegedServlet("longheader", "/longheader", LongHeaderServlet.class); - server.addJerseyResourcePackage( - JerseyResource.class.getPackage().getName(), "/jersey/*"); + server.addJerseyResourcePackage(JerseyResource.class.getPackage().getName(), "/jersey/*"); server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } @@ -192,13 +194,13 @@ public void testMaxThreads() throws Exception { ready.countDown(); try { start.await(); - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); int serverThreads = server.webServer.getThreadPool().getThreads(); - assertTrue("More threads are started than expected, Server Threads count: " - + serverThreads, serverThreads <= MAX_THREADS); - LOG.info("Number of threads = " + serverThreads + - " which is less or equal than the max = " + MAX_THREADS); + assertTrue( + "More threads are started than expected, Server Threads count: " + serverThreads, + serverThreads <= MAX_THREADS); + LOG.info("Number of threads = " + serverThreads + + " which is less or equal than the max = " + MAX_THREADS); } catch (Exception e) { // do nothing } @@ -209,31 +211,30 @@ public void testMaxThreads() throws Exception { start.countDown(); } - @Test public void testEcho() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", - readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + @Test + public void testEcho() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc<:d\ne:>\n", readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } /** Test the echo map servlet that uses getParameterMap. */ - @Test public void testEchoMap() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); - assertEquals("a:b,>\nc<:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); + @Test + public void testEchoMap() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); + assertEquals("a:b,>\nc<:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); } /** - * Test that verifies headers can be up to 64K long. - * The test adds a 63K header leaving 1K for other headers. - * This is because the header buffer setting is for ALL headers, - * names and values included. */ - @Test public void testLongHeader() throws Exception { + * Test that verifies headers can be up to 64K long. 
The test adds a 63K header leaving 1K for + * other headers. This is because the header buffer setting is for ALL headers, names and values + * included. + */ + @Test + public void testLongHeader() throws Exception { URL url = new URL(baseUrl, "/longheader"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); StringBuilder sb = new StringBuilder(); - for (int i = 0 ; i < 63 * 1024; i++) { + for (int i = 0; i < 63 * 1024; i++) { sb.append("a"); } conn.setRequestProperty("longheader", sb.toString()); @@ -244,14 +245,14 @@ public void testMaxThreads() throws Exception { public void testContentTypes() throws Exception { // Static CSS files should have text/css URL cssUrl = new URL(baseUrl, "/static/test.css"); - HttpURLConnection conn = (HttpURLConnection)cssUrl.openConnection(); + HttpURLConnection conn = (HttpURLConnection) cssUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/css", conn.getContentType()); // Servlets should have text/plain with proper encoding by default URL servletUrl = new URL(baseUrl, "/echo?a=b"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); @@ -259,14 +260,14 @@ public void testContentTypes() throws Exception { // We should ignore parameters for mime types - ie a parameter // ending in .css should not change mime type servletUrl = new URL(baseUrl, "/echo?a=b.css"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); // Servlets that specify text/html should get that content type servletUrl = new URL(baseUrl, "/htmlcontent"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/html;charset=utf-8", conn.getContentType()); @@ -335,21 +336,20 @@ private static String readFully(final InputStream input) throws IOException { } /** - * Dummy filter that mimics as an authentication filter. Obtains user identity - * from the request parameter user.name. Wraps around the request so that - * request.getRemoteUser() returns the user identity. - * + * Dummy filter that mimics as an authentication filter. Obtains user identity from the request + * parameter user.name. Wraps around the request so that request.getRemoteUser() returns the user + * identity. 
*/ public static class DummyServletFilter implements Filter { @Override - public void destroy() { } + public void destroy() { + } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain filterChain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) + throws IOException, ServletException { final String userName = request.getParameter("user.name"); - ServletRequest requestModified = - new HttpServletRequestWrapper((HttpServletRequest) request) { + ServletRequest requestModified = new HttpServletRequestWrapper((HttpServletRequest) request) { @Override public String getRemoteUser() { return userName; @@ -359,12 +359,12 @@ public String getRemoteUser() { } @Override - public void init(FilterConfig arg0) { } + public void init(FilterConfig arg0) { + } } /** * FilterInitializer that initialized the DummyFilter. - * */ public static class DummyFilterInitializer extends FilterInitializer { public DummyFilterInitializer() { @@ -377,19 +377,17 @@ public void initFilter(FilterContainer container, Configuration conf) { } /** - * Access a URL and get the corresponding return Http status code. The URL - * will be accessed as the passed user, by sending user.name request - * parameter. - * + * Access a URL and get the corresponding return Http status code. The URL will be accessed as the + * passed user, by sending user.name request parameter. * @param urlstring The url to access - * @param userName The user to perform access as + * @param userName The user to perform access as * @return The HTTP response code * @throws IOException if there is a problem communicating with the server */ private static int getHttpStatusCode(String urlstring, String userName) throws IOException { URL url = new URL(urlstring + "?user.name=" + userName); System.out.println("Accessing " + url + " as user " + userName); - HttpURLConnection connection = (HttpURLConnection)url.openConnection(); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.connect(); return connection.getResponseCode(); } @@ -411,9 +409,8 @@ public List getGroups(String user) { } /** - * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics - * servlets, when authentication filters are set, but authorization is not - * enabled. + * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics servlets, when + * authentication filters are set, but authorization is not enabled. 
*/ @Test @Ignore @@ -421,46 +418,41 @@ public void testDisabledAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); // Authorization is disabled by default - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); MyGroupsProvider.mapping.put("userB", Collections.singletonList("groupB")); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } } myServer.stop(); } /** - * Verify the administrator access for /logs, /stacks, /conf, /logLevel and - * /metrics servlets. + * Verify the administrator access for /logs, /stacks, /conf, /logLevel and /metrics servlets. 
*/ @Test @Ignore public void testAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - true); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - true); - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); @@ -470,20 +462,19 @@ public void testAuthorizationOfDefaultServlets() throws Exception { MyGroupsProvider.mapping.put("userE", Collections.singletonList("groupE")); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf) - .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf) + .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB", "userC", "userD" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, getHttpStatusCode( - serverURL + servlet, "userE")); + assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, + getHttpStatusCode(serverURL + servlet, "userE")); } myServer.stop(); } @@ -494,8 +485,8 @@ public void testRequestQuoterWithNull() { Mockito.doReturn(null).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertNull("It should return null " - + "when there are no values for the parameter", parameterValues); + Assert.assertNull("It should return null " + "when there are no values for the parameter", + parameterValues); } @Test @@ -505,16 +496,16 @@ public void testRequestQuoterWithNotNull() { Mockito.doReturn(values).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertTrue("It should return Parameter Values", Arrays.equals( - values, parameterValues)); + Assert.assertTrue("It should return Parameter Values", Arrays.equals(values, parameterValues)); } @SuppressWarnings("unchecked") private static Map parse(String jsonString) { - return (Map)JSON.parse(jsonString); + return (Map) JSON.parse(jsonString); } - @Test public void testJersey() throws Exception { + @Test + public void testJersey() 
throws Exception { LOG.info("BEGIN testJersey()"); final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar")); final Map m = parse(js); @@ -535,33 +526,33 @@ public void testHasAdministratorAccess() throws Exception { Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //authorization OFF + // authorization OFF Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NULL + // authorization ON & user NULL response = Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NULL + // authorization ON & user NOT NULL & ACLs NULL response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs response = Mockito.mock(HttpServletResponse.class); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs response = Mockito.mock(HttpServletResponse.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); @@ -575,14 +566,14 @@ public void testRequiresAuthorizationAccess() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //requires admin access to instrumentation, FALSE by default + // requires admin access to instrumentation, FALSE by default Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response)); - //requires admin access to instrumentation, TRUE + // requires admin access to instrumentation, TRUE conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. 
any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response)); } @@ -611,8 +602,7 @@ public void testBindAddress() throws Exception { } } - private HttpServer checkBindAddress(String host, int port, boolean findPort) - throws Exception { + private HttpServer checkBindAddress(String host, int port, boolean findPort) throws Exception { HttpServer server = createServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) @@ -645,14 +635,12 @@ public void testXFrameHeaderSameOrigin() throws Exception { conf.set("hbase.http.filter.xframeoptions.mode", "SAMEORIGIN"); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")) - .setFindPort(true).setConf(conf).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.addUnprivilegedServlet("echo", "/echo", EchoServlet.class); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); + String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); URL url = new URL(new URL(serverURL), "/echo?a=b&c=d"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java index ce0d6d6bc327..e517a5ffedbb 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,16 +25,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerLifecycle extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHttpServerLifecycle.class); + HBaseClassTestRule.forClass(TestHttpServerLifecycle.class); /** - * Check that a server is alive by probing the {@link HttpServer#isAlive()} method - * and the text of its toString() description + * Check that a server is alive by probing the {@link HttpServer#isAlive()} method and the text of + * its toString() description * @param server server */ private void assertAlive(HttpServer server) { @@ -49,16 +49,17 @@ private void assertNotLive(HttpServer server) { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testCreatedServerIsNotAlive() throws Throwable { HttpServer server = createTestServer(); assertNotLive(server); } - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStopUnstartedServer() throws Throwable { HttpServer server = createTestServer(); stop(server); @@ -66,10 +67,10 @@ public void testStopUnstartedServer() throws Throwable { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStartedServerIsAlive() throws Throwable { HttpServer server = null; server = createTestServer(); @@ -82,20 +83,20 @@ public void testStartedServerIsAlive() throws Throwable { /** * Assert that the result of {@link HttpServer#toString()} contains the specific text * @param server server to examine - * @param text text to search for + * @param text text to search for */ private void assertToStringContains(HttpServer server, String text) { String description = server.toString(); assertTrue("Did not find \"" + text + "\" in \"" + description + "\"", - description.contains(text)); + description.contains(text)); } /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppedServerIsNotAlive() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -105,10 +106,10 @@ public void testStoppedServerIsNotAlive() throws Throwable { /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppingTwiceServerIsAllowed() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -119,12 +120,10 @@ public void testStoppingTwiceServerIsAllowed() throws Throwable { } /** - * Test that the server is alive once started - * - * @throws Throwable - * on failure + * Test that the server is alive once started n * on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testWepAppContextAfterServerStop() throws 
Throwable { HttpServer server = null; String key = "test.attribute.key"; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java index 11a7db2fbf05..a2916cafb3ce 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,12 +30,12 @@ /** * Test webapp loading */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerWebapps extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHttpServerWebapps.class); + HBaseClassTestRule.forClass(TestHttpServerWebapps.class); private static final Logger log = LoggerFactory.getLogger(TestHttpServerWebapps.class); @@ -61,8 +61,8 @@ public void testValidServerResource() throws Throwable { public void testMissingServerResource() throws Throwable { try { HttpServer server = createServer("NoSuchWebapp"); - //should not have got here. - //close the server + // should not have got here. + // close the server String serverDescription = server.toString(); stop(server); fail("Expected an exception, got " + serverDescription); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java index 7737b298b6a6..9cb36db20cdb 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPathFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPathFilter.class); + HBaseClassTestRule.forClass(TestPathFilter.class); private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); private static final Set RECORDS = new TreeSet<>(); @@ -62,13 +62,13 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public void doFilter(ServletRequest request, ServletResponse response, /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testPathSpecFilters() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); String[] pathSpecs = { "/path", "/path/*" }; HttpServer http = createTestServer(conf, pathSpecs); http.start(); @@ -105,12 +105,11 @@ public void testPathSpecFilters() throws Exception { final String allURL = "/*"; final String[] filteredUrls = { baseURL, baseSlashURL, addedURL, addedSlashURL, longURL }; - final String[] notFilteredUrls = {rootURL, allURL}; + final String[] notFilteredUrls = { rootURL, allURL }; // access the urls and verify our paths specs got added to the // filters - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String filteredUrl : filteredUrls) { access(prefix + filteredUrl); @@ -124,7 +123,7 @@ public void testPathSpecFilters() throws Exception { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String filteredUrl : filteredUrls) { assertTrue(RECORDS.remove(filteredUrl)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java index 7723e6e78871..442fc0e37fe7 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,23 +22,22 @@ import java.util.Arrays; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestProfileOutputServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProfileOutputServlet.class); + HBaseClassTestRule.forClass(TestProfileOutputServlet.class); @Test public void testSanitization() { - List good = Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", - "host=host-1.example.com"); + List good = + Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", "host=host-1.example.com"); for (String input : good) { assertEquals(input, ProfileOutputServlet.sanitize(input)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java index e4ecaedaa3e8..7c2166754400 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,10 +23,8 @@ import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.Set; - import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosTicket; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; @@ -72,11 +70,11 @@ * HttpComponents to verify that the doas= mechanicsm works, and that the proxyuser settings are * observed. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProxyUserSpnegoHttpServer.class); + HBaseClassTestRule.forClass(TestProxyUserSpnegoHttpServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestProxyUserSpnegoHttpServer.class); private static final String KDC_SERVER_HOST = "localhost"; @@ -94,7 +92,6 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { private static File privilegedKeytab; private static File privileged2Keytab; - @BeforeClass public static void setupServer() throws Exception { Configuration conf = new Configuration(); @@ -132,7 +129,7 @@ public static void setupServer() throws Exception { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -154,14 +151,13 @@ public static void stopServer() throws Exception { } private static void setupUser(SimpleKdcServer kdc, File keytab, String principal) - throws KrbException { + throws KrbException { kdc.createPrincipal(principal); kdc.exportPrincipal(principal, keytab); } - protected static Configuration buildSpnegoConfiguration(Configuration conf, - String serverPrincipal, File serverKeytab) { + String serverPrincipal, File serverKeytab) { KerberosName.setRules("DEFAULT"); conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS); @@ -182,13 +178,13 @@ protected static Configuration buildSpnegoConfiguration(Configuration conf, } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ public static AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. 
return new AccessControlList("*", null); @@ -198,20 +194,23 @@ public static AccessControlList buildAdminAcl(Configuration conf) { @Test public void testProxyAllowed() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); } @Test public void testProxyDisallowedForUnprivileged() throws Exception { - testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 User unprivileged is unauthorized to access this page."); + testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 User unprivileged is unauthorized to access this page."); } @Test public void testProxyDisallowedForNotSudoAble() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 Forbidden"); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 Forbidden"); } - public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) throws Exception { + public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) + throws Exception { // Create the subject for the client final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(WHEEL_PRINCIPAL, wheelKeytab); final Set clientPrincipals = clientSubject.getPrincipals(); @@ -221,7 +220,7 @@ public void testProxy(String clientPrincipal, String doAs, int responseCode, Str // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -231,34 +230,32 @@ public void testProxy(String clientPrincipal, String doAs, int responseCode, Str // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); - - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - - URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); - - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = 
GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); + + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + + URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); @@ -266,8 +263,8 @@ public HttpResponse run() throws Exception { if (responseCode == HttpURLConnection.HTTP_OK) { assertTrue(EntityUtils.toString(resp.getEntity()).trim().contains("a:b")); } else { - assertTrue(resp.getStatusLine().toString().contains(statusLine) || - EntityUtils.toString(resp.getEntity()).contains(statusLine)); + assertTrue(resp.getStatusLine().toString().contains(statusLine) + || EntityUtils.toString(resp.getEntity()).contains(statusLine)); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java index cbb5635690b9..15d321390978 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,19 +45,18 @@ import org.slf4j.LoggerFactory; /** - * This testcase issues SSL certificates configures the HttpServer to serve - * HTTPS using the created certficates and calls an echo servlet using the - * corresponding HTTPS URL. + * This testcase issues SSL certificates configures the HttpServer to serve HTTPS using the created + * certficates and calls an echo servlet using the corresponding HTTPS URL. 
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSSLHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSSLHttpServer.class); + HBaseClassTestRule.forClass(TestSSLHttpServer.class); - private static final String BASEDIR = System.getProperty("test.build.dir", - "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName(); + private static final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + + "/" + TestSSLHttpServer.class.getSimpleName(); private static final Logger LOG = LoggerFactory.getLogger(TestSSLHttpServer.class); private static Configuration serverConf; @@ -87,26 +86,24 @@ public static void setup() throws Exception { clientConf.addResource(serverConf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); serverConf.addResource(serverConf.get(SSLFactory.SSL_SERVER_CONF_KEY)); clientConf.set(SSLFactory.SSL_CLIENT_CONF_KEY, serverConf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); - + clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, clientConf); clientSslFactory.init(); - server = new HttpServer.Builder() - .setName("test") - .addEndpoint(new URI("https://localhost")) + server = new HttpServer.Builder().setName("test").addEndpoint(new URI("https://localhost")) .setConf(serverConf) - .keyPassword(HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", - null)) + .keyPassword( + HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", null)) .keyStore(serverConf.get("ssl.server.keystore.location"), HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.password", null), clientConf.get("ssl.server.keystore.type", "jks")) .trustStore(serverConf.get("ssl.server.truststore.location"), HBaseConfiguration.getPassword(serverConf, "ssl.server.truststore.password", null), - serverConf.get("ssl.server.truststore.type", "jks")).build(); + serverConf.get("ssl.server.truststore.type", "jks")) + .build(); server.addUnprivilegedServlet("echo", "/echo", TestHttpServer.EchoServlet.class); server.start(); - baseUrl = new URL("https://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + baseUrl = new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); LOG.info("HTTP server started: " + baseUrl); } @@ -121,8 +118,7 @@ public static void cleanup() throws Exception { @Test public void testEcho() throws Exception { assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, - "/echo?a=b&c<=d&e=>"))); + assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } @Test diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 6b9d2c341ed7..006025e0a978 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,14 +37,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({HttpServerFunctionalTest.class, MediumTests.class}) +@Category({ HttpServerFunctionalTest.class, MediumTests.class }) public class TestSecurityHeadersFilter { private static URL baseUrl; private HttpServer http; @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); + HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); @After public void tearDown() throws Exception { @@ -62,28 +62,27 @@ public void testDefaultValues() throws Exception { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'X-Content-Type-Options' is missing", - conn.getHeaderField("X-Content-Type-Options"), is(not((String)null))); + conn.getHeaderField("X-Content-Type-Options"), is(not((String) null))); assertThat(conn.getHeaderField("X-Content-Type-Options"), equalTo("nosniff")); - assertThat("Header 'X-XSS-Protection' is missing", - conn.getHeaderField("X-XSS-Protection"), is(not((String)null))); + assertThat("Header 'X-XSS-Protection' is missing", conn.getHeaderField("X-XSS-Protection"), + is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid value", - conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); + conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from response," + - "but it's present", - conn.getHeaderField("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from response," + - "but it's present", - conn.getHeaderField("Content-Security-Policy"), is((String)null)); + assertThat( + "Header 'Strict-Transport-Security' should be missing from response," + "but it's present", + conn.getHeaderField("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from response," + "but it's present", + conn.getHeaderField("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws IOException { Configuration conf = new Configuration(); - conf.set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + conf.set("hbase.http.filter.hsts.value", "max-age=63072000;includeSubDomains;preload"); conf.set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); http = createTestServer(conf); http.start(); baseUrl = getServerURL(http); @@ -93,15 +92,15 @@ public void testHstsAndCspSettings() throws IOException { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String)null))); + conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - conn.getHeaderField("Strict-Transport-Security"), - IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); + conn.getHeaderField("Strict-Transport-Security"), + IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - conn.getHeaderField("Content-Security-Policy"), 
Is.is(not((String)null))); + conn.getHeaderField("Content-Security-Policy"), Is.is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - conn.getHeaderField("Content-Security-Policy"), - IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + conn.getHeaderField("Content-Security-Policy"), + IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java index 1e9a2861c9ef..7ea8abe066db 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -42,11 +41,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestServletFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServletFilter.class); + HBaseClassTestRule.forClass(TestServletFilter.class); private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); private static volatile String uri = null; @@ -66,20 +65,21 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - uri = ((HttpServletRequest)request).getRequestURI(); + uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); chain.doFilter(request, response); } /** Configuration for the filter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -90,22 +90,20 @@ public void initFilter(FilterContainer container, Configuration conf) { private static void assertExceptionContains(String string, Throwable t) { String msg = t.getMessage(); - Assert.assertTrue( - "Expected to find '" + string + "' but got unexpected exception:" - + StringUtils.stringifyException(t), msg.contains(string)); + Assert.assertTrue("Expected to find '" + string + "' but got unexpected exception:" + + StringUtils.stringifyException(t), msg.contains(string)); } @Test @Ignore - //From stack + // From stack // Its a 'foreign' test, one that came in from hadoop when we copy/pasted http // It's second class. 
Could comment it out if only failing test (as per @nkeywal – sort of) public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - SimpleFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, SimpleFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -115,23 +113,22 @@ public void testServletFilter() throws Exception { final String logURL = "/logs/a.log"; final String hadooplogoURL = "/static/hadoop-logo.jpg"; - final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL}; + final String[] urls = { fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL }; final Random rand = ThreadLocalRandom.current(); final int[] sequence = new int[50]; - //generate a random sequence and update counts - for(int i = 0; i < sequence.length; i++) { + // generate a random sequence and update counts + for (int i = 0; i < sequence.length; i++) { sequence[i] = rand.nextInt(urls.length); } - //access the urls as the sequence - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls as the sequence + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (int aSequence : sequence) { access(prefix + urls[aSequence]); - //make sure everything except fsck get filtered + // make sure everything except fsck get filtered if (aSequence == 0) { assertNull(uri); } else { @@ -166,8 +163,7 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); // start an http server with ErrorFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - ErrorFilter.Initializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, ErrorFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); try { http.start(); @@ -178,17 +174,15 @@ public void testServletFilterWhenInitThrowsException() throws Exception { } /** - * Similar to the above test case, except that it uses a different API to add the - * filter. Regression test for HADOOP-8786. + * Similar to the above test case, except that it uses a different API to add the filter. + * Regression test for HADOOP-8786. */ @Test - public void testContextSpecificServletFilterWhenInitThrowsException() - throws Exception { + public void testContextSpecificServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); HttpServer http = createTestServer(conf); - HttpServer.defineFilter(http.webAppContext, - "ErrorFilter", ErrorFilter.class.getName(), - null, null); + HttpServer.defineFilter(http.webAppContext, "ErrorFilter", ErrorFilter.class.getName(), null, + null); try { http.start(); fail("expecting exception"); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java index eb3394300011..ae6019546678 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,11 +69,11 @@ * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSpnegoHttpServer.class); + HBaseClassTestRule.forClass(TestSpnegoHttpServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestSpnegoHttpServer.class); private static final String KDC_SERVER_HOST = "localhost"; @@ -114,7 +114,7 @@ public static void setupServer() throws Exception { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -136,13 +136,13 @@ public static void stopServer() throws Exception { } private static void setupUser(SimpleKdcServer kdc, File keytab, String principal) - throws KrbException { + throws KrbException { kdc.createPrincipal(principal); kdc.exportPrincipal(principal, keytab); } private static Configuration buildSpnegoConfiguration(Configuration conf, String serverPrincipal, - File serverKeytab) { + File serverKeytab) { KerberosName.setRules("DEFAULT"); conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS); @@ -174,7 +174,7 @@ public void testAllowedClient() throws Exception { // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -184,34 +184,32 @@ public void testAllowedClient() throws Exception { // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); - - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - - URL url = new URL(getServerURL(server), "/echo?a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); - - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = 
GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + + HttpClientContext context = HttpClientContext.create(); + Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider> create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); + + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + + URL url = new URL(getServerURL(server), "/echo?a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java index ac2ef8f66497..49f5af37f4fe 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,15 +44,15 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * Basic test case that the ConfServlet can write configuration - * to its output in XML and JSON format. + * Basic test case that the ConfServlet can write configuration to its output in XML and JSON + * format.
*/ @Category({ MiscTests.class, SmallTests.class }) public class TestConfServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConfServlet.class); + HBaseClassTestRule.forClass(TestConfServlet.class); private static final String TEST_KEY = "testconfservlet.key"; private static final String TEST_VAL = "testval"; @@ -74,15 +74,14 @@ public void testWriteJson() throws Exception { programSet.add("programatically"); programSet.add("programmatically"); Object parsed = JSON.parse(json); - Object[] properties = ((Map)parsed).get("properties"); + Object[] properties = ((Map) parsed).get("properties"); for (Object o : properties) { - Map propertyInfo = (Map)o; - String key = (String)propertyInfo.get("key"); - String val = (String)propertyInfo.get("value"); - String resource = (String)propertyInfo.get("resource"); + Map propertyInfo = (Map) o; + String key = (String) propertyInfo.get("key"); + String val = (String) propertyInfo.get("value"); + String resource = (String) propertyInfo.get("resource"); System.err.println("k: " + key + " v: " + val + " r: " + resource); - if (TEST_KEY.equals(key) && TEST_VAL.equals(val) - && programSet.contains(resource)) { + if (TEST_KEY.equals(key) && TEST_VAL.equals(val) && programSet.contains(resource)) { foundSetting = true; } } @@ -95,8 +94,7 @@ public void testWriteXml() throws Exception { ConfServlet.writeResponse(getTestConf(), sw, "xml"); String xml = sw.toString(); - DocumentBuilderFactory docBuilderFactory - = DocumentBuilderFactory.newInstance(); + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = builder.parse(new InputSource(new StringReader(xml))); NodeList nameNodes = doc.getElementsByTagName("name"); @@ -107,7 +105,7 @@ public void testWriteXml() throws Exception { System.err.println("xml key: " + key); if (TEST_KEY.equals(key)) { foundSetting = true; - Element propertyElem = (Element)nameNode.getParentNode(); + Element propertyElem = (Element) nameNode.getParentNode(); String val = propertyElem.getElementsByTagName("value").item(0).getTextContent(); assertEquals(TEST_VAL, val); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java index 02248d6bcd29..f70c06dfc73a 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,18 +36,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJMXJsonServlet extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJMXJsonServlet.class); + HBaseClassTestRule.forClass(TestJMXJsonServlet.class); private static final Logger LOG = LoggerFactory.getLogger(TestJMXJsonServlet.class); private static HttpServer server; private static URL baseUrl; - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { // Eclipse doesn't pick this up correctly from the plugin // configuration in the pom. System.setProperty(HttpServerFunctionalTest.TEST_BUILD_WEBAPPS, "target/test-classes/webapps"); @@ -56,93 +57,92 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { baseUrl = getServerURL(server); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } public static void assertReFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertTrue("'"+p+"' does not match "+value, m.find()); + assertTrue("'" + p + "' does not match " + value, m.find()); } public static void assertNotFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertFalse("'"+p+"' should not match "+value, m.find()); + assertFalse("'" + p + "' should not match " + value, m.find()); } - @Test public void testQuery() throws Exception { + @Test + public void testQuery() throws Exception { String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime")); - LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory")); - LOG.info("/jmx?qry=java.lang:type=Memory RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx")); - LOG.info("/jmx RESULT: "+result); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); // test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); // negative test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"ERROR\"", result); // test to get JSONP result result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory&callback=mycallback1")); - LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: 
"+result); + LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: " + result); assertReFind("^mycallback1\\(\\{", result); assertReFind("\\}\\);$", result); // negative test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback2\\(\\{", result); assertReFind("\"ERROR\"", result); assertReFind("\\}\\);$", result); // test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); - LOG.info("/jmx RESULT: "+result); + result = readOutput( + new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback3\\(\\{", result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); assertReFind("\\}\\);$", result); // test exclude the specific mbean - result = readOutput(new URL(baseUrl, - "/jmx?excl=Hadoop:service=HBase,name=RegionServer,sub=Regions")); + result = + readOutput(new URL(baseUrl, "/jmx?excl=Hadoop:service=HBase,name=RegionServer,sub=Regions")); LOG.info("/jmx RESULT: " + result); - assertNotFind("\"name\"\\s*:\\s*\"Hadoop:service=HBase,name=RegionServer,sub=Regions\"",result); + assertNotFind("\"name\"\\s*:\\s*\"Hadoop:service=HBase,name=RegionServer,sub=Regions\"", + result); } @Test public void testGetPattern() throws Exception { // test to get an attribute of a mbean as JSONP - String result = readOutput( - new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[a-zA-z_]*NonHeapMemoryUsage")); + String result = + readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[a-zA-z_]*NonHeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); assertReFind("\"NonHeapMemoryUsage\"\\s*:", result); assertNotFind("\"HeapMemoryUsage\"\\s*:", result); - result = - readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -150,7 +150,7 @@ public void testGetPattern() throws Exception { assertNotFind("\"NonHeapHeapMemoryUsage\"\\s*:", result); result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::[a-zA-z_]*HeapMemoryUsage,[a-zA-z_]*NonHeapMemoryUsage")); + "/jmx?get=java.lang:type=Memory::[a-zA-z_]*HeapMemoryUsage,[a-zA-z_]*NonHeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -166,8 +166,8 @@ public void testPatternMatching() throws Exception { @Test public void testDisallowedJSONPCallback() throws Exception { String callback = "function(){alert('bigproblems!')};foo"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) 
url.openConnection(); assertEquals(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, cnxn.getResponseCode()); } @@ -175,8 +175,8 @@ public void testDisallowedJSONPCallback() throws Exception { @Test public void testUnderscoresInJSONPCallback() throws Exception { String callback = "my_function"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) url.openConnection(); assertEquals(HttpServletResponse.SC_OK, cnxn.getResponseCode()); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java index 39855ee86eff..eff83edfa6b1 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,17 +38,17 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStaticUserWebFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStaticUserWebFilter.class); + HBaseClassTestRule.forClass(TestStaticUserWebFilter.class); private FilterConfig mockConfig(String username) { FilterConfig mock = Mockito.mock(FilterConfig.class); - Mockito.doReturn(username).when(mock).getInitParameter( - ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); + Mockito.doReturn(username).when(mock) + .getInitParameter(ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); return mock; } @@ -63,10 +63,9 @@ public void testFilter() throws Exception { FilterChain chain = mock(FilterChain.class); - suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), - chain); + suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), chain); - Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito.anyObject()); + Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito. anyObject()); HttpServletRequestWrapper wrapper = wrapperArg.getValue(); assertEquals("myuser", wrapper.getUserPrincipal().getName()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java index acbcb55d5dbb..bc45a6295551 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -263,23 +263,20 @@ private HttpServer createServer(String protocol, boolean isSpnego) throws Except private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, final boolean isSpnego) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, org.apache.logging.log4j.Level.DEBUG.toString()); } private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, final boolean isSpnego, final String newLevel) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, - newLevel); + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, newLevel); } /** * Run both client and server using the given protocol. - * @param bindProtocol specify either http or https for server + * @param bindProtocol specify either http or https for server * @param connectProtocol specify either http or https for client - * @param isSpnego true if SPNEGO is enabled + * @param isSpnego true if SPNEGO is enabled * @throws Exception if client can't accesss server. */ private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, @@ -334,7 +331,7 @@ private void testDynamicLogLevel(final String bindProtocol, final String connect /** * Run LogLevel command line to start a client to get log level of this test class. - * @param protocol specify either http or https + * @param protocol specify either http or https * @param authority daemon's web UI address * @throws Exception if unable to connect */ @@ -346,11 +343,12 @@ private void getLevel(String protocol, String authority, String logName) throws /** * Run LogLevel command line to start a client to set log level of this test class to debug. - * @param protocol specify either http or https + * @param protocol specify either http or https * @param authority daemon's web UI address * @throws Exception if unable to run or log level does not change as expected */ - private void setLevel(String protocol, String authority, String logName, String newLevel) throws Exception { + private void setLevel(String protocol, String authority, String logName, String newLevel) + throws Exception { String[] setLevelArgs = { "-setlevel", authority, logName, newLevel, "-protocol", protocol }; CLI cli = new CLI(protocol.equalsIgnoreCase("https") ? sslConf : clientConf); cli.run(setLevelArgs); @@ -369,7 +367,8 @@ public void testSettingProtectedLogLevel() throws Exception { fail("Expected IO exception due to protected logger"); } catch (IOException e) { assertTrue(e.getMessage().contains("" + HttpServletResponse.SC_PRECONDITION_FAILED)); - assertTrue(e.getMessage().contains("Modification of logger " + protectedLogName + " is disallowed in configuration.")); + assertTrue(e.getMessage().contains( + "Modification of logger " + protectedLogName + " is disallowed in configuration.")); } } @@ -394,7 +393,7 @@ public void testErrorLogLevel() throws Exception { /** * Server runs HTTP, no SPNEGO. * @throws Exception if http client can't access http server, or http client can access https - * server. + * server. */ @Test public void testLogLevelByHttp() throws Exception { @@ -410,7 +409,7 @@ public void testLogLevelByHttp() throws Exception { /** * Server runs HTTP + SPNEGO. * @throws Exception if http client can't access http server, or http client can access https - * server. + * server. 
*/ @Test public void testLogLevelByHttpWithSpnego() throws Exception { @@ -426,7 +425,7 @@ public void testLogLevelByHttpWithSpnego() throws Exception { /** * Server runs HTTPS, no SPNEGO. * @throws Exception if https client can't access https server, or https client can access http - * server. + * server. */ @Test public void testLogLevelByHttps() throws Exception { @@ -442,7 +441,7 @@ public void testLogLevelByHttps() throws Exception { /** * Server runs HTTPS + SPNEGO. * @throws Exception if https client can't access https server, or https client can access http - * server. + * server. */ @Test public void testLogLevelByHttpsWithSpnego() throws Exception { @@ -472,7 +471,7 @@ private static void exceptionShouldContains(String substr, Throwable throwable) } t = t.getCause(); } - throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + - StringUtils.stringifyException(throwable), throwable); + throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + + StringUtils.stringifyException(throwable), throwable); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java index ee900db62301..89d71b403af7 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * A simple Jersey resource class TestHttpServer. - * The servlet simply puts the path and the op parameter in a map - * and return it in JSON format in the response. + * A simple Jersey resource class TestHttpServer. The servlet simply puts the path and the op + * parameter in a map and return it in JSON format in the response. */ @Path("") public class JerseyResource { @@ -47,11 +46,9 @@ public class JerseyResource { @GET @Path("{" + PATH + ":.*}") - @Produces({MediaType.APPLICATION_JSON}) - public Response get( - @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, - @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op - ) throws IOException { + @Produces({ MediaType.APPLICATION_JSON }) + public Response get(@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, + @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op) throws IOException { LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op); final Map m = new TreeMap<>(); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index e09f64204a6d..d037037a6108 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.http.ssl; import java.io.File; @@ -63,23 +62,21 @@ public static String getClasspathDir(Class klass) throws Exception { /** * Create a self-signed X.509 Certificate. - * - * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" - * @param pair the KeyPair - * @param days how many days from now the Certificate is valid for + * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" + * @param pair the KeyPair + * @param days how many days from now the Certificate is valid for * @param algorithm the signing algorithm, eg "SHA1withRSA" * @return the self-signed certificate */ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, - String algorithm) throws CertificateEncodingException, InvalidKeyException, - IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, - SignatureException { + String algorithm) throws CertificateEncodingException, InvalidKeyException, + IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, SignatureException { Date from = new Date(); Date to = new Date(from.getTime() + days * 86400000L); BigInteger sn = new BigInteger(64, new SecureRandom()); KeyPair keyPair = pair; X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); - X500Principal dnName = new X500Principal(dn); + X500Principal dnName = new X500Principal(dn); certGen.setSerialNumber(sn); certGen.setIssuerDN(dnName); @@ -92,15 +89,13 @@ public static X509Certificate generateCertificate(String dn, KeyPair pair, int d return cert; } - public static KeyPair generateKeyPair(String algorithm) - throws NoSuchAlgorithmException { + public static KeyPair generateKeyPair(String algorithm) throws NoSuchAlgorithmException { KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); keyGen.initialize(1024); return keyGen.genKeyPair(); } - private static KeyStore createEmptyKeyStore() - throws GeneralSecurityException, IOException { + private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { return createEmptyKeyStore("jks"); } @@ -111,8 +106,7 @@ private static KeyStore createEmptyKeyStore(String keyStoreType) return ks; } - private static void saveKeyStore(KeyStore ks, String filename, - String password) + private static void saveKeyStore(KeyStore ks, String filename, String password) throws GeneralSecurityException, IOException { FileOutputStream out = new FileOutputStream(filename); try { @@ -123,109 +117,93 @@ private static void saveKeyStore(KeyStore ks, String filename, } /** - * Creates a keystore with a single key and saves it to a file. - * This method will use the same password for the keystore and for the key. - * This method will always generate a keystore file in JKS format. - * - * @param filename String file to save - * @param password String store password to set on keystore - * @param alias String alias to use for the key + * Creates a keystore with a single key and saves it to a file. This method will use the same + * password for the keystore and for the key. This method will always generate a keystore file in + * JKS format. 
+ * @param filename String file to save + * @param password String store password to set on keystore + * @param alias String alias to use for the key * @param privateKey Key to save in keystore - * @param cert Certificate to use as certificate chain associated to key + * @param cert Certificate to use as certificate chain associated to key * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String alias, Key privateKey, + Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, password, alias, privateKey, cert); } /** - * Creates a keystore with a single key and saves it to a file. - * This method will always generate a keystore file in JKS format. - * - * @param filename String file to save - * @param password String store password to set on keystore + * Creates a keystore with a single key and saves it to a file. This method will always generate a + * keystore file in JKS format. + * @param filename String file to save + * @param password String store password to set on keystore * @param keyPassword String key password to set on key - * @param alias String alias to use for the key - * @param privateKey Key to save in keystore - * @param cert Certificate to use as certificate chain associated to key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String keyPassword, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); } - /** * Creates a keystore with a single key and saves it to a file. - * - * @param filename String file to save - * @param password String store password to set on keystore - * @param keyPassword String key password to set on key - * @param alias String alias to use for the key - * @param privateKey Key to save in keystore - * @param cert Certificate to use as certificate chain associated to key + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key * @param keystoreType String keystore file type (e.g. 
"JKS") * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ public static void createKeyStore(String filename, String password, String keyPassword, - String alias, Key privateKey, Certificate cert, - String keystoreType) - throws GeneralSecurityException, IOException { + String alias, Key privateKey, Certificate cert, String keystoreType) + throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(keystoreType); - ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), - new Certificate[]{cert}); + ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[] { cert }); saveKeyStore(ks, filename, password); } /** - * Creates a truststore with a single certificate and saves it to a file. - * This method uses the default JKS truststore type. - * + * Creates a truststore with a single certificate and saves it to a file. This method uses the + * default JKS truststore type. * @param filename String file to save * @param password String store password to set on truststore - * @param alias String alias to use for the certificate - * @param cert Certificate to add + * @param alias String alias to use for the certificate + * @param cert Certificate to add * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ - public static void createTrustStore(String filename, - String password, String alias, - Certificate cert) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, String alias, + Certificate cert) throws GeneralSecurityException, IOException { createTrustStore(filename, password, alias, cert, "JKS"); } /** * Creates a truststore with a single certificate and saves it to a file. - * - * @param filename String file to save - * @param password String store password to set on truststore - * @param alias String alias to use for the certificate - * @param cert Certificate to add + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add * @param trustStoreType String keystore file type (e.g. 
"JKS") * @throws GeneralSecurityException for any error with the security APIs - * @throws IOException if there is an I/O error saving the file + * @throws IOException if there is an I/O error saving the file */ public static void createTrustStore(String filename, String password, String alias, - Certificate cert, String trustStoreType) - throws GeneralSecurityException, IOException { + Certificate cert, String trustStoreType) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } - public static void createTrustStore( - String filename, String password, Map certs) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, + Map certs) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(); for (Map.Entry cert : certs.entrySet()) { ks.setCertificateEntry(cert.getKey(), cert.getValue()); @@ -233,46 +211,41 @@ public static void createTrustStore( saveKeyStore(ks, filename, password); } - public static void cleanupSSLConfig(Configuration conf) - throws Exception { + public static void cleanupSSLConfig(Configuration conf) throws Exception { File f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); f.delete(); f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); f.delete(); - String clientKeyStore = conf.get(FileBasedKeyStoresFactory - .resolvePropertyName(SSLFactory.Mode.CLIENT, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); + String clientKeyStore = + conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT, + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); if (clientKeyStore != null) { f = new File(clientKeyStore); f.delete(); } - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_CLIENT_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); f.delete(); - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_SERVER_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_SERVER_CONF_KEY)); f.delete(); } /** - * Performs complete setup of SSL configuration in preparation for testing an - * SSLFactory. This includes keys, certs, keystores, truststores, the server - * SSL configuration file, the client SSL configuration file, and the master - * configuration file read by the SSLFactory. - * - * @param keystoresDir String directory to save keystores - * @param sslConfDir String directory to save SSL configuration files - * @param conf Configuration master configuration to be used by an SSLFactory, - * which will be mutated by this method - * @param useClientCert boolean true to make the client present a cert in the - * SSL handshake + * Performs complete setup of SSL configuration in preparation for testing an SSLFactory. 
This + * includes keys, certs, keystores, truststores, the server SSL configuration file, the client SSL + * configuration file, and the master configuration file read by the SSLFactory. + * @param keystoresDir String directory to save keystores + * @param sslConfDir String directory to save SSL configuration files + * @param conf Configuration master configuration to be used by an SSLFactory, which will + * be mutated by this method + * @param useClientCert boolean true to make the client present a cert in the SSL handshake */ - public static void setupSSLConfig(String keystoresDir, String sslConfDir, - Configuration conf, boolean useClientCert) - throws Exception { + public static void setupSSLConfig(String keystoresDir, String sslConfDir, Configuration conf, + boolean useClientCert) throws Exception { String clientKS = keystoresDir + "/clientKS.jks"; String clientPassword = "clientP"; String serverKS = keystoresDir + "/serverKS.jks"; @@ -280,39 +253,33 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, String trustKS = keystoresDir + "/trustKS.jks"; String trustPassword = "trustP"; - File sslClientConfFile = new File( - sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + HBaseCommonTestingUtil - .getRandomUUID() + ".xml"); - File sslServerConfFile = new File( - sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + HBaseCommonTestingUtil - .getRandomUUID() + ".xml"); + File sslClientConfFile = new File(sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + + HBaseCommonTestingUtil.getRandomUUID() + ".xml"); + File sslServerConfFile = new File(sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + + HBaseCommonTestingUtil.getRandomUUID() + ".xml"); Map certs = new HashMap<>(); if (useClientCert) { KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate cCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", - cKP.getPrivate(), cCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", cKP.getPrivate(), cCert); certs.put("client", cCert); } KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate sCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", - sKP.getPrivate(), sCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", sKP.getPrivate(), sCert); certs.put("server", sCert); KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs); - Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword, - clientPassword, trustKS); - Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword, - serverPassword, trustKS); + Configuration clientSSLConf = + createClientSSLConfig(clientKS, clientPassword, clientPassword, trustKS); + Configuration serverSSLConf = + createServerSSLConfig(serverKS, serverPassword, serverPassword, trustKS); saveConfig(sslClientConfFile, clientSSLConf); saveConfig(sslServerConfFile, serverSSLConf); @@ -322,60 +289,50 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName()); conf.set("dfs.https.server.keystore.resource", 
sslServerConfFile.getName()); - conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert); } /** * Creates SSL configuration for a client. - * - * @param clientKS String client keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password - * @param trustKS String truststore file + * @param clientKS String client keystore file + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password + * @param trustKS String truststore file * @return Configuration for client SSL */ - public static Configuration createClientSSLConfig(String clientKS, - String password, String keyPassword, String trustKS) { - Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT, - clientKS, password, keyPassword, trustKS); + public static Configuration createClientSSLConfig(String clientKS, String password, + String keyPassword, String trustKS) { + Configuration clientSSLConf = + createSSLConfig(SSLFactory.Mode.CLIENT, clientKS, password, keyPassword, trustKS); return clientSSLConf; } /** * Creates SSL configuration for a server. - * - * @param serverKS String server keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password - * @param trustKS String truststore file + * @param serverKS String server keystore file + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password + * @param trustKS String truststore file * @return Configuration for server SSL */ - public static Configuration createServerSSLConfig(String serverKS, - String password, String keyPassword, String trustKS) throws IOException { - Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER, - serverKS, password, keyPassword, trustKS); + public static Configuration createServerSSLConfig(String serverKS, String password, + String keyPassword, String trustKS) throws IOException { + Configuration serverSSLConf = + createSSLConfig(SSLFactory.Mode.SERVER, serverKS, password, keyPassword, trustKS); return serverSSLConf; } /** * Creates SSL configuration. 
- * - * @param mode SSLFactory.Mode mode to configure - * @param keystore String keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password - * @param trustKS String truststore file + * @param mode SSLFactory.Mode mode to configure + * @param keystore String keystore file + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password + * @param trustKS String truststore file * @return Configuration for SSL */ - private static Configuration createSSLConfig(SSLFactory.Mode mode, - String keystore, String password, String keyPassword, String trustKS) { + private static Configuration createSSLConfig(SSLFactory.Mode mode, String keystore, + String password, String keyPassword, String trustKS) { String trustPassword = "trustP"; Configuration sslConf = new Configuration(false); @@ -389,8 +346,7 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, } if (keyPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), - keyPassword); + FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), keyPassword); } if (trustKS != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, @@ -398,8 +354,7 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, } if (trustPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), - trustPassword); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword); } sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000"); @@ -409,13 +364,11 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, /** * Saves configuration to a file. - * * @param file File to save * @param conf Configuration contents to write to file * @throws IOException if there is an I/O error saving the file */ - public static void saveConfig(File file, Configuration conf) - throws IOException { + public static void saveConfig(File file, Configuration conf) throws IOException { Writer writer = new FileWriter(file); try { conf.writeXml(writer); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java index c277cd068da3..ef36d19c0468 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,6 +22,7 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Type; @@ -39,13 +39,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken; import org.apache.hbase.thirdparty.com.google.gson.Gson; /** * Test {@link JSONBean}. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJSONBean { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -58,23 +59,17 @@ private MBeanServer getMockMBeanServer() throws Exception { when(mbeanServer.queryNames(any(), any())).thenReturn(names); MBeanInfo mbeanInfo = mock(MBeanInfo.class); when(mbeanInfo.getClassName()).thenReturn("testClassName"); - String[] attributeNames = new String[] {"intAttr", "nanAttr", "infinityAttr", - "strAttr", "boolAttr", "test:Attr"}; + String[] attributeNames = + new String[] { "intAttr", "nanAttr", "infinityAttr", "strAttr", "boolAttr", "test:Attr" }; MBeanAttributeInfo[] attributeInfos = new MBeanAttributeInfo[attributeNames.length]; for (int i = 0; i < attributeInfos.length; i++) { - attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], - null, - null, - true, - false, - false); + attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], null, null, true, false, false); } when(mbeanInfo.getAttributes()).thenReturn(attributeInfos); when(mbeanServer.getMBeanInfo(any())).thenReturn(mbeanInfo); when(mbeanServer.getAttribute(any(), eq("intAttr"))).thenReturn(3); when(mbeanServer.getAttribute(any(), eq("nanAttr"))).thenReturn(Double.NaN); - when(mbeanServer.getAttribute(any(), eq("infinityAttr"))). - thenReturn(Double.POSITIVE_INFINITY); + when(mbeanServer.getAttribute(any(), eq("infinityAttr"))).thenReturn(Double.POSITIVE_INFINITY); when(mbeanServer.getAttribute(any(), eq("strAttr"))).thenReturn("aString"); when(mbeanServer.getAttribute(any(), eq("boolAttr"))).thenReturn(true); when(mbeanServer.getAttribute(any(), eq("test:Attr"))).thenReturn("aString"); @@ -105,14 +100,14 @@ private String getExpectedJSON() { public void testJSONBeanValueTypes() throws Exception { JSONBean bean = new JSONBean(); StringWriter stringWriter = new StringWriter(); - try ( - PrintWriter printWriter = new PrintWriter(stringWriter); + try (PrintWriter printWriter = new PrintWriter(stringWriter); JSONBean.Writer jsonWriter = bean.open(printWriter)) { jsonWriter.write(getMockMBeanServer(), null, null, false); } final Gson gson = GsonUtil.createGson().create(); - Type typeOfHashMap = new TypeToken>() {}.getType(); + Type typeOfHashMap = new TypeToken>() { + }.getType(); Map expectedJson = gson.fromJson(getExpectedJSON(), typeOfHashMap); Map actualJson = gson.fromJson(stringWriter.toString(), typeOfHashMap); assertEquals(expectedJson, actualJson); diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 890e24f74d78..5d1f920fd6e3 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -40,105 +40,6 @@ - - - - - ../hbase-server/src/test/resources - - META-INF/NOTICE - META-INF/LICENSE - - - - src/test/resources - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-failsafe-plugin - ${surefire.version} - - - org.apache.maven.surefire - surefire-junit4 - ${surefire.version} - - - - - ${integrationtest.include} - - - ${unittest.include} - **/*$* - - ${test.output.tofile} - - ${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib - ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib - 4 - - false - false - - - - integration-test - integration-test - - integration-test - - - - verify - verify - - verify - - - - - - - - - - - 
org.apache.maven.plugins - maven-failsafe-plugin - - false - always - - 1800 - -enableassertions -Xmx${failsafe.Xmx} - -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled - -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal - - - - net.revelc.code - warbucks-maven-plugin - - - - + org.apache.hbase hbase-annotations @@ -198,8 +99,8 @@ which pulls in the below. It messes up this build at assembly time. See HBASE-22029--> - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -269,8 +170,8 @@ test - javax.servlet-api javax.servlet + javax.servlet-api test @@ -295,6 +196,129 @@ + + + + + ../hbase-server/src/test/resources + + META-INF/NOTICE + META-INF/LICENSE + + + + src/test/resources + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${surefire.version} + + + ${integrationtest.include} + + + ${unittest.include} + **/*$* + + ${test.output.tofile} + + ${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib + ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib + 4 + + false + false + + + + org.apache.maven.surefire + surefire-junit4 + ${surefire.version} + + + + + integration-test + + integration-test + + integration-test + + + verify + + verify + + verify + + + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + false + always + + 1800 + -enableassertions -Xmx${failsafe.Xmx} + -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled + -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + + org.apache.maven.plugins + maven-surefire-report-plugin + ${surefire.version} + + + integration-tests + + report-only + + + failsafe-report + + ${project.build.directory}/failsafe-reports + + + + + + + + @@ -325,13 +349,15 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + ${hadoop-three.version} - + org.apache.hadoop hadoop-common @@ -361,10 +387,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -33,35 +31,6 @@ Apache HBase - Logging Logging Support for HBase - - - - src/test/resources - - log4j2.properties - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -109,4 +78,33 @@ test + + + + + src/test/resources + + log4j2.properties + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java index b0711d7e8f1a..18c2c0f6e679 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java index e7b5fdd39356..e389f58aacb8 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java index 9b3459194ab6..ba136663e092 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java index 6ac1ce053cde..01ac73fde178 100644 --- a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java +++ b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/HBaseTestAppender.java @@ -81,14 +81,8 @@ public HBaseTestAppender build() { Layout layout = getOrCreateLayout(StandardCharsets.UTF_8); OutputStreamManager manager = OutputStreamManager.getManager(target.name(), FACTORY, new FactoryData(target, layout)); - return new HBaseTestAppender(getName(), - layout, - getFilter(), - isIgnoreExceptions(), - isImmediateFlush(), - getPropertyArray(), - manager, - size); + return new HBaseTestAppender(getName(), layout, getFilter(), isIgnoreExceptions(), + isImmediateFlush(), getPropertyArray(), manager, size); } } diff --git a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java index 22b649cd44da..b2dceda619ca 100644 --- a/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java +++ b/hbase-logging/src/test/java/org/apache/hadoop/hbase/logging/Target.java @@ -32,4 +32,4 @@ private Target(PrintStream output) { public PrintStream output() { return output; } -} \ No newline at end of file +} diff --git a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java index 7b3876ce0833..57d8732fb1c6 100644 --- a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java +++ b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.log4j; import java.io.BufferedWriter; @@ -222,7 +221,7 @@ public void setBufferSize(int bufferSize) { * Do not use this method directly. To configure a FileAppender or one of its subclasses, set * its properties one by one and then call activateOptions. * @param fileName The path to the log file. - * @param append If true will append to fileName. Otherwise will truncate fileName. 
+ * @param append If true will append to fileName. Otherwise will truncate fileName. */ public synchronized void setFile(String fileName, boolean append, boolean bufferedIO, int bufferSize) throws IOException { diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 68963a0253dd..a0c952e6ea31 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-mapreduce Apache HBase - MapReduce - - This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which + This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which are needed for running MR jobs on tables, WALs, HFiles and other HBase specific constructs. It also contains a bunch of tools: RowCounter, ImportTsv, Import, Export, CompactionTool, - ExportSnapshot, WALPlayer, etc - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - - org/apache/hadoop/hbase/mapreduce/Driver - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - + ExportSnapshot, WALPlayer, etc @@ -284,6 +247,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + org/apache/hadoop/hbase/mapreduce/Driver + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -302,7 +299,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -344,8 +343,7 @@ lifecycle-mapping - - + diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index 0484fbbf239a..60e24be5128e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +18,12 @@ package org.apache.hadoop.hbase.mapred; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.util.ProgramDriver; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.util.ProgramDriver; /** - * Driver for hbase mapreduce jobs. Select which to run by passing name of job - * to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable @@ -39,12 +37,11 @@ static void setProgramDriver(ProgramDriver pgd0) { } /** - * @param args - * @throws Throwable + * nn */ public static void main(String[] args) throws Throwable { pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table"); - ProgramDriver.class.getMethod("driver", new Class[] { String[].class }) - .invoke(pgd, new Object[] { args }); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index 594816fcf503..3d609ffd73b8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; @@ -31,42 +28,37 @@ import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; - +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record */ @InterfaceAudience.Public -public class GroupingTableMap -extends MapReduceBase -implements TableMap { +public class GroupingTableMap extends MapReduceBase + implements TableMap { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - protected byte [][] columns; + protected byte[][] columns; /** - * Use this before submitting a TableMap job. It will appropriately set up the - * JobConf. - * - * @param table table to be processed - * @param columns space separated list of columns to fetch - * @param groupColumns space separated list of columns used to form the key - * used in collect - * @param mapper map class - * @param job job configuration object + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. 
+ * @param table table to be processed + * @param columns space separated list of columns to fetch + * @param groupColumns space separated list of columns used to form the key used in collect + * @param mapper map class + * @param job job configuration object */ @SuppressWarnings("unchecked") public static void initJob(String table, String columns, String groupColumns, Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, + Result.class, job); job.set(GROUP_COLUMNS, groupColumns); } @@ -75,50 +67,38 @@ public void configure(JobConf job) { super.configure(job); String[] cols = job.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } /** - * Extract the grouping columns from value to construct a new key. - * - * Pass the new key and value to reduce. - * If any of the grouping columns are not found in the value, the record is skipped. - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); output.collect(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. - * - * Override this method if you want to deal with nulls differently. - * - * @param r - * @return array of byte values + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. Override this method if you want to deal with nulls differently. n * @return + * array of byte values */ protected byte[][] extractKeyValues(Result r) { byte[][] keyVals = null; ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -126,7 +106,7 @@ protected byte[][] extractKeyValues(Result r) { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -134,19 +114,17 @@ protected byte[][] extractKeyValues(Result r) { } /** - * Create a key by concatenating multiple column values. - * Override this function in order to produce different types of keys. - * - * @param vals - * @return key generated by concatenating multiple column values + * Create a key by concatenating multiple column values. Override this function in order to + * produce different types of keys. 
n * @return key generated by concatenating multiple column + * values */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index b777f7ae24ff..a600f7fe85b0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +20,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; @@ -31,18 +27,18 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * + * This is used to partition the output keys into groups of keys. Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. * @param * @param */ @InterfaceAudience.Public -public class HRegionPartitioner -implements Partitioner { +public class HRegionPartitioner implements Partitioner { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); // Connection and locator are not cleaned up; they just die when partitioner is done. private Connection connection; @@ -70,7 +66,7 @@ public void configure(JobConf job) { public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -80,12 +76,11 @@ public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. 
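The partitioner above is normally installed through TableMapReduceUtil rather than set by hand. A minimal sketch, assuming the map phase emits ImmutableBytesWritable/Put pairs and that a table named "dest_table" exists:

import java.io.IOException;

import org.apache.hadoop.hbase.mapred.HRegionPartitioner;
import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class PartitionerExample {
  // Route reduce input by the destination region so each reducer fills a single region.
  public static void configureReduce(JobConf job) throws IOException {
    TableMapReduceUtil.initTableReduceJob("dest_table", IdentityTableReduce.class, job,
      HRegionPartitioner.class);
  }
}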
- return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java index c97bcc025230..16256942d72a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,22 +18,20 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to reduce */ @InterfaceAudience.Public -public class IdentityTableMap -extends MapReduceBase -implements TableMap { +public class IdentityTableMap extends MapReduceBase + implements TableMap { /** constructor */ public IdentityTableMap() { @@ -42,33 +39,24 @@ public IdentityTableMap() { } /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * - * @param table table name + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. + * @param table table name * @param columns columns to scan - * @param mapper mapper class - * @param job job configuration + * @param mapper mapper class + * @param job job configuration */ @SuppressWarnings("unchecked") - public static void initJob(String table, String columns, - Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, + public static void initJob(String table, String columns, Class mapper, + JobConf job) { + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, Result.class, job); } /** - * Pass the key, value to reduce - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * Pass the key, value to reduce nnnnn */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { // convert output.collect(key, value); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java index ba1df4c3a835..79d5f3dc8c0d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,41 +19,31 @@ import java.io.IOException; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Write to table each key, record pair */ @InterfaceAudience.Public -public class IdentityTableReduce -extends MapReduceBase -implements TableReduce { +public class IdentityTableReduce extends MapReduceBase + implements TableReduce { @SuppressWarnings("unused") - private static final Logger LOG = - LoggerFactory.getLogger(IdentityTableReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName()); /** - * No aggregation, output pairs of (key, record) - * @param key - * @param values - * @param output - * @param reporter - * @throws IOException + * No aggregation, output pairs of (key, record) nnnnn */ public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { - while(values.hasNext()) { + while (values.hasNext()) { output.collect(key, values.next()); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java index 7902d1a3b4c3..24e9da0f28d2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
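IdentityTableReduce expects Put values from the map phase, so a pass-through copy job needs a mapper that first converts each scanned Result into a Put. A sketch under that assumption (table names, columns, and the ToPutMap class are illustrative; cell timestamps are not preserved):

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class CopyTableExample {

  /** Converts each scanned Result into a Put so IdentityTableReduce can write it unchanged. */
  public static class ToPutMap extends MapReduceBase
    implements TableMap<ImmutableBytesWritable, Put> {
    @Override
    public void map(ImmutableBytesWritable key, Result value,
      OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
      Put put = new Put(key.copyBytes());
      for (Cell cell : value.listCells()) {
        put.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
          CellUtil.cloneValue(cell));
      }
      output.collect(key, put);
    }
  }

  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), CopyTableExample.class);
    // Scan two (placeholder) columns from the source table and emit Puts keyed by row.
    TableMapReduceUtil.initTableMapJob("source_table", "info:c1 info:c2", ToPutMap.class,
      ImmutableBytesWritable.class, Put.class, job);
    // IdentityTableReduce writes each Put to the destination table as-is.
    TableMapReduceUtil.initTableReduceJob("dest_table", IdentityTableReduce.class, job);
    JobClient.runJob(job);
  }
}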
*/ - package org.apache.hadoop.hbase.mapred; import edu.umd.cs.findbugs.annotations.SuppressWarnings; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -32,33 +34,25 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.yetus.audience.InterfaceAudience; /** * MultiTableSnapshotInputFormat generalizes - * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * and thus has the same performance advantages; see - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * for more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan + * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} allowing a MapReduce job to run + * over one or more table snapshots, with one or more scans configured for each. Internally, the + * input format delegates to {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} and + * thus has the same performance advantages; see + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more details. Usage is + * similar to TableSnapshotInputFormat, with the following exception: + * initMultiTableSnapshotMapperJob takes in a map from snapshot name to a collection of scans. For + * each snapshot in the map, each corresponding scan will be applied; the overall dataset for the + * job is defined by the concatenation of the regions and tables included in each snapshot/scan * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, - * Class, Class, Class, JobConf, boolean, Path)} + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, Class, Class, Class, JobConf, boolean, Path)} * can be used to configure the job. - *
    {@code
    + *
     + * <pre>
    + * {@code
      * Job job = new Job(conf);
      * Map<String, Collection<Scan>> snapshotScans = ImmutableMap.of(
      *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
    @@ -70,21 +64,18 @@
      *      MyMapOutputValueWritable.class, job, true, restoreDir);
      * }
      * </pre>
    - * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on - * permissioning; the - * same caveats apply here. * + * Internally, this input format restores each snapshot into a subdirectory of the given tmp + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on + * permissioning; the same caveats apply here. * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ @InterfaceAudience.Public public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat - implements InputFormat { + implements InputFormat { private final MultiTableSnapshotInputFormatImpl delegate; @@ -104,25 +95,20 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { @Override public RecordReader getRecordReader(InputSplit split, JobConf job, - Reporter reporter) throws IOException { + Reporter reporter) throws IOException { return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job); } @SuppressWarnings("checkstyle:linelength") /** * Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of - * restoreDir. - * Sets: + * restoreDir. Sets: * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY}, * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY} - * - * @param conf - * @param snapshotScans - * @param restoreDir - * @throws IOException + * nnnn */ public static void setInput(Configuration conf, Map> snapshotScans, - Path restoreDir) throws IOException { + Path restoreDir) throws IOException { new MultiTableSnapshotInputFormatImpl().setInput(conf, snapshotScans, restoreDir); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java index 75b221c5526b..4f95950589c0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,11 +30,11 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; /** - * A job with a map to count rows. - * Map outputs table rows IF the input row has columns that have content. - * Uses a org.apache.hadoop.mapred.lib.IdentityReducer + * A job with a map to count rows. Map outputs table rows IF the input row has columns that have + * content. 
Uses a org.apache.hadoop.mapred.lib.IdentityReducer */ @InterfaceAudience.Public public class RowCounter extends Configured implements Tool { @@ -47,16 +44,16 @@ public class RowCounter extends Configured implements Tool { /** * Mapper that runs the count. */ - static class RowCounterMapper - implements TableMap { - private static enum Counters {ROWS} + static class RowCounterMapper implements TableMap { + private static enum Counters { + ROWS + } public void map(ImmutableBytesWritable row, Result values, - OutputCollector output, - Reporter reporter) - throws IOException { - // Count every row containing data, whether it's in qualifiers or values - reporter.incrCounter(Counters.ROWS, 1); + OutputCollector output, Reporter reporter) + throws IOException { + // Count every row containing data, whether it's in qualifiers or values + reporter.incrCounter(Counters.ROWS, 1); } public void configure(JobConf jc) { @@ -69,9 +66,7 @@ public void close() throws IOException { } /** - * @param args - * @return the JobConf - * @throws IOException + * n * @return the JobConf n */ public JobConf createSubmittableJob(String[] args) throws IOException { JobConf c = new JobConf(getConf(), getClass()); @@ -86,8 +81,8 @@ public JobConf createSubmittableJob(String[] args) throws IOException { sb.append(args[i]); } // Second argument is the table name. - TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c); + TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, c); c.setNumReduceTasks(0); // First arg is the output directory. FileOutputFormat.setOutputPath(c, new Path(args[0])); @@ -95,8 +90,7 @@ public JobConf createSubmittableJob(String[] args) throws IOException { } static int printUsage() { - System.out.println(NAME + - " [...]"); + System.out.println(NAME + " [...]"); return -1; } @@ -111,8 +105,7 @@ public int run(final String[] args) throws Exception { } /** - * @param args - * @throws Exception + * nn */ public static void main(String[] args) throws Exception { int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java index d9bb66bdf07f..3e38b0172ca0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -32,13 +27,15 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConfigurable; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. 
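TableMapReduceUtil.initTableMapJob normally performs this wiring, but the two settings this input format reads at initialization can also be set directly on the JobConf; the table and column names below are placeholders:

import org.apache.hadoop.hbase.mapred.TableInputFormat;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;

public class TableInputExample {
  public static void configureInput(JobConf job) {
    job.setInputFormat(TableInputFormat.class);
    // The single "input path" is interpreted as the name of the table to scan.
    FileInputFormat.addInputPaths(job, "source_table");
    // Space-separated family:qualifier (or family:) names placed in each Result.
    job.set(TableInputFormat.COLUMN_LIST, "info:c1 info:c2");
  }
}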
*/ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase implements - JobConfigurable { +public class TableInputFormat extends TableInputFormatBase implements JobConfigurable { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); /** @@ -59,7 +56,7 @@ protected void initialize(JobConf job) throws IOException { Path[] tableNames = FileInputFormat.getInputPaths(job); String colArg = job.get(COLUMN_LIST); String[] colNames = colArg.split(" "); - byte [][] m_cols = new byte[colNames.length][]; + byte[][] m_cols = new byte[colNames.length][]; for (int i = 0; i < m_cols.length; i++) { m_cols[i] = Bytes.toBytes(colNames[i]); } @@ -70,15 +67,14 @@ protected void initialize(JobConf job) throws IOException { public void validateInput(JobConf job) throws IOException { // expecting exactly one path - Path [] tableNames = FileInputFormat.getInputPaths(job); + Path[] tableNames = FileInputFormat.getInputPaths(job); if (tableNames == null || tableNames.length > 1) { throw new IOException("expecting one table name"); } // connected to table? if (getTable() == null) { - throw new IOException("could not connect to table '" + - tableNames[0].getName() + "'"); + throw new IOException("could not connect to table '" + tableNames[0].getName() + "'"); } // expecting at least one column diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index cef8e8a8176a..34736bd6a3db 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ import java.io.Closeable; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -37,21 +32,23 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a - * byte[] of input columns and optionally a {@link Filter}. - * Subclasses may use other TableRecordReader implementations. + * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a byte[] of input columns and + * optionally a {@link Filter}. Subclasses may use other TableRecordReader implementations. *

    * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to * function properly. Each of the entry points to this class used by the MapReduce framework, * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, - * will call {@link #initialize(JobConf)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. - * + * will call {@link #initialize(JobConf)} as a convenient centralized location to handle retrieving + * the necessary configuration information. If your subclass overrides either of these methods, + * either call the parent version or call initialize yourself. *

    * An example of a subclass: + * *

      *   class ExampleTIF extends TableInputFormatBase {
      *
    @@ -77,32 +74,28 @@
      */
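A fuller version of the subclass example that the javadoc above references, assuming a table named "exampleTable" and a single placeholder column family:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapred.TableInputFormatBase;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;

public class ExampleTIF extends TableInputFormatBase {
  @Override
  protected void initialize(JobConf job) throws IOException {
    // The connection is handed to initializeTable(); closeTable() closes it later.
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    initializeTable(connection, TableName.valueOf("exampleTable"));
    // Columns (family or family:qualifier) to place in each Result handed to the mapper.
    setInputColumns(new byte[][] { Bytes.toBytes("columnFamily") });
  }
}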
     
     @InterfaceAudience.Public
     -public abstract class TableInputFormatBase
     -implements InputFormat<ImmutableBytesWritable, Result> {
     +public abstract class TableInputFormatBase implements InputFormat<ImmutableBytesWritable, Result> {
       private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class);
    -  private byte [][] inputColumns;
    +  private byte[][] inputColumns;
       private Table table;
       private RegionLocator regionLocator;
       private Connection connection;
       private TableRecordReader tableRecordReader;
       private Filter rowFilter;
     
    -  private static final String NOT_INITIALIZED = "The input format instance has not been properly " +
    -      "initialized. Ensure you call initializeTable either in your constructor or initialize " +
    -      "method";
    -  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" +
    -            " previous error. Please look at the previous logs lines from" +
    -            " the task's full log for more details.";
    +  private static final String NOT_INITIALIZED = "The input format instance has not been properly "
    +    + "initialized. Ensure you call initializeTable either in your constructor or initialize "
    +    + "method";
    +  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a"
    +    + " previous error. Please look at the previous logs lines from"
    +    + " the task's full log for more details.";
     
       /**
    -   * Builds a TableRecordReader. If no TableRecordReader was provided, uses
    -   * the default.
    -   *
    +   * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
        * @see InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
        */
     -  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
     -      InputSplit split, JobConf job, Reporter reporter)
     -  throws IOException {
     +  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(InputSplit split, JobConf job,
     +    Reporter reporter) throws IOException {
         // In case a subclass uses the deprecated approach or calls initializeTable directly
         if (table == null) {
           initialize(job);
    @@ -119,8 +112,8 @@ public RecordReader getRecordReader(
     
         TableSplit tSplit = (TableSplit) split;
         // if no table record reader was provided use default
    -    final TableRecordReader trr = this.tableRecordReader == null ? new TableRecordReader() :
    -        this.tableRecordReader;
    +    final TableRecordReader trr =
    +      this.tableRecordReader == null ? new TableRecordReader() : this.tableRecordReader;
         trr.setStartRow(tSplit.getStartRow());
         trr.setEndRow(tSplit.getEndRow());
         trr.setHTable(this.table);
    @@ -165,20 +158,15 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
       /**
        * Calculates the splits that will serve as input for the map tasks.
        * 

    - * Splits are created in number equal to the smallest between numSplits and - * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. - * If the number of splits is smaller than the number of - * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across - * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s - * and are grouped the most evenly possible. In the - * case splits are uneven the bigger splits are placed first in the - * {@link InputSplit} array. - * - * @param job the map task {@link JobConf} + * Splits are created in number equal to the smallest between numSplits and the number of + * {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. If the number of splits is + * smaller than the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits + * are spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s and are + * grouped the most evenly possible. In the case splits are uneven the bigger splits are placed + * first in the {@link InputSplit} array. + * @param job the map task {@link JobConf} * @param numSplits a hint to calculate the number of splits (mapred.map.tasks). - * * @return the input splits - * * @see InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int) */ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { @@ -195,26 +183,24 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { throw new IOException(INITIALIZATION_ERROR, exception); } - byte [][] startKeys = this.regionLocator.getStartKeys(); + byte[][] startKeys = this.regionLocator.getStartKeys(); if (startKeys == null || startKeys.length == 0) { throw new IOException("Expecting at least one region"); } if (this.inputColumns == null || this.inputColumns.length == 0) { throw new IOException("Expecting at least one column"); } - int realNumSplits = numSplits > startKeys.length? startKeys.length: - numSplits; + int realNumSplits = numSplits > startKeys.length ? startKeys.length : numSplits; InputSplit[] splits = new InputSplit[realNumSplits]; int middle = startKeys.length / realNumSplits; int startPos = 0; for (int i = 0; i < realNumSplits; i++) { int lastPos = startPos + middle; lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos; - String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]). - getHostname(); - splits[i] = new TableSplit(this.table.getName(), - startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]: - HConstants.EMPTY_START_ROW, regionLocation); + String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname(); + splits[i] = new TableSplit(this.table.getName(), startKeys[startPos], + ((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW, + regionLocation); LOG.info("split: " + i + "->" + splits[i]); startPos = lastPos; } @@ -223,15 +209,13 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { /** * Allows subclasses to initialize the table information. - * - * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. - * @param tableName The {@link TableName} of the table to process. - * @throws IOException + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. 
n */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { - LOG.warn("initializeTable called multiple times. Overwriting connection and table " + - "reference; TableInputFormatBase will not close these old references when done."); + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); @@ -241,7 +225,7 @@ protected void initializeTable(Connection connection, TableName tableName) throw /** * @param inputColumns to be passed in {@link Result} to the map task. */ - protected void setInputColumns(byte [][] inputColumns) { + protected void setInputColumns(byte[][] inputColumns) { this.inputColumns = inputColumns; } @@ -256,27 +240,22 @@ protected Table getTable() { } /** - * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader - * to provide other {@link TableRecordReader} implementations. + * Allows subclasses to set the {@link TableRecordReader}. n * to provide other + * {@link TableRecordReader} implementations. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; } /** - * Allows subclasses to set the {@link Filter} to be used. - * - * @param rowFilter + * Allows subclasses to set the {@link Filter} to be used. n */ protected void setRowFilter(Filter rowFilter) { this.rowFilter = rowFilter; } /** - * Handle subclass specific set up. - * Each of the entry points used by the MapReduce framework, + * Handle subclass specific set up. Each of the entry points used by the MapReduce framework, * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, * will call {@link #initialize(JobConf)} as a convenient centralized location to handle * retrieving the necessary configuration information and calling @@ -284,19 +263,16 @@ protected void setRowFilter(Filter rowFilter) { *

    * Subclasses should implement their initialize call such that it is safe to call multiple times. * The current TableInputFormatBase implementation relies on a non-null table reference to decide - * if an initialize call is needed, but this behavior may change in the future. In particular, - * it is critical that initializeTable not be called multiple times since this will leak - * Connection instances. - * + * if an initialize call is needed, but this behavior may change in the future. In particular, it + * is critical that initializeTable not be called multiple times since this will leak Connection + * instances. */ protected void initialize(JobConf job) throws IOException { } /** * Close the Table and related objects that were initialized via - * {@link #initializeTable(Connection, TableName)}. - * - * @throws IOException + * {@link #initializeTable(Connection, TableName)}. n */ protected void closeTable() throws IOException { close(table, connection); @@ -306,7 +282,9 @@ protected void closeTable() throws IOException { private void close(Closeable... closables) throws IOException { for (Closeable c : closables) { - if(c != null) { c.close(); } + if (c != null) { + c.close(); + } } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java index d76572722b6f..639ad707208a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,20 @@ */ package org.apache.hadoop.hbase.mapred; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Scan an HBase table to sort by a specified sort column. - * If the column does not exist, the record is not passed to Reduce. - * + * Scan an HBase table to sort by a specified sort column. If the column does not exist, the record + * is not passed to Reduce. * @param WritableComparable key class * @param Writable value class */ @InterfaceAudience.Public public interface TableMap, V> -extends Mapper { + extends Mapper { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index 99f6eb4b92ee..76b01721c594 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,11 +17,13 @@ */ package org.apache.hadoop.hbase.mapred; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; @@ -41,13 +42,10 @@ import org.apache.hadoop.mapred.OutputFormat; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.mapred.TextOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collection; -import java.util.Map; - /** * Utility for {@link TableMap} and {@link TableReduce} */ @@ -57,49 +55,39 @@ public class TableMapReduceUtil { private static final Logger LOG = LoggerFactory.getLogger(TableMapReduceUtil.class); /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * - * @param table The table name to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job configuration to adjust. + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. + * @param table The table name to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job configuration to adjust. */ - public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job) { - initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, - true, TableInputFormat.class); + public static void initTableMapJob(String table, String columns, Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job) { + initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, true, + TableInputFormat.class); } - public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job, boolean addDependencyJars) { + public static void initTableMapJob(String table, String columns, Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars) { initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, TableInputFormat.class); } /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * - * @param table The table name to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. + * @param table The table name to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. 
- * @param job The current job configuration to adjust. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job configuration to adjust. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ - public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job, boolean addDependencyJars, + public static void initTableMapJob(String table, String columns, Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars, Class inputFormat) { job.setInputFormat(inputFormat); @@ -107,7 +95,7 @@ public static void initTableMapJob(String table, String columns, job.setMapOutputKeyClass(outputKeyClass); job.setMapperClass(mapper); job.setStrings("io.serializations", job.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); FileInputFormat.addInputPaths(job, table); job.set(TableInputFormat.COLUMN_LIST, columns); if (addDependencyJars) { @@ -120,28 +108,26 @@ public static void initTableMapJob(String table, String columns, try { initCredentials(job); } catch (IOException ioe) { - // just spit out the stack trace? really? + // just spit out the stack trace? really? LOG.error("IOException encountered while initializing credentials", ioe); } } /** * Sets up the job for reading from one or more multiple table snapshots, with one or more scans - * per snapshot. - * It bypasses hbase servers and read directly from snapshot files. - * + * per snapshot. It bypasses hbase servers and read directly from snapshot files. * @param snapshotScans map of snapshot name to scans on that snapshot. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map> snapshotScans, - Class mapper, Class outputKeyClass, Class outputValueClass, - JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { MultiTableSnapshotInputFormat.setInput(job, snapshotScans, tmpRestoreDir); job.setInputFormat(MultiTableSnapshotInputFormat.class); @@ -160,30 +146,27 @@ public static void initMultiTableSnapshotMapperJob(Map> } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. 
+ * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapJob(String snapshotName, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf job, - boolean addDependencyJars, Path tmpRestoreDir) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir); initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, TableSnapshotInputFormat.class); @@ -191,97 +174,81 @@ public static void initTableSnapshotMapJob(String snapshotName, String columns, } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param columns The columns to scan. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param jobConf The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. - * @param splitAlgo algorithm to split + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param columns The columns to scan. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. 
+ * @param outputValueClass The class of the output value. + * @param jobConf The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. + * @param splitAlgo algorithm to split * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapJob(String snapshotName, String columns, - Class mapper, - Class outputKeyClass, - Class outputValueClass, JobConf jobConf, - boolean addDependencyJars, Path tmpRestoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, - int numSplitsPerRegion) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + JobConf jobConf, boolean addDependencyJars, Path tmpRestoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { TableSnapshotInputFormat.setInput(jobConf, snapshotName, tmpRestoreDir, splitAlgo, - numSplitsPerRegion); + numSplitsPerRegion); initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, jobConf, - addDependencyJars, TableSnapshotInputFormat.class); + addDependencyJars, TableSnapshotInputFormat.class); org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.resetCacheConfig(jobConf); } - /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job configuration to adjust. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job configuration to adjust. * @throws IOException When determining the region count fails. */ - public static void initTableReduceJob(String table, - Class reducer, JobConf job) - throws IOException { + public static void initTableReduceJob(String table, Class reducer, + JobConf job) throws IOException { initTableReduceJob(table, reducer, job, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job configuration to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job configuration to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. * @throws IOException When determining the region count fails. 
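A hedged end-to-end sketch of the snapshot path described above; the snapshot name, column, and both paths are placeholders, and the restore directory must be writable by the submitting user and must not live under the HBase root directory:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.IdentityTableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class SnapshotScanExample {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), SnapshotScanExample.class);
    Path restoreDir = new Path("/tmp/snapshot-restore");
    TableMapReduceUtil.initTableSnapshotMapJob("example_snapshot", "info:c1",
      IdentityTableMap.class, ImmutableBytesWritable.class, Result.class, job, true, restoreDir);
    // Map-only job: dump the scanned rows to HDFS, bypassing the region servers entirely.
    job.setNumReduceTasks(0);
    FileOutputFormat.setOutputPath(job, new Path("/tmp/snapshot-out"));
    JobClient.runJob(job);
  }
}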
*/ - public static void initTableReduceJob(String table, - Class reducer, JobConf job, Class partitioner) - throws IOException { + public static void initTableReduceJob(String table, Class reducer, + JobConf job, Class partitioner) throws IOException { initTableReduceJob(table, reducer, job, partitioner, true); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job configuration to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job configuration to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. */ - public static void initTableReduceJob(String table, - Class reducer, JobConf job, Class partitioner, - boolean addDependencyJars) throws IOException { + public static void initTableReduceJob(String table, Class reducer, + JobConf job, Class partitioner, boolean addDependencyJars) throws IOException { job.setOutputFormat(TableOutputFormat.class); job.setReducerClass(reducer); job.set(TableOutputFormat.OUTPUT_TABLE, table); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Put.class); job.setStrings("io.serializations", job.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); if (partitioner == HRegionPartitioner.class) { job.setPartitionerClass(HRegionPartitioner.class); int regions = getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); @@ -322,11 +289,10 @@ public static void initCredentials(JobConf job) throws IOException { } /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Ensures that the given number of reduce tasks for the given job configuration does not exceed + * the number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ // Used by tests. @@ -338,11 +304,10 @@ public static void limitNumReduceTasks(String table, JobConf job) throws IOExcep } /** - * Ensures that the given number of map tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Ensures that the given number of map tasks for the given job configuration does not exceed the + * number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. 
* @throws IOException When retrieving the table details fails. */ // Used by tests. @@ -354,11 +319,10 @@ public static void limitNumMapTasks(String table, JobConf job) throws IOExceptio } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Sets the number of reduce tasks for the given job configuration to the number of regions the + * given table has. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumReduceTasks(String table, JobConf job) throws IOException { @@ -366,11 +330,10 @@ public static void setNumReduceTasks(String table, JobConf job) throws IOExcepti } /** - * Sets the number of map tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job configuration to adjust. + * Sets the number of map tasks for the given job configuration to the number of regions the given + * table has. + * @param table The table to get the region count for. + * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumMapTasks(String table, JobConf job) throws IOException { @@ -378,13 +341,11 @@ public static void setNumMapTasks(String table, JobConf job) throws IOException } /** - * Sets the number of rows to return and cache with each scanner iteration. - * Higher caching values will enable faster mapreduce jobs at the expense of - * requiring more heap to contain the cached rows. - * - * @param job The current job configuration to adjust. - * @param batchSize The number of rows to return in batch with each scanner - * iteration. + * Sets the number of rows to return and cache with each scanner iteration. Higher caching values + * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached + * rows. + * @param job The current job configuration to adjust. + * @param batchSize The number of rows to return in batch with each scanner iteration. 
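The sizing helpers above are one-liners in practice; a small sketch against a placeholder table name:

import java.io.IOException;

import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class TaskSizingExample {
  public static void sizeToRegions(JobConf job) throws IOException {
    // Never run more reducers than there are regions to write into.
    TableMapReduceUtil.limitNumReduceTasks("example_table", job);
    // Likewise cap map tasks when a table-backed input format is used.
    TableMapReduceUtil.limitNumMapTasks("example_table", job);
  }
}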
*/ public static void setScannerCaching(JobConf job, int batchSize) { job.setInt("hbase.client.scanner.caching", batchSize); @@ -395,19 +356,14 @@ public static void setScannerCaching(JobConf job, int batchSize) { */ public static void addDependencyJars(JobConf job) throws IOException { org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job); - org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses( - job, - job.getMapOutputKeyClass(), - job.getMapOutputValueClass(), - job.getOutputKeyClass(), - job.getOutputValueClass(), - job.getPartitionerClass(), + org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(job, + job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getOutputKeyClass(), + job.getOutputValueClass(), job.getPartitionerClass(), job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class), job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class), job.getCombinerClass()); } - private static int getRegionCount(Configuration conf, TableName tableName) throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); RegionLocator locator = conn.getRegionLocator(tableName)) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index a55784729c0b..1c60bec84cda 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,9 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -35,6 +32,7 @@ import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Progressable; +import org.apache.yetus.audience.InterfaceAudience; /** * Convert Map/Reduce output and write it to an HBase table @@ -46,8 +44,8 @@ public class TableOutputFormat extends FileOutputFormat { private BufferedMutator m_mutator; @@ -88,31 +86,25 @@ public void write(ImmutableBytesWritable key, Put value) throws IOException { } /** - * Creates a new record writer. - * - * Be aware that the baseline javadoc gives the impression that there is a single - * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new + * Creates a new record writer. Be aware that the baseline javadoc gives the impression that there + * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new * RecordWriter per call of this method. You must close the returned RecordWriter when done. * Failure to do so will drop writes. - * * @param ignored Ignored filesystem - * @param job Current JobConf - * @param name Name of the job - * @param progress - * @return The newly created writer instance. 
+ * @param job Current JobConf + * @param name Name of the job n * @return The newly created writer instance. * @throws IOException When creating the writer fails. */ @Override public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name, - Progressable progress) - throws IOException { + Progressable progress) throws IOException { // Clear write buffer on fail is true by default so no need to reset it. return new TableRecordWriter(job); } @Override public void checkOutputSpecs(FileSystem ignored, JobConf job) - throws FileAlreadyExistsException, InvalidJobConfException, IOException { + throws FileAlreadyExistsException, InvalidJobConfException, IOException { String tableName = job.get(OUTPUT_TABLE); if (tableName == null) { throw new IOException("Must specify table name"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java index 3d41d8c5fcf8..c19531be46fe 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,38 +18,30 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.RecordReader; - +import org.apache.yetus.audience.InterfaceAudience; /** * Iterate over an HBase table data, return (Text, RowResult) pairs */ @InterfaceAudience.Public -public class TableRecordReader -implements RecordReader { +public class TableRecordReader implements RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); /** - * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow - * @throws IOException + * Restart from survivable exceptions by creating a new scanner. nn */ public void restart(byte[] firstRow) throws IOException { this.recordReaderImpl.restart(firstRow); } /** - * Build the scanner. Not done in constructor to allow for extension. - * - * @throws IOException + * Build the scanner. Not done in constructor to allow for extension. n */ public void init() throws IOException { this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow()); @@ -66,22 +57,21 @@ public void setHTable(Table htable) { /** * @param inputColumns the columns to be placed in {@link Result}. 
*/ - public void setInputColumns(final byte [][] inputColumns) { + public void setInputColumns(final byte[][] inputColumns) { this.recordReaderImpl.setInputColumns(inputColumns); } /** * @param startRow the first row in the split */ - public void setStartRow(final byte [] startRow) { + public void setStartRow(final byte[] startRow) { this.recordReaderImpl.setStartRow(startRow); } /** - * * @param endRow the last row in the split */ - public void setEndRow(final byte [] endRow) { + public void setEndRow(final byte[] endRow) { this.recordReaderImpl.setEndRow(endRow); } @@ -97,8 +87,7 @@ public void close() { } /** - * @return ImmutableBytesWritable - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createKey() */ public ImmutableBytesWritable createKey() { @@ -106,8 +95,7 @@ public ImmutableBytesWritable createKey() { } /** - * @return RowResult - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createValue() */ public Result createValue() { @@ -127,13 +115,11 @@ public float getProgress() { } /** - * @param key HStoreKey as input key. + * @param key HStoreKey as input key. * @param value MapWritable as input value - * @return true if there was more data - * @throws IOException + * @return true if there was more data n */ - public boolean next(ImmutableBytesWritable key, Result value) - throws IOException { + public boolean next(ImmutableBytesWritable key, Result value) throws IOException { return this.recordReaderImpl.next(key, value); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java index aff83ddcefe1..80d6668eda1f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +18,7 @@ package org.apache.hadoop.hbase.mapred; import static org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl.LOG_PER_ROW_COUNT; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -44,13 +44,13 @@ public class TableRecordReaderImpl { private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); - private byte [] startRow; - private byte [] endRow; - private byte [] lastSuccessfulRow; + private byte[] startRow; + private byte[] endRow; + private byte[] lastSuccessfulRow; private Filter trrRowFilter; private ResultScanner scanner; private Table htable; - private byte [][] trrInputColumns; + private byte[][] trrInputColumns; private long timestamp; private int rowcount; private boolean logScannerActivity = false; @@ -70,17 +70,15 @@ public void restart(byte[] firstRow) throws IOException { this.scanner = this.htable.getScanner(scan); currentScan = scan; } else { - LOG.debug("TIFB.restart, firstRow: " + - Bytes.toStringBinary(firstRow) + ", endRow: " + - Bytes.toStringBinary(endRow)); + LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", endRow: " + + Bytes.toStringBinary(endRow)); Scan scan = new Scan().withStartRow(firstRow).withStopRow(endRow); TableInputFormat.addColumns(scan, trrInputColumns); this.scanner = this.htable.getScanner(scan); currentScan = scan; } } else { - LOG.debug("TIFB.restart, firstRow: " + - Bytes.toStringBinary(firstRow) + ", no endRow"); + LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", no endRow"); Scan scan = new Scan().withStartRow(firstRow); TableInputFormat.addColumns(scan, trrInputColumns); @@ -119,22 +117,21 @@ public void setHTable(Table htable) { /** * @param inputColumns the columns to be placed in {@link Result}. */ - public void setInputColumns(final byte [][] inputColumns) { + public void setInputColumns(final byte[][] inputColumns) { this.trrInputColumns = inputColumns; } /** * @param startRow the first row in the split */ - public void setStartRow(final byte [] startRow) { + public void setStartRow(final byte[] startRow) { this.startRow = startRow; } /** - * * @param endRow the last row in the split */ - public void setEndRow(final byte [] endRow) { + public void setEndRow(final byte[] endRow) { this.endRow = endRow; } @@ -157,8 +154,7 @@ public void close() { } /** - * @return ImmutableBytesWritable - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createKey() */ public ImmutableBytesWritable createKey() { @@ -166,8 +162,7 @@ public ImmutableBytesWritable createKey() { } /** - * @return RowResult - * + * n * * @see org.apache.hadoop.mapred.RecordReader#createValue() */ public Result createValue() { @@ -186,7 +181,7 @@ public float getProgress() { } /** - * @param key HStoreKey as input key. + * @param key HStoreKey as input key. 
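[Editor's note, not part of the patch] The TableRecordReader/TableRecordReaderImpl contract touched in the hunks above is normally driven by the MapReduce framework; a sketch of that loop, assuming table is an already-open org.apache.hadoop.hbase.client.Table and "cf" is a placeholder family:

    TableRecordReader reader = new TableRecordReader();
    reader.setHTable(table);
    reader.setInputColumns(new byte[][] { Bytes.toBytes("cf") });
    reader.setStartRow(HConstants.EMPTY_BYTE_ARRAY);   // scan from the first row
    reader.init();                                     // builds the scanner
    ImmutableBytesWritable key = reader.createKey();
    Result value = reader.createValue();
    while (reader.next(key, value)) {
      // key holds the row key, value holds that row's cells
    }
    reader.close();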
* @param value MapWritable as input value * @return true if there was more data */ @@ -196,11 +191,10 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException try { result = this.scanner.next(); if (logScannerActivity) { - rowcount ++; + rowcount++; if (rowcount >= logPerRowCount) { long now = EnvironmentEdgeManager.currentTime(); - LOG.info("Mapper took " + (now-timestamp) - + "ms to process " + rowcount + " rows"); + LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows"); timestamp = now; rowcount = 0; } @@ -214,16 +208,16 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException // the scanner, if the second call fails, it will be rethrown LOG.debug("recovered from " + StringUtils.stringifyException(e)); if (lastSuccessfulRow == null) { - LOG.warn("We are restarting the first next() invocation," + - " if your mapper has restarted a few other times like this" + - " then you should consider killing this job and investigate" + - " why it's taking so long."); + LOG.warn("We are restarting the first next() invocation," + + " if your mapper has restarted a few other times like this" + + " then you should consider killing this job and investigate" + + " why it's taking so long."); } if (lastSuccessfulRow == null) { restart(startRow); } else { restart(lastSuccessfulRow); - this.scanner.next(); // skip presumed already mapped row + this.scanner.next(); // skip presumed already mapped row } result = this.scanner.next(); } @@ -238,11 +232,10 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException } catch (IOException ioe) { if (logScannerActivity) { long now = EnvironmentEdgeManager.currentTime(); - LOG.info("Mapper took " + (now-timestamp) - + "ms to process " + rowcount + " rows"); + LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows"); LOG.info(ioe.toString(), ioe); - String lastRow = lastSuccessfulRow == null ? - "null" : Bytes.toStringBinary(lastSuccessfulRow); + String lastRow = + lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow); LOG.info("lastSuccessfulRow=" + lastRow); } throw ioe; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java index a64e4cdc82f9..b26d3d70adf4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,20 @@ */ package org.apache.hadoop.hbase.mapred; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** * Write a table, sorting by the input key - * * @param key class * @param value class */ @InterfaceAudience.Public @SuppressWarnings("unchecked") public interface TableReduce -extends Reducer { + extends Reducer { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java index 4506b597164e..34d2e200d967 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapred; import java.io.DataInput; @@ -40,7 +39,6 @@ /** * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. Further * documentation available on {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}. - * * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat */ @InterfaceAudience.Public @@ -59,9 +57,9 @@ public TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate } public TableSnapshotRegionSplit(TableDescriptor htd, RegionInfo regionInfo, - List locations, Scan scan, Path restoreDir) { + List locations, Scan scan, Path restoreDir) { this.delegate = - new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); + new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); } @Override @@ -85,13 +83,12 @@ public void readFields(DataInput in) throws IOException { } } - static class TableSnapshotRecordReader - implements RecordReader { + static class TableSnapshotRecordReader implements RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate; public TableSnapshotRecordReader(TableSnapshotRegionSplit split, JobConf job) - throws IOException { + throws IOException { delegate = new TableSnapshotInputFormatImpl.RecordReader(); delegate.initialize(split.delegate, job); } @@ -145,38 +142,41 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { } @Override - public RecordReader - getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException { + public RecordReader getRecordReader(InputSplit split, JobConf job, + Reporter reporter) throws IOException { return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param job the job to configure + * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. 
Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootdir. After the job is finished, restoreDir can be + * deleted. * @throws IOException if an error occurs */ public static void setInput(JobConf job, String snapshotName, Path restoreDir) - throws IOException { + throws IOException { TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param job the job to configure - * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. - * @param splitAlgo split algorithm to generate splits from region + * @param job the job to configure + * @param snapshotName the name of the snapshot to read from + * @param restoreDir a temporary directory to restore the snapshot into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restoreDir + * can be deleted. + * @param splitAlgo split algorithm to generate splits from region * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException if an error occurs */ public static void setInput(JobConf job, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { - TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, numSplitsPerRegion); + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { + TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, + numSplitsPerRegion); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java index d6e663730a7b..8cc03b222547 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,12 +21,11 @@ import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.InputSplit; +import org.apache.yetus.audience.InterfaceAudience; /** * A table split corresponds to a key range [low, high) @@ -35,35 +33,27 @@ @InterfaceAudience.Public public class TableSplit implements InputSplit, Comparable { private TableName m_tableName; - private byte [] m_startRow; - private byte [] m_endRow; + private byte[] m_startRow; + private byte[] m_endRow; private String m_regionLocation; /** default constructor */ public TableSplit() { - this((TableName)null, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, ""); + this((TableName) null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** - * Constructor - * @param tableName - * @param startRow - * @param endRow - * @param location + * Constructor nnnn */ - public TableSplit(TableName tableName, byte [] startRow, byte [] endRow, - final String location) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this.m_tableName = tableName; this.m_startRow = startRow; this.m_endRow = endRow; this.m_regionLocation = location; } - public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow, - final String location) { - this(TableName.valueOf(tableName), startRow, endRow, - location); + public TableSplit(byte[] tableName, byte[] startRow, byte[] endRow, final String location) { + this(TableName.valueOf(tableName), startRow, endRow, location); } /** @return table name */ @@ -72,17 +62,17 @@ public TableName getTable() { } /** @return table name */ - public byte [] getTableName() { - return this.m_tableName.getName(); - } + public byte[] getTableName() { + return this.m_tableName.getName(); + } /** @return starting row key */ - public byte [] getStartRow() { + public byte[] getStartRow() { return this.m_startRow; } /** @return end row key */ - public byte [] getEndRow() { + public byte[] getEndRow() { return this.m_endRow; } @@ -92,7 +82,7 @@ public String getRegionLocation() { } public String[] getLocations() { - return new String[] {this.m_regionLocation}; + return new String[] { this.m_regionLocation }; } public long getLength() { @@ -116,14 +106,14 @@ public void write(DataOutput out) throws IOException { @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("HBase table split("); - sb.append("table name: ").append(m_tableName); - sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow)); - sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow)); - sb.append(", region location: ").append(m_regionLocation); - sb.append(")"); - return sb.toString(); + StringBuilder sb = new StringBuilder(); + sb.append("HBase table split("); + sb.append("table name: ").append(m_tableName); + sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow)); + sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow)); + sb.append(", region location: ").append(m_regionLocation); + sb.append(")"); + return sb.toString(); } @Override @@ -136,11 +126,9 @@ public boolean equals(Object o) { if (o == null || !(o instanceof TableSplit)) { return false; } - TableSplit other = 
(TableSplit)o; - return m_tableName.equals(other.m_tableName) && - Bytes.equals(m_startRow, other.m_startRow) && - Bytes.equals(m_endRow, other.m_endRow) && - m_regionLocation.equals(other.m_regionLocation); + TableSplit other = (TableSplit) o; + return m_tableName.equals(other.m_tableName) && Bytes.equals(m_startRow, other.m_startRow) + && Bytes.equals(m_endRow, other.m_endRow) && m_regionLocation.equals(other.m_regionLocation); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java index c244d8b7bd91..38bc9030511c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,18 +18,14 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; @@ -47,12 +42,16 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * A job with a a map and reduce phase to count cells in a table. - * The counter lists the following stats for a given table: + * A job with a a map and reduce phase to count cells in a table. The counter lists the following + * stats for a given table: + * *

      * 1. Total number of rows in the table
      * 2. Total number of CFs across all rows
    @@ -65,17 +64,14 @@
      * 9. Total size of serialized cells across all rows.
      * </pre>
    * - * The cellcounter can take optional parameters to use a user - * supplied row/family/qualifier string to use in the report and - * second a regex based or prefix based row filter to restrict the - * count operation to a limited subset of rows from the table or a - * start time and/or end time to limit the count to a time range. + * The cellcounter can take optional parameters to use a user supplied row/family/qualifier string + * to use in the report and second a regex based or prefix based row filter to restrict the count + * operation to a limited subset of rows from the table or a start time and/or end time to limit the + * count to a time range. */ @InterfaceAudience.Public public class CellCounter extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(CellCounter.class.getName()); - + private static final Logger LOG = LoggerFactory.getLogger(CellCounter.class.getName()); /** * Name of this 'program'. @@ -87,8 +83,7 @@ public class CellCounter extends Configured implements Tool { /** * Mapper that runs the count. */ - static class CellCounterMapper - extends TableMapper { + static class CellCounterMapper extends TableMapper { /** * Counter enumeration to count the actual rows. */ @@ -117,12 +112,11 @@ public static enum Counters { @Override protected void setup(Context context) throws IOException, InterruptedException { conf = context.getConfiguration(); - separator = conf.get("ReportSeparator",":"); + separator = conf.get("ReportSeparator", ":"); } /** * Maps the data. - * * @param row The current table row key. * @param values The columns. * @param context The current context. @@ -130,13 +124,10 @@ protected void setup(Context context) throws IOException, InterruptedException { */ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Findbugs is blind to the Precondition null check") - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { - Preconditions.checkState(values != null, - "values passed to the map is null"); + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Findbugs is blind to the Precondition null check") + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { + Preconditions.checkState(values != null, "values passed to the map is null"); try { byte[] currentRow = values.getRow(); @@ -167,14 +158,13 @@ public void map(ImmutableBytesWritable row, Result values, context.getCounter("CF", currentFamilyName + "_Size").increment(size); context.write(new Text(currentFamilyName + "_Size"), new LongWritable(size)); } - if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)){ + if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)) { currentQualifier = CellUtil.cloneQualifier(value); - currentQualifierName = currentFamilyName + separator + - Bytes.toStringBinary(currentQualifier); + currentQualifierName = + currentFamilyName + separator + Bytes.toStringBinary(currentQualifier); currentRowQualifierName = currentRowKey + separator + currentQualifierName; - context.write(new Text("Total Qualifiers across all Rows"), - new LongWritable(1)); + context.write(new Text("Total Qualifiers across all Rows"), new LongWritable(1)); context.write(new Text(currentQualifierName), new LongWritable(1)); context.getCounter("Q", currentQualifierName + "_Size").increment(size); context.write(new 
Text(currentQualifierName + "_Size"), new LongWritable(size)); @@ -196,7 +186,7 @@ static class LongSumReducer extends Reducer values, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { long sum = 0; for (LongWritable val : values) { sum += val.get(); @@ -209,23 +199,21 @@ public void reduce(Key key, Iterable values, Context context) /** * Sets up the actual job. - * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Path outputDir = new Path(args[1]); - String reportSeparatorString = (args.length > 2) ? args[2]: ":"; + String reportSeparatorString = (args.length > 2) ? args[2] : ":"; conf.set("ReportSeparator", reportSeparatorString); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName)); job.setJarByClass(CellCounter.class); Scan scan = getConfiguredScanForJob(conf, args); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(LongWritable.class); job.setOutputFormatClass(TextOutputFormat.class); @@ -238,7 +226,7 @@ public static Job createSubmittableJob(Configuration conf, String[] args) } private static Scan getConfiguredScanForJob(Configuration conf, String[] args) - throws IOException { + throws IOException { // create scan with any properties set from TableInputFormat Scan s = TableInputFormat.createScanFromConfiguration(conf); // Set Scan Versions @@ -249,7 +237,7 @@ private static Scan getConfiguredScanForJob(Configuration conf, String[] args) s.setCacheBlocks(false); // Set RowFilter or Prefix Filter if applicable. Filter rowFilter = getRowFilter(args); - if (rowFilter!= null) { + if (rowFilter != null) { LOG.info("Setting Row Filter for counter."); s.setFilter(rowFilter); } @@ -262,10 +250,9 @@ private static Scan getConfiguredScanForJob(Configuration conf, String[] args) return s; } - private static Filter getRowFilter(String[] args) { Filter rowFilter = null; - String filterCriteria = (args.length > 3) ? args[3]: null; + String filterCriteria = (args.length > 3) ? args[3] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -292,11 +279,10 @@ private static long[] getTimeRange(String[] args) throws IOException { } } - if (startTime == 0 && endTime == 0) - return null; + if (startTime == 0 && endTime == 0) return null; endTime = endTime == 0 ? 
HConstants.LATEST_TIMESTAMP : endTime; - return new long [] {startTime, endTime}; + return new long[] { startTime, endTime }; } @Override @@ -312,15 +298,14 @@ public int run(String[] args) throws Exception { private void printUsage(int parameterCount) { System.err.println("ERROR: Wrong number of parameters: " + parameterCount); System.err.println("Usage: hbase cellcounter [reportSeparator] " - + "[^[regex pattern] or [Prefix]] [--starttime= --endtime=]"); + + "[^[regex pattern] or [Prefix]] [--starttime= --endtime=]"); System.err.println(" Note: -D properties will be applied to the conf used."); System.err.println(" Additionally, all of the SCAN properties from TableInputFormat can be " - + "specified to get fine grained control on what is counted."); + + "specified to get fine grained control on what is counted."); System.err.println(" -D" + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D" + TableInputFormat.SCAN_COLUMNS + "=\" ...\""); - System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY - + "=,, ..."); + System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D" + TableInputFormat.SCAN_TIMESTAMP + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_END + "="); @@ -328,10 +313,10 @@ private void printUsage(int parameterCount) { System.err.println(" -D" + TableInputFormat.SCAN_CACHEDROWS + "="); System.err.println(" -D" + TableInputFormat.SCAN_BATCHSIZE + "="); System.err.println(" parameter can be used to override the default report " - + "separator string : used to separate the rowId/column family name and qualifier name."); + + "separator string : used to separate the rowId/column family name and qualifier name."); System.err.println(" [^[regex pattern] or [Prefix] parameter can be used to limit the cell " - + "counter count operation to a limited subset of rows from the table based on regex or " - + "prefix pattern."); + + "counter count operation to a limited subset of rows from the table based on regex or " + + "prefix pattern."); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java index 6c69651d0a43..bc2f8040db17 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Facade to create Cells for HFileOutputFormat. The created Cells are of Put type. 
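[Editor's note, not part of the patch] The CellCreator facade described above builds Put-type cells from byte-array slices; a hedged sketch, assuming an existing Configuration named conf and placeholder row/family/qualifier/value literals:

    CellCreator creator = new CellCreator(conf);
    byte[] row = Bytes.toBytes("row1");
    byte[] fam = Bytes.toBytes("f");
    byte[] qual = Bytes.toBytes("q");
    byte[] val = Bytes.toBytes("v");
    // Offsets and lengths describe the slice of each array that makes up the cell.
    Cell cell = creator.create(row, 0, row.length, fam, 0, fam.length,
        qual, 0, qual.length, System.currentTimeMillis(), val, 0, val.length);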
@@ -34,97 +33,91 @@ public class CellCreator { public static final String VISIBILITY_EXP_RESOLVER_CLASS = - "hbase.mapreduce.visibility.expression.resolver.class"; + "hbase.mapreduce.visibility.expression.resolver.class"; private VisibilityExpressionResolver visExpResolver; public CellCreator(Configuration conf) { - Class clazz = conf.getClass( - VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, + Class clazz = + conf.getClass(VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, VisibilityExpressionResolver.class); this.visExpResolver = ReflectionUtils.newInstance(clazz, conf); this.visExpResolver.init(); } /** - * @param row row key - * @param roffset row offset - * @param rlength row length - * @param family family name - * @param foffset family offset - * @param flength family length + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length + * @param qoffset qualifier offset + * @param qlength qualifier length * @param timestamp version timestamp - * @param value column value - * @param voffset value offset - * @param vlength value length - * @return created Cell - * @throws IOException + * @param value column value + * @param voffset value offset + * @param vlength value length + * @return created Cell n */ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, - byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, - int vlength) throws IOException { + byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, + int vlength) throws IOException { return create(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, - timestamp, value, voffset, vlength, (List)null); + timestamp, value, voffset, vlength, (List) null); } /** - * @param row row key - * @param roffset row offset - * @param rlength row length - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * @param timestamp version timestamp - * @param value column value - * @param voffset value offset - * @param vlength value length + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param timestamp version timestamp + * @param value column value + * @param voffset value offset + * @param vlength value length * @param visExpression visibility expression to be associated with cell - * @return created Cell - * @throws IOException - * @deprecated since 0.98.9 + * @return created Cell n * @deprecated since 0.98.9 * @see HBASE-10560 */ @Deprecated public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, - byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, - int vlength, String visExpression) throws IOException { + byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, + int vlength, String visExpression) throws IOException { List visTags = null; if 
(visExpression != null) { visTags = this.visExpResolver.createVisibilityExpTags(visExpression); } return new KeyValue(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, - qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, visTags); + qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, visTags); } /** - * @param row row key - * @param roffset row offset - * @param rlength row length - * @param family family name - * @param foffset family offset - * @param flength family length + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset family offset + * @param flength family length * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length + * @param qoffset qualifier offset + * @param qlength qualifier length * @param timestamp version timestamp - * @param value column value - * @param voffset value offset - * @param vlength value length - * @param tags - * @return created Cell - * @throws IOException + * @param value column value + * @param voffset value offset + * @param vlength value length n * @return created Cell n */ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, - byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, - int vlength, List tags) throws IOException { + byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, + int vlength, List tags) throws IOException { return new KeyValue(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, - qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, tags); + qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, tags); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java index 2e7e020986ff..9d567f95a0e0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,16 +22,15 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class CellSerialization implements Serialization { @@ -60,7 +59,7 @@ public void close() throws IOException { @Override public KeyValue deserialize(Cell ignore) throws IOException { - // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO + // I can't overwrite the passed in KV, not from a proto kv, not just yet. 
TODO return KeyValueUtil.create(this.dis); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java index de961cf35458..9380b0e71336 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -30,18 +28,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Cells. - * Reads in all Cells from passed Iterator, sorts them, then emits - * Cells in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Cells. Reads in all Cells from passed Iterator, sorts them, then emits Cells in + * sorted order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 */ @InterfaceAudience.Public public class CellSortReducer - extends Reducer { + extends Reducer { protected void reduce(ImmutableBytesWritable row, Iterable kvs, - Reducer.Context context) - throws java.io.IOException, InterruptedException { + Reducer.Context context) + throws java.io.IOException, InterruptedException { TreeSet map = new TreeSet<>(CellComparator.getInstance()); for (Cell kv : kvs) { try { @@ -52,7 +48,7 @@ protected void reduce(ImmutableBytesWritable row, Iterable kvs, } context.setStatus("Read " + map.getClass()); int index = 0; - for (Cell kv: map) { + for (Cell kv : map) { context.write(row, new MapReduceExtendedCell(kv)); if (++index % 100 == 0) context.setStatus("Wrote " + index); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index 9344400e4458..273271b1867b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,9 +45,9 @@ import org.slf4j.LoggerFactory; /** - * Tool used to copy a table to another one which can be on a different setup. - * It is also configurable with a start and time as well as a specification - * of the region server implementation if different from the local cluster. + * Tool used to copy a table to another one which can be on a different setup. It is also + * configurable with a start and time as well as a specification of the region server implementation + * if different from the local cluster. */ @InterfaceAudience.Public public class CopyTable extends Configured implements Tool { @@ -102,8 +101,7 @@ private void initCopyTableMapperReducerJob(Job job, Scan scan) throws IOExceptio /** * Sets up the actual job. - * - * @param args The command line parameters. + * @param args The command line parameters. * @return The newly created job. 
* @throws IOException When setting up the job fails. */ @@ -146,20 +144,20 @@ public Job createSubmittableJob(String[] args) throws IOException { scan.withStopRow(Bytes.toBytesBinary(stopRow)); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - Map cfRenameMap = new HashMap<>(); - for(String fam : fams) { + Map cfRenameMap = new HashMap<>(); + for (String fam : fams) { String sourceCf; - if(fam.contains(":")) { - // fam looks like "sourceCfName:destCfName" - String[] srcAndDest = fam.split(":", 2); - sourceCf = srcAndDest[0]; - String destCf = srcAndDest[1]; - cfRenameMap.put(sourceCf, destCf); + if (fam.contains(":")) { + // fam looks like "sourceCfName:destCfName" + String[] srcAndDest = fam.split(":", 2); + sourceCf = srcAndDest[0]; + String destCf = srcAndDest[1]; + cfRenameMap.put(sourceCf, destCf); } else { - // fam is just "sourceCf" - sourceCf = fam; + // fam is just "sourceCf" + sourceCf = fam; } scan.addFamily(Bytes.toBytes(sourceCf)); } @@ -177,7 +175,7 @@ public Job createSubmittableJob(String[] args) throws IOException { LOG.info("HFiles will be stored at " + this.bulkloadDir); HFileOutputFormat2.setOutputPath(job, bulkloadDir); try (Connection conn = ConnectionFactory.createConnection(getConf()); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { HFileOutputFormat2.configureIncrementalLoadMap(job, admin.getDescriptor((TableName.valueOf(dstTableName)))); } @@ -191,14 +189,14 @@ public Job createSubmittableJob(String[] args) throws IOException { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + - "[--new.name=NEW] [--peer.adr=ADR] "); + System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + + "[--new.name=NEW] [--peer.adr=ADR] "); System.err.println(); System.err.println("Options:"); System.err.println(" rs.class hbase.regionserver.class of the peer cluster"); @@ -213,36 +211,36 @@ private static void printUsage(final String errorMsg) { System.err.println(" new.name new table's name"); System.err.println(" peer.adr Address of the peer cluster given in the format"); System.err.println(" hbase.zookeeper.quorum:hbase.zookeeper.client" - + ".port:zookeeper.znode.parent"); + + ".port:zookeeper.znode.parent"); System.err.println(" families comma-separated list of families to copy"); System.err.println(" To copy from cf1 to cf2, give sourceCfName:destCfName. 
"); System.err.println(" To keep the same name, just give \"cfName\""); System.err.println(" all.cells also copy delete markers and deleted cells"); - System.err.println(" bulkload Write input into HFiles and bulk load to the destination " - + "table"); + System.err + .println(" bulkload Write input into HFiles and bulk load to the destination " + "table"); System.err.println(" snapshot Copy the data from snapshot to destination table."); System.err.println(); System.err.println("Args:"); System.err.println(" tablename Name of the table to copy"); System.err.println(); System.err.println("Examples:"); - System.err.println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + - "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); + System.err + .println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + + "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); System.err.println(" To copy data from 'sourceTableSnapshot' to 'destTable': "); System.err.println(" $ hbase org.apache.hadoop.hbase.mapreduce.CopyTable " - + "--snapshot --new.name=destTable sourceTableSnapshot"); + + "--snapshot --new.name=destTable sourceTableSnapshot"); System.err.println(" To copy data from 'sourceTableSnapshot' and bulk load to 'destTable': "); System.err.println(" $ hbase org.apache.hadoop.hbase.mapreduce.CopyTable " - + "--new.name=destTable --snapshot --bulkload sourceTableSnapshot"); + + "--new.name=destTable --snapshot --bulkload sourceTableSnapshot"); System.err.println("For performance consider the following general option:\n" - + " It is recommended that you set the following to >=100. A higher value uses more memory but\n" - + " decreases the round trip time to the server and may increase performance.\n" - + " -Dhbase.client.scanner.caching=100\n" - + " The following should always be set to false, to prevent writing data twice, which may produce \n" - + " inaccurate results.\n" - + " -Dmapreduce.map.speculative=false"); + + " It is recommended that you set the following to >=100. A higher value uses more memory but\n" + + " decreases the round trip time to the server and may increase performance.\n" + + " -Dhbase.client.scanner.caching=100\n" + + " The following should always be set to false, to prevent writing data twice, which may produce \n" + + " inaccurate results.\n" + " -Dmapreduce.map.speculative=false"); } private boolean doCommandLine(final String[] args) { @@ -333,7 +331,7 @@ private boolean doCommandLine(final String[] args) { continue; } - if(cmd.startsWith("--snapshot")){ + if (cmd.startsWith("--snapshot")) { readingSnapshot = true; continue; } @@ -370,7 +368,7 @@ private boolean doCommandLine(final String[] args) { if (readingSnapshot && dstTableName == null) { printUsage("The --new.name= for destination table should be " - + "provided when copying data from snapshot ."); + + "provided when copying data from snapshot ."); return false; } @@ -393,8 +391,7 @@ private boolean doCommandLine(final String[] args) { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. 
*/ public static void main(String[] args) throws Exception { @@ -418,8 +415,10 @@ public int run(String[] args) throws Exception { LOG.info("Trying to bulk load data to destination table: " + dstTableName); LOG.info("command: ./bin/hbase {} {} {}", BulkLoadHFilesTool.NAME, this.bulkloadDir.toString(), this.dstTableName); - if (!BulkLoadHFiles.create(getConf()).bulkLoad(TableName.valueOf(dstTableName), bulkloadDir) - .isEmpty()) { + if ( + !BulkLoadHFiles.create(getConf()).bulkLoad(TableName.valueOf(dstTableName), bulkloadDir) + .isEmpty() + ) { // bulkloadDir is deleted only BulkLoadHFiles was successful so that one can rerun // BulkLoadHFiles. FileSystem fs = CommonFSUtils.getCurrentFileSystem(getConf()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 620c02eefa7e..2b0595205877 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,13 +25,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; @@ -43,6 +39,9 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityLabelOrdinalProvider; import org.apache.hadoop.hbase.security.visibility.VisibilityUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This implementation creates tags by expanding expression using label ordinal. 
Labels will be @@ -51,7 +50,7 @@ @InterfaceAudience.Private public class DefaultVisibilityExpressionResolver implements VisibilityExpressionResolver { private static final Logger LOG = - LoggerFactory.getLogger(DefaultVisibilityExpressionResolver.class); + LoggerFactory.getLogger(DefaultVisibilityExpressionResolver.class); private Configuration conf; private final Map labels = new HashMap<>(); @@ -140,7 +139,7 @@ public int getLabelOrdinal(String label) { public String getLabel(int ordinal) { // Unused throw new UnsupportedOperationException( - "getLabel should not be used in VisibilityExpressionResolver"); + "getLabel should not be used in VisibilityExpressionResolver"); } }; return VisibilityUtils.createVisibilityExpTags(visExpression, true, false, null, provider); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index ed31c8422e7e..86a6a6705561 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,39 +27,36 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Driver for hbase mapreduce jobs. Select which to run by passing - * name of job to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { - private Driver() {} + private Driver() { + } public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); - pgd.addClass(RowCounter.NAME, RowCounter.class, - "Count rows in HBase table."); - pgd.addClass(CellCounter.NAME, CellCounter.class, - "Count cells in HBase table."); + pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table."); + pgd.addClass(CellCounter.NAME, CellCounter.class, "Count cells in HBase table."); pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS."); pgd.addClass(Import.NAME, Import.class, "Import data written by Export."); pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format."); - pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, - "Complete a bulk data load."); + pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, "Complete a bulk data load."); pgd.addClass(CopyTable.NAME, CopyTable.class, - "Export a table from local cluster to peer cluster."); - pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" + - " data from tables in two different clusters. It" + - " doesn't work for incrementColumnValues'd cells since" + - " timestamp is changed after appending to WAL."); + "Export a table from local cluster to peer cluster."); + pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, + "Compare" + " data from tables in two different clusters. 
It" + + " doesn't work for incrementColumnValues'd cells since" + + " timestamp is changed after appending to WAL."); pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files."); - pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" + - " the specific snapshot to a given FileSystem."); - pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + - "table and cf and confirm that the files they point to are correct."); + pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, + "Export" + " the specific snapshot to a given FileSystem."); + pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + + "table and cf and confirm that the files they point to are correct."); - ProgramDriver.class.getMethod("driver", new Class [] {String[].class}). - invoke(pgd, new Object[]{args}); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java index eb0f649e643b..17e305cbb3a2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java @@ -1,33 +1,31 @@ -/** -* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.Job; @@ -38,8 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Export an HBase table. - * Writes content to sequence files up in HDFS. Use {@link Import} to read it + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it * back in again. */ @InterfaceAudience.Public @@ -49,14 +46,12 @@ public class Export extends Configured implements Tool { /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { Triple arguments = ExportUtils.getArgumentsFromCommandLine(conf, args); String tableName = arguments.getFirst().getNameAsString(); Path outputDir = arguments.getThird(); @@ -66,12 +61,13 @@ public static Job createSubmittableJob(Configuration conf, String[] args) // Set optional scan parameters Scan s = arguments.getSecond(); IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job); - // No reducers. Just write straight to output files. + // No reducers. Just write straight to output files. job.setNumReduceTasks(0); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Result.class); - FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs. + FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't + // have a default fs. 
return job; } @@ -80,7 +76,7 @@ public int run(String[] args) throws Exception { if (!ExportUtils.isValidArguements(args)) { ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.getLength(args)); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the export"); + + "=jobName - use the specified mapreduce job name for the export"); System.err.println("For MR performance consider the following properties:"); System.err.println(" -D mapreduce.map.speculative=false"); System.err.println(" -D mapreduce.reduce.speculative=false"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java index 568c47fd6e53..0c37534ff409 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +20,11 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.IncompatibleFilterException; @@ -40,10 +35,13 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} - * and org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). + * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} and + * org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). */ @InterfaceAudience.Private public final class ExportUtils { @@ -52,37 +50,39 @@ public final class ExportUtils { public static final String EXPORT_BATCHING = "hbase.export.scanner.batch"; public static final String EXPORT_CACHING = "hbase.export.scanner.caching"; public static final String EXPORT_VISIBILITY_LABELS = "hbase.export.visibility.labels"; + /** * Common usage for other export tools. - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ public static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); + System.err.println("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); System.err.println(" Note: -D properties will be applied to the conf used. 
"); System.err.println(" For example: "); System.err.println(" -D " + FileOutputFormat.COMPRESS + "=true"); - System.err.println(" -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); + System.err.println( + " -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); System.err.println(" -D " + FileOutputFormat.COMPRESS_TYPE + "=BLOCK"); System.err.println(" Additionally, the following SCAN properties can be specified"); System.err.println(" to control/limit what is exported.."); - System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); + System.err + .println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D " + RAW_SCAN + "=true"); System.err.println(" -D " + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D " + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D " + HConstants.HBASE_CLIENT_SCANNER_CACHING + "=100"); System.err.println(" -D " + EXPORT_VISIBILITY_LABELS + "="); System.err.println("For tables with very wide rows consider setting the batch size as below:\n" - + " -D " + EXPORT_BATCHING + "=10\n" - + " -D " + EXPORT_CACHING + "=100"); + + " -D " + EXPORT_BATCHING + "=10\n" + " -D " + EXPORT_CACHING + "=100"); } private static Filter getExportFilter(String[] args) { Filter exportFilter; - String filterCriteria = (args.length > 5) ? args[5]: null; + String filterCriteria = (args.length > 5) ? args[5] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -97,23 +97,24 @@ public static boolean isValidArguements(String[] args) { return args != null && args.length >= 2; } - public static Triple getArgumentsFromCommandLine( - Configuration conf, String[] args) throws IOException { + public static Triple getArgumentsFromCommandLine(Configuration conf, + String[] args) throws IOException { if (!isValidArguements(args)) { return null; } - return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), new Path(args[1])); + return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), + new Path(args[1])); } static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOException { Scan s = new Scan(); // Optional arguments. // Set Scan Versions - int versions = args.length > 2? Integer.parseInt(args[2]): 1; + int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1; s.readVersions(versions); // Set Scan Range - long startTime = args.length > 3? Long.parseLong(args[3]): 0L; - long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE; + long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L; + long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE; s.setTimeRange(startTime, endTime); // Set cache blocks s.setCacheBlocks(false); @@ -134,8 +135,8 @@ static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOE } // Set RowFilter or Prefix Filter if applicable. 
Filter exportFilter = getExportFilter(args); - if (exportFilter!= null) { - LOG.info("Setting Scan Filter for Export."); + if (exportFilter != null) { + LOG.info("Setting Scan Filter for Export."); s.setFilter(exportFilter); } List labels = null; @@ -163,9 +164,8 @@ static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOE LOG.error("Caching could not be set", e); } } - LOG.info("versions=" + versions + ", starttime=" + startTime - + ", endtime=" + endTime + ", keepDeletedCells=" + raw - + ", visibility labels=" + labels); + LOG.info("versions=" + versions + ", starttime=" + startTime + ", endtime=" + endTime + + ", keepDeletedCells=" + raw + ", visibility labels=" + labels); return s; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index 1909b2d57b38..36fb493033c4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -31,74 +28,68 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record. */ @InterfaceAudience.Public -public class GroupingTableMapper -extends TableMapper implements Configurable { +public class GroupingTableMapper extends TableMapper + implements Configurable { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase. + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase. */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; /** The grouping columns. */ - protected byte [][] columns; + protected byte[][] columns; /** The current configuration. */ private Configuration conf = null; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table to be processed. - * @param scan The scan with the columns etc. - * @param groupColumns A space separated list of columns used to form the - * key used in collect. - * @param mapper The mapper class. - * @param job The current job. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table to be processed. + * @param scan The scan with the columns etc. + * @param groupColumns A space separated list of columns used to form the key used in collect. + * @param mapper The mapper class. + * @param job The current job. * @throws IOException When setting up the job fails. 
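A short sketch of the initJob wiring this javadoc describes (not part of the patch; the table and column names are made up), grouping map output keys by the values of two columns:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.GroupingTableMapper;
import org.apache.hadoop.mapreduce.Job;

public class GroupingJobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "grouping-example");
    Scan scan = new Scan();
    // groupColumns is a space separated list of family:qualifier names.
    GroupingTableMapper.initJob("myTable", scan, "cf:a cf:b", GroupingTableMapper.class, job);
    // A reducer and an output format would be configured here before submitting the job.
  }
}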
*/ @SuppressWarnings("unchecked") public static void initJob(String table, Scan scan, String groupColumns, Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); job.getConfiguration().set(GROUP_COLUMNS, groupColumns); } /** - * Extract the grouping columns from value to construct a new key. Pass the - * new key and value to reduce. If any of the grouping columns are not found - * in the value, the record is skipped. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. - * @throws IOException When writing the record fails. + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. + * @param key The current key. + * @param value The current value. + * @param context The current context. + * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); context.write(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. *
<p>
    * Override this method if you want to deal with nulls differently. - * - * @param r The current values. + * @param r The current values. * @return Array of byte values. */ protected byte[][] extractKeyValues(Result r) { @@ -106,9 +97,9 @@ protected byte[][] extractKeyValues(Result r) { ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -116,7 +107,7 @@ protected byte[][] extractKeyValues(Result r) { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -127,17 +118,16 @@ protected byte[][] extractKeyValues(Result r) { * Create a key by concatenating multiple column values. *
<p>
    * Override this function in order to produce different types of keys. - * - * @param vals The current key/values. + * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); @@ -147,7 +137,6 @@ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -158,17 +147,15 @@ public Configuration getConf() { /** * Sets the configuration. This is used to set up the grouping details. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] cols = conf.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 03254feec042..4ff4a5b95b91 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,9 +41,8 @@ import org.slf4j.LoggerFactory; /** - * Simple MR input format for HFiles. - * This code was borrowed from Apache Crunch project. - * Updated to the recent version of HBase. + * Simple MR input format for HFiles. This code was borrowed from Apache Crunch project. Updated to + * the recent version of HBase. */ @InterfaceAudience.Private public class HFileInputFormat extends FileInputFormat { @@ -51,9 +50,9 @@ public class HFileInputFormat extends FileInputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileInputFormat.class); /** - * File filter that removes all "hidden" files. This might be something worth removing from - * a more general purpose utility; it accounts for the presence of metadata files created - * in the way we're doing exports. + * File filter that removes all "hidden" files. This might be something worth removing from a more + * general purpose utility; it accounts for the presence of metadata files created in the way + * we're doing exports. 
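For context on the HFileInputFormat changes here (illustrative only, not part of the patch), this input format plugs into a job like any other FileInputFormat; the input path and job name below are assumptions, and the record reader produces NullWritable keys with Cell values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.HFileInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class HFileReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "read-hfiles");
    job.setInputFormatClass(HFileInputFormat.class);
    FileInputFormat.addInputPath(job, new Path("/export/hfiles")); // assumed HFile directory
    // A mapper consuming (NullWritable, Cell) pairs would be configured here.
  }
}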
*/ static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() { @Override @@ -81,7 +80,7 @@ private static class HFileRecordReader extends RecordReader @Override public void initialize(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { FileSplit fileSplit = (FileSplit) split; conf = context.getConfiguration(); Path path = fileSplit.getPath(); @@ -95,7 +94,6 @@ public void initialize(InputSplit split, TaskAttemptContext context) } - @Override public boolean nextKeyValue() throws IOException, InterruptedException { boolean hasNext; @@ -161,8 +159,8 @@ protected List listStatus(JobContext job) throws IOException { } @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { return new HFileRecordReader(); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index d68ee88fe4a8..c17066974bb0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -93,18 +93,17 @@ import org.slf4j.LoggerFactory; /** - * Writes HFiles. Passed Cells must arrive in order. - * Writes current time as the sequence id for the file. Sets the major compacted - * attribute on created {@link HFile}s. Calling write(null,null) will forcibly roll - * all HFiles being written. + * Writes HFiles. Passed Cells must arrive in order. Writes current time as the sequence id for the + * file. Sets the major compacted attribute on created {@link HFile}s. Calling write(null,null) will + * forcibly roll all HFiles being written. *
<p>
    - * Using this class as part of a MapReduce job is best done - * using {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. + * Using this class as part of a MapReduce job is best done using + * {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. */ @InterfaceAudience.Public -public class HFileOutputFormat2 - extends FileOutputFormat { +public class HFileOutputFormat2 extends FileOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileOutputFormat2.class); + static class TableInfo { private TableDescriptor tableDesctiptor; private RegionLocator regionLocator; @@ -134,38 +133,33 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) // reducer run using conf. // These should not be changed by the client. static final String COMPRESSION_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.compression"; - static final String BLOOM_TYPE_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomtype"; - static final String BLOOM_PARAM_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomparam"; - static final String BLOCK_SIZE_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.blocksize"; + "hbase.hfileoutputformat.families.compression"; + static final String BLOOM_TYPE_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomtype"; + static final String BLOOM_PARAM_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomparam"; + static final String BLOCK_SIZE_FAMILIES_CONF_KEY = "hbase.mapreduce.hfileoutputformat.blocksize"; static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; + "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; // This constant is public since the client can modify this when setting // up their conf object and thus refer to this symbol. // It is present for backwards compatibility reasons. Use it only to // override the auto-detection of datablock encoding and compression. public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.datablock.encoding"; + "hbase.mapreduce.hfileoutputformat.datablock.encoding"; public static final String COMPRESSION_OVERRIDE_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.compression"; + "hbase.mapreduce.hfileoutputformat.compression"; /** * Keep locality while generating HFiles for bulkload. 
See HBASE-12596 */ public static final String LOCALITY_SENSITIVE_CONF_KEY = - "hbase.bulkload.locality.sensitive.enabled"; + "hbase.bulkload.locality.sensitive.enabled"; private static final boolean DEFAULT_LOCALITY_SENSITIVE = true; - static final String OUTPUT_TABLE_NAME_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.table.name"; + static final String OUTPUT_TABLE_NAME_CONF_KEY = "hbase.mapreduce.hfileoutputformat.table.name"; static final String MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY = - "hbase.mapreduce.use.multi.table.hfileoutputformat"; + "hbase.mapreduce.use.multi.table.hfileoutputformat"; - public static final String REMOTE_CLUSTER_CONF_PREFIX = - "hbase.hfileoutputformat.remote.cluster."; + public static final String REMOTE_CLUSTER_CONF_PREFIX = "hbase.hfileoutputformat.remote.cluster."; public static final String REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY = REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; public static final String REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY = @@ -177,8 +171,8 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) public static final String STORAGE_POLICY_PROPERTY_CF_PREFIX = STORAGE_POLICY_PROPERTY + "."; @Override - public RecordWriter getRecordWriter( - final TaskAttemptContext context) throws IOException, InterruptedException { + public RecordWriter + getRecordWriter(final TaskAttemptContext context) throws IOException, InterruptedException { return createRecordWriter(context, this.getOutputCommitter(context)); } @@ -190,7 +184,7 @@ static RecordWriter createRecordWrit final TaskAttemptContext context, final OutputCommitter committer) throws IOException { // Get the path of the temporary output file - final Path outputDir = ((FileOutputCommitter)committer).getWorkPath(); + final Path outputDir = ((FileOutputCommitter) committer).getWorkPath(); final Configuration conf = context.getConfiguration(); final boolean writeMultipleTables = conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); @@ -200,19 +194,19 @@ static RecordWriter createRecordWrit } final FileSystem fs = outputDir.getFileSystem(conf); // These configs. are from hbase-*.xml - final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - // Invented config. Add to hbase-*.xml if other than default compression. - final String defaultCompressionStr = conf.get("hfile.compression", - Compression.Algorithm.NONE.getName()); + final long maxsize = + conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); + // Invented config. Add to hbase-*.xml if other than default compression. + final String defaultCompressionStr = + conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr); String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY); - final Algorithm overriddenCompression = compressionStr != null ? - Compression.getCompressionAlgorithmByName(compressionStr): null; - final boolean compactionExclude = conf.getBoolean( - "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); - final Set allTableNames = Arrays.stream(writeTableNames.split( - Bytes.toString(tableSeparator))).collect(Collectors.toSet()); + final Algorithm overriddenCompression = + compressionStr != null ? 
Compression.getCompressionAlgorithmByName(compressionStr) : null; + final boolean compactionExclude = + conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false); + final Set allTableNames = Arrays + .stream(writeTableNames.split(Bytes.toString(tableSeparator))).collect(Collectors.toSet()); // create a map from column family to the compression algorithm final Map compressionMap = createFamilyCompressionMap(conf); @@ -221,10 +215,10 @@ static RecordWriter createRecordWrit final Map blockSizeMap = createFamilyBlockSizeMap(conf); String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY); - final Map datablockEncodingMap - = createFamilyDataBlockEncodingMap(conf); - final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ? - DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; + final Map datablockEncodingMap = + createFamilyDataBlockEncodingMap(conf); + final DataBlockEncoding overriddenEncoding = + dataBlockEncodingStr != null ? DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; return new RecordWriter() { // Map of families to writers and how much has been output on the writer. @@ -248,10 +242,10 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { if (writeMultipleTables) { tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get()); tableNameBytes = TableName.valueOf(tableNameBytes).getNameWithNamespaceInclAsString() - .getBytes(Charset.defaultCharset()); + .getBytes(Charset.defaultCharset()); if (!allTableNames.contains(Bytes.toString(tableNameBytes))) { - throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) + - " not expected"); + throw new IllegalArgumentException( + "TableName " + Bytes.toString(tableNameBytes) + " not expected"); } } byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableNameBytes, family); @@ -272,8 +266,10 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { } // This can only happen once a row is finished though - if (wl != null && wl.written + length >= maxsize - && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) { + if ( + wl != null && wl.written + length >= maxsize + && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0 + ) { rollWriters(wl); } @@ -284,10 +280,10 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { HRegionLocation loc = null; String tableName = Bytes.toString(tableNameBytes); if (tableName != null) { - try (Connection connection = ConnectionFactory.createConnection( - createRemoteClusterConf(conf)); - RegionLocator locator = - connection.getRegionLocator(TableName.valueOf(tableName))) { + try ( + Connection connection = + ConnectionFactory.createConnection(createRemoteClusterConf(conf)); + RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) { loc = locator.getRegionLocation(rowKey); } catch (Throwable e) { LOG.warn("Something wrong locating rowkey {} in {}", Bytes.toString(rowKey), @@ -300,7 +296,7 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { } else { LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey)); InetSocketAddress initialIsa = - new InetSocketAddress(loc.getHostname(), loc.getPort()); + new InetSocketAddress(loc.getHostname(), loc.getPort()); if (initialIsa.isUnresolved()) { LOG.trace("Failed resolve address {}, use default writer", loc.getHostnamePort()); } else { @@ -331,6 +327,7 @@ private Path getTableRelativePath(byte[] tableNameBytes) { } return tableRelPath; } + 
private void rollWriters(WriterLength writerLength) throws IOException { if (writerLength != null) { closeWriter(writerLength); @@ -343,8 +340,8 @@ private void rollWriters(WriterLength writerLength) throws IOException { private void closeWriter(WriterLength wl) throws IOException { if (wl.writer != null) { - LOG.info("Writer=" + wl.writer.getPath() + - ((wl.written == 0)? "": ", wrote=" + wl.written)); + LOG.info( + "Writer=" + wl.writer.getPath() + ((wl.written == 0) ? "" : ", wrote=" + wl.written)); close(wl.writer); wl.writer = null; } @@ -366,9 +363,11 @@ private Configuration createRemoteClusterConf(Configuration conf) { for (Entry entry : conf) { String key = entry.getKey(); - if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) { + if ( + REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key) + ) { // Handled them above continue; } @@ -388,15 +387,15 @@ private Configuration createRemoteClusterConf(Configuration conf) { * Create a new StoreFile.Writer. * @return A WriterLength, containing a new StoreFile.Writer. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED", - justification="Not important") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", + justification = "Not important") private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, - InetSocketAddress[] favoredNodes) throws IOException { + InetSocketAddress[] favoredNodes) throws IOException { byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family); Path familydir = new Path(outputDir, Bytes.toString(family)); if (writeMultipleTables) { - familydir = new Path(outputDir, - new Path(getTableRelativePath(tableName), Bytes.toString(family))); + familydir = + new Path(outputDir, new Path(getTableRelativePath(tableName), Bytes.toString(family))); } WriterLength wl = new WriterLength(); Algorithm compression = overriddenCompression; @@ -414,9 +413,9 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) - .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) - .withColumnFamily(family).withTableName(tableName); + .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withColumnFamily(family).withTableName(tableName); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { contextBuilder.withIncludesTags(true); @@ -424,13 +423,13 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration HFileContext hFileContext = contextBuilder.build(); if (null == favoredNodes) { - wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).build(); + wl.writer = + new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs).withOutputDir(familydir) + .withBloomType(bloomType).withFileContext(hFileContext).build(); } else { wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs)) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).withFavoredNodes(favoredNodes).build(); + .withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext) + .withFavoredNodes(favoredNodes).build(); } this.writers.put(tableAndFamily, wl); @@ -439,10 +438,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration private void close(final StoreFileWriter w) throws IOException { if (w != null) { - w.appendFileInfo(BULKLOAD_TIME_KEY, - Bytes.toBytes(EnvironmentEdgeManager.currentTime())); - w.appendFileInfo(BULKLOAD_TASK_KEY, - Bytes.toBytes(context.getTaskAttemptID().toString())); + w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true)); w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); w.appendTrackedTimestampsToMetadata(); @@ -452,7 +449,7 @@ private void close(final StoreFileWriter w) throws IOException { @Override public void close(TaskAttemptContext c) throws IOException, InterruptedException { - for (WriterLength wl: this.writers.values()) { + for (WriterLength wl : this.writers.values()) { close(wl.writer); } } @@ -463,14 +460,13 @@ public void close(TaskAttemptContext c) throws IOException, InterruptedException * Configure block storage policy for CF after the directory is created. 
*/ static void configureStoragePolicy(final Configuration conf, final FileSystem fs, - byte[] tableAndFamily, Path cfPath) { + byte[] tableAndFamily, Path cfPath) { if (null == conf || null == fs || null == tableAndFamily || null == cfPath) { return; } - String policy = - conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), - conf.get(STORAGE_POLICY_PROPERTY)); + String policy = conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), + conf.get(STORAGE_POLICY_PROPERTY)); CommonFSUtils.setStoragePolicy(fs, cfPath, policy); } @@ -483,22 +479,20 @@ static class WriterLength { } /** - * Return the start keys of all of the regions in this table, - * as a list of ImmutableBytesWritable. + * Return the start keys of all of the regions in this table, as a list of ImmutableBytesWritable. */ private static List getRegionStartKeys(List regionLocators, - boolean writeMultipleTables) - throws IOException { + boolean writeMultipleTables) throws IOException { ArrayList ret = new ArrayList<>(); - for(RegionLocator regionLocator : regionLocators) { + for (RegionLocator regionLocator : regionLocators) { TableName tableName = regionLocator.getName(); LOG.info("Looking up current regions for table " + tableName); byte[][] byteKeys = regionLocator.getStartKeys(); for (byte[] byteKey : byteKeys) { - byte[] fullKey = byteKey; //HFileOutputFormat2 use case + byte[] fullKey = byteKey; // HFileOutputFormat2 use case if (writeMultipleTables) { - //MultiTableHFileOutputFormat use case + // MultiTableHFileOutputFormat use case fullKey = combineTableNameSuffix(tableName.getName(), byteKey); } if (LOG.isDebugEnabled()) { @@ -511,12 +505,12 @@ private static List getRegionStartKeys(List startKeys, boolean writeMultipleTables) throws IOException { + List startKeys, boolean writeMultipleTables) throws IOException { LOG.info("Writing partition information to " + partitionsPath); if (startKeys.isEmpty()) { throw new IllegalArgumentException("No regions passed"); @@ -534,16 +528,15 @@ private static void writePartitions(Configuration conf, Path partitionsPath, } if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) { throw new IllegalArgumentException( - "First region of table should have empty start key. Instead has: " + "First region of table should have empty start key. Instead has: " + Bytes.toStringBinary(first.get())); } sorted.remove(sorted.first()); // Write the actual file FileSystem fs = partitionsPath.getFileSystem(conf); - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, partitionsPath, ImmutableBytesWritable.class, - NullWritable.class); + SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, + ImmutableBytesWritable.class, NullWritable.class); try { for (ImmutableBytesWritable startKey : sorted) { @@ -555,49 +548,47 @@ private static void writePartitions(Configuration conf, Path partitionsPath, } /** - * Configure a MapReduce Job to perform an incremental load into the given - * table. This + * Configure a MapReduce Job to perform an incremental load into the given table. This *
<ul>
-   *   <li>Inspects the table to configure a total order partitioner</li>
-   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
-   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
-   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
-   *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
-   *     PutSortReducer)</li>
-   *   <li>Sets the HBase cluster key to load region locations for locality-sensitive</li>
+   * <li>Inspects the table to configure a total order partitioner</li>
+   * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+   * <li>Sets the number of reduce tasks to match the current number of regions</li>
+   * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+   * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+   * PutSortReducer)</li>
+   * <li>Sets the HBase cluster key to load region locations for locality-sensitive</li>
    * </ul>
    * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. */ public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator) - throws IOException { + throws IOException { configureIncrementalLoad(job, table.getDescriptor(), regionLocator); configureRemoteCluster(job, table.getConfiguration()); } /** - * Configure a MapReduce Job to perform an incremental load into the given - * table. This + * Configure a MapReduce Job to perform an incremental load into the given table. This *
<ul>
-   *   <li>Inspects the table to configure a total order partitioner</li>
-   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
-   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
-   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
-   *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
-   *     PutSortReducer)</li>
+   * <li>Inspects the table to configure a total order partitioner</li>
+   * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+   * <li>Sets the number of reduce tasks to match the current number of regions</li>
+   * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+   * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+   * PutSortReducer)</li>
    * </ul>
    * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. */ public static void configureIncrementalLoad(Job job, TableDescriptor tableDescriptor, - RegionLocator regionLocator) throws IOException { + RegionLocator regionLocator) throws IOException { ArrayList singleTableInfo = new ArrayList<>(); singleTableInfo.add(new TableInfo(tableDescriptor, regionLocator)); configureIncrementalLoad(job, singleTableInfo, HFileOutputFormat2.class); } static void configureIncrementalLoad(Job job, List multiTableInfo, - Class> cls) throws IOException { + Class> cls) throws IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(MapReduceExtendedCell.class); @@ -614,8 +605,10 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, // Based on the configured map output class, set the correct reducer to properly // sort the incoming values. // TODO it would be nice to pick one or the other of these formats. - if (KeyValue.class.equals(job.getMapOutputValueClass()) - || MapReduceExtendedCell.class.equals(job.getMapOutputValueClass())) { + if ( + KeyValue.class.equals(job.getMapOutputValueClass()) + || MapReduceExtendedCell.class.equals(job.getMapOutputValueClass()) + ) { job.setReducerClass(CellSortReducer.class); } else if (Put.class.equals(job.getMapOutputValueClass())) { job.setReducerClass(PutSortReducer.class); @@ -626,8 +619,8 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, } conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { LOG.info("bulkload locality sensitive enabled"); @@ -638,46 +631,46 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, List regionLocators = new ArrayList<>(multiTableInfo.size()); List tableDescriptors = new ArrayList<>(multiTableInfo.size()); - for(TableInfo tableInfo : multiTableInfo) { + for (TableInfo tableInfo : multiTableInfo) { regionLocators.add(tableInfo.getRegionLocator()); - String tn = writeMultipleTables? - tableInfo.getRegionLocator().getName().getNameWithNamespaceInclAsString(): - tableInfo.getRegionLocator().getName().getNameAsString(); + String tn = writeMultipleTables + ? tableInfo.getRegionLocator().getName().getNameWithNamespaceInclAsString() + : tableInfo.getRegionLocator().getName().getNameAsString(); allTableNames.add(tn); tableDescriptors.add(tableInfo.getTableDescriptor()); } // Record tablenames for creating writer by favored nodes, and decoding compression, // block size and other attributes of columnfamily per table - conf.set(OUTPUT_TABLE_NAME_CONF_KEY, StringUtils.join(allTableNames, Bytes - .toString(tableSeparator))); + conf.set(OUTPUT_TABLE_NAME_CONF_KEY, + StringUtils.join(allTableNames, Bytes.toString(tableSeparator))); List startKeys = getRegionStartKeys(regionLocators, writeMultipleTables); // Use table's region boundaries for TOP split points. 
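Tying the list above together, a minimal sketch (not part of the patch; the table name and paths are assumptions) of preparing a bulk load with configureIncrementalLoad:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IncrementalLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "bulkload-prepare");
    // A mapper emitting ImmutableBytesWritable keys with Put (or KeyValue) values goes here.
    TableName name = TableName.valueOf("myTable");
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(name);
      RegionLocator locator = conn.getRegionLocator(name)) {
      // Sets the partitioner, reducer, reduce task count and output classes as listed above.
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
    }
    FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles")); // assumed HFile output dir
    // job.waitForCompletion(true) would run the job; the output can then be bulk loaded.
  }
}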
- LOG.info("Configuring " + startKeys.size() + " reduce partitions " + - "to match current region count for all tables"); + LOG.info("Configuring " + startKeys.size() + " reduce partitions " + + "to match current region count for all tables"); job.setNumReduceTasks(startKeys.size()); configurePartitioner(job, startKeys, writeMultipleTables); // Set compression algorithms based on column families - conf.set(COMPRESSION_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(compressionDetails, - tableDescriptors)); - conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(blockSizeDetails, - tableDescriptors)); - conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomTypeDetails, - tableDescriptors)); - conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomParamDetails, - tableDescriptors)); + conf.set(COMPRESSION_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(compressionDetails, tableDescriptors)); + conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(blockSizeDetails, tableDescriptors)); + conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomTypeDetails, tableDescriptors)); + conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomParamDetails, tableDescriptors)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); LOG.info("Incremental output configured for tables: " + StringUtils.join(allTableNames, ",")); } - public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) throws - IOException { + public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) + throws IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); @@ -690,15 +683,15 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes conf.set(OUTPUT_TABLE_NAME_CONF_KEY, tableDescriptor.getTableName().getNameAsString()); // Set compression algorithms based on column families conf.set(COMPRESSION_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); @@ -707,21 +700,16 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes /** * Configure HBase cluster key for remote cluster to load region location for locality-sensitive - * if it's 
enabled. - * It's not necessary to call this method explicitly when the cluster key for HBase cluster to be - * used to load region location is configured in the job configuration. - * Call this method when another HBase cluster key is configured in the job configuration. - * For example, you should call when you load data from HBase cluster A using - * {@link TableInputFormat} and generate hfiles for HBase cluster B. - * Otherwise, HFileOutputFormat2 fetch location from cluster A and locality-sensitive won't - * working correctly. + * if it's enabled. It's not necessary to call this method explicitly when the cluster key for + * HBase cluster to be used to load region location is configured in the job configuration. Call + * this method when another HBase cluster key is configured in the job configuration. For example, + * you should call when you load data from HBase cluster A using {@link TableInputFormat} and + * generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2 fetch location from cluster + * A and locality-sensitive won't working correctly. * {@link #configureIncrementalLoad(Job, Table, RegionLocator)} calls this method using - * {@link Table#getConfiguration} as clusterConf. - * See HBASE-25608. - * - * @param job which has configuration to be updated + * {@link Table#getConfiguration} as clusterConf. See HBASE-25608. + * @param job which has configuration to be updated * @param clusterConf which contains cluster key of the HBase cluster to be locality-sensitive - * * @see #configureIncrementalLoad(Job, Table, RegionLocator) * @see #LOCALITY_SENSITIVE_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY @@ -736,31 +724,28 @@ public static void configureRemoteCluster(Job job, Configuration clusterConf) { } final String quorum = clusterConf.get(HConstants.ZOOKEEPER_QUORUM); - final int clientPort = clusterConf.getInt( - HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); - final String parent = clusterConf.get( - HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + final int clientPort = clusterConf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, + HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); + final String parent = + clusterConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); conf.set(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY, quorum); conf.setInt(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY, clientPort); conf.set(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY, parent); - LOG.info("ZK configs for remote cluster of bulkload is configured: " + - quorum + ":" + clientPort + "/" + parent); + LOG.info("ZK configs for remote cluster of bulkload is configured: " + quorum + ":" + clientPort + + "/" + parent); } /** - * Runs inside the task to deserialize column family to compression algorithm - * map from the configuration. - * + * Runs inside the task to deserialize column family to compression algorithm map from the + * configuration. 
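A sketch of the scenario this javadoc describes (assumed cluster settings and table name, not part of the patch): reading from cluster A while generating HFiles destined for cluster B, with the destination's ZooKeeper details passed through configureRemoteCluster so locality-sensitive writer placement uses cluster B's region locations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class RemoteClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration sourceConf = HBaseConfiguration.create(); // cluster A, read via TableInputFormat
    Job job = Job.getInstance(sourceConf, "generate-hfiles-for-cluster-b");

    Configuration destConf = HBaseConfiguration.create();
    destConf.set(HConstants.ZOOKEEPER_QUORUM, "zk-b-1,zk-b-2,zk-b-3"); // assumed quorum of cluster B

    TableName name = TableName.valueOf("myTable"); // assumed destination table
    try (Connection destConn = ConnectionFactory.createConnection(destConf);
      Table table = destConn.getTable(name);
      RegionLocator locator = destConn.getRegionLocator(name)) {
      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), locator);
      // Point locality lookups at cluster B rather than the job's default (cluster A).
      HFileOutputFormat2.configureRemoteCluster(job, destConf);
    }
  }
}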
* @param conf to read the serialized values from * @return a map from column family to the configured compression algorithm */ @InterfaceAudience.Private - static Map createFamilyCompressionMap(Configuration - conf) { - Map stringMap = createFamilyConfValueMap(conf, - COMPRESSION_FAMILIES_CONF_KEY); + static Map createFamilyCompressionMap(Configuration conf) { + Map stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); Map compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); @@ -770,16 +755,14 @@ static Map createFamilyCompressionMap(Configuration } /** - * Runs inside the task to deserialize column family to bloom filter type - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter type map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter type */ @InterfaceAudience.Private static Map createFamilyBloomTypeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOOM_TYPE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); Map bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { BloomType bloomType = BloomType.valueOf(e.getValue()); @@ -789,9 +772,8 @@ static Map createFamilyBloomTypeMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to bloom filter param - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter param map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter param */ @@ -801,16 +783,13 @@ static Map createFamilyBloomParamMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to block size - * map from the configuration. - * + * Runs inside the task to deserialize column family to block size map from the configuration. * @param conf to read the serialized values from * @return a map from column family to the configured block size */ @InterfaceAudience.Private static Map createFamilyBlockSizeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOCK_SIZE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); Map blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue()); @@ -820,18 +799,16 @@ static Map createFamilyBlockSizeMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to data block encoding - * type map from the configuration. - * + * Runs inside the task to deserialize column family to data block encoding type map from the + * configuration. 
* @param conf to read the serialized values from - * @return a map from column family to HFileDataBlockEncoder for the - * configured data block type for the family + * @return a map from column family to HFileDataBlockEncoder for the configured data block type + * for the family */ @InterfaceAudience.Private - static Map createFamilyDataBlockEncodingMap( - Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - DATABLOCK_ENCODING_FAMILIES_CONF_KEY); + static Map createFamilyDataBlockEncodingMap(Configuration conf) { + Map stringMap = + createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); Map encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); @@ -841,13 +818,11 @@ static Map createFamilyDataBlockEncodingMap( /** * Run inside the task to deserialize column family to given conf value map. - * - * @param conf to read the serialized values from + * @param conf to read the serialized values from * @param confName conf key to read from the configuration * @return a map of column family to the given configuration value */ - private static Map createFamilyConfValueMap( - Configuration conf, String confName) { + private static Map createFamilyConfValueMap(Configuration conf, String confName) { Map confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); String confVal = conf.get(confName, ""); for (String familyConf : confVal.split("&")) { @@ -857,7 +832,7 @@ private static Map createFamilyConfValueMap( } try { confValMap.put(Bytes.toBytes(URLDecoder.decode(familySplit[0], "UTF-8")), - URLDecoder.decode(familySplit[1], "UTF-8")); + URLDecoder.decode(familySplit[1], "UTF-8")); } catch (UnsupportedEncodingException e) { // will not happen with UTF-8 encoding throw new AssertionError(e); @@ -870,15 +845,13 @@ private static Map createFamilyConfValueMap( * Configure job with a TotalOrderPartitioner, partitioning against * splitPoints. Cleans up the partitions file after job exists. 
*/ - static void configurePartitioner(Job job, List splitPoints, boolean - writeMultipleTables) - throws IOException { + static void configurePartitioner(Job job, List splitPoints, + boolean writeMultipleTables) throws IOException { Configuration conf = job.getConfiguration(); // create the partitions file FileSystem fs = FileSystem.get(conf); String hbaseTmpFsDir = - conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, - fs.getHomeDirectory() + "/hbase-staging"); + conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging"); Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID()); fs.makeQualified(partitionsPath); writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables); @@ -889,12 +862,11 @@ static void configurePartitioner(Job job, List splitPoin TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @InterfaceAudience.Private static String serializeColumnFamilyAttribute(Function fn, - List allTables) - throws UnsupportedEncodingException { + List allTables) throws UnsupportedEncodingException { StringBuilder attributeValue = new StringBuilder(); int i = 0; for (TableDescriptor tableDescriptor : allTables) { @@ -907,8 +879,8 @@ static String serializeColumnFamilyAttribute(Function 0) { attributeValue.append('&'); } - attributeValue.append(URLEncoder.encode( - Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), + attributeValue.append(URLEncoder + .encode(Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), familyDescriptor.getName())), "UTF-8")); attributeValue.append('='); attributeValue.append(URLEncoder.encode(fn.apply(familyDescriptor), "UTF-8")); @@ -919,24 +891,24 @@ static String serializeColumnFamilyAttribute(Function compressionDetails = familyDescriptor -> - familyDescriptor.getCompressionType().getName(); + static Function compressionDetails = + familyDescriptor -> familyDescriptor.getCompressionType().getName(); /** - * Serialize column family to block size map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to block size map to configuration. Invoked while configuring the MR + * job for incremental load. */ @InterfaceAudience.Private - static Function blockSizeDetails = familyDescriptor -> String - .valueOf(familyDescriptor.getBlocksize()); + static Function blockSizeDetails = + familyDescriptor -> String.valueOf(familyDescriptor.getBlocksize()); /** - * Serialize column family to bloom type map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to bloom type map to configuration. Invoked while configuring the MR + * job for incremental load. 
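The hunks above reshape the helpers that shuttle per-family settings through the job Configuration: serializeColumnFamilyAttribute URL-encodes each family name and its value and joins the pairs with '&', while createFamilyConfValueMap splits that string back apart inside the task. A minimal, self-contained sketch of that round trip (plain String keys stand in for HBase's byte[] family names, and the class and method names here are invented for illustration):

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.LinkedHashMap;
import java.util.Map;

public class FamilyConfRoundTrip {

  // Encode a family -> value map the way serializeColumnFamilyAttribute does:
  // URL-encode both sides of each pair and join the pairs with '&'.
  static String encode(Map<String, String> familyValues) throws UnsupportedEncodingException {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> e : familyValues.entrySet()) {
      if (sb.length() > 0) {
        sb.append('&');
      }
      sb.append(URLEncoder.encode(e.getKey(), "UTF-8"));
      sb.append('=');
      sb.append(URLEncoder.encode(e.getValue(), "UTF-8"));
    }
    return sb.toString();
  }

  // Decode the string again, mirroring createFamilyConfValueMap: split on '&',
  // then on '=', skip malformed entries, and URL-decode both halves.
  static Map<String, String> decode(String confVal) throws UnsupportedEncodingException {
    Map<String, String> result = new LinkedHashMap<>();
    for (String familyConf : confVal.split("&")) {
      String[] familySplit = familyConf.split("=");
      if (familySplit.length != 2) {
        continue;
      }
      result.put(URLDecoder.decode(familySplit[0], "UTF-8"),
        URLDecoder.decode(familySplit[1], "UTF-8"));
    }
    return result;
  }

  public static void main(String[] args) throws Exception {
    Map<String, String> compressionByFamily = new LinkedHashMap<>();
    compressionByFamily.put("cf1", "GZ");
    compressionByFamily.put("cf2", "SNAPPY");
    String serialized = encode(compressionByFamily);
    System.out.println(serialized);         // cf1=GZ&cf2=SNAPPY
    System.out.println(decode(serialized)); // {cf1=GZ, cf2=SNAPPY}
  }
}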
*/ @InterfaceAudience.Private static Function bloomTypeDetails = familyDescriptor -> { @@ -948,8 +920,8 @@ static String serializeColumnFamilyAttribute(Function bloomParamDetails = familyDescriptor -> { @@ -962,8 +934,8 @@ static String serializeColumnFamilyAttribute(Function dataBlockEncodingDetails = familyDescriptor -> { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java index fad91599b6f1..a3d55a4cbf2c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,24 +29,25 @@ import org.apache.hadoop.hbase.mapred.TableOutputFormat; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * - *

    This class is not suitable as partitioner creating hfiles - * for incremental bulk loads as region spread will likely change between time of - * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.BulkLoadHFiles} - * and Bulk Load.

    - * - * @param The type of the key. - * @param The type of the value. + * This is used to partition the output keys into groups of keys. Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. + *

    + * This class is not suitable as partitioner creating hfiles for incremental bulk loads as region + * spread will likely change between time of hfile creation and load time. See + * {@link org.apache.hadoop.hbase.tool.BulkLoadHFiles} and + * Bulk Load. + *

    + * @param The type of the key. + * @param The type of the value. */ @InterfaceAudience.Public -public class HRegionPartitioner -extends Partitioner -implements Configurable { +public class HRegionPartitioner extends Partitioner + implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); private Configuration conf = null; @@ -60,24 +57,23 @@ public class HRegionPartitioner private byte[][] startKeys; /** - * Gets the partition number for a given key (hence record) given the total - * number of partitions i.e. number of reduce-tasks for the job. - * - *

Typically a hash function on a all or a subset of the key.

    - * - * @param key The key to be partitioned. - * @param value The entry value. - * @param numPartitions The total number of partitions. + * Gets the partition number for a given key (hence record) given the total number of partitions + * i.e. number of reduce-tasks for the job. + *

+ * Typically a hash function on all or a subset of the key. + *

    + * @param key The key to be partitioned. + * @param value The entry value. + * @param numPartitions The total number of partitions. * @return The partition number for the key. - * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( - * java.lang.Object, java.lang.Object, int) + * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( java.lang.Object, java.lang.Object, + * int) */ @Override - public int getPartition(ImmutableBytesWritable key, - VALUE value, int numPartitions) { + public int getPartition(ImmutableBytesWritable key, VALUE value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -87,12 +83,11 @@ public int getPartition(ImmutableBytesWritable key, } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } @@ -103,7 +98,6 @@ public int getPartition(ImmutableBytesWritable key, /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -113,12 +107,9 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to determine the start keys for the - * given table. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to determine the start keys for the given table. + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java index 0a779618eac9..b41d94fcebb4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -220,9 +220,9 @@ Scan initScan() throws IOException { } /** - * Choose partitions between row ranges to hash to a single output file - * Selects region boundaries that fall within the scan range, and groups them - * into the desired number of partitions. + * Choose partitions between row ranges to hash to a single output file Selects region + * boundaries that fall within the scan range, and groups them into the desired number of + * partitions. 
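To make the reformatted HRegionPartitioner#getPartition logic above easier to follow: the partitioner resolves a row to the index of the region that holds it, and when the job runs with fewer reducers than the table has regions it folds the overflow indices back into range with a hash, exactly the expression kept by the hunk. A rough sketch of that behaviour (String row keys and a hand-rolled start-key lookup stand in for the RegionLocator the real class uses):

public class RegionIndexPartitioning {

  // Pick a reduce partition for a region index, folding overflow indices the same
  // way the real getPartition() does when there are fewer reducers than regions.
  static int partitionFor(int regionIndex, int numPartitions) {
    if (regionIndex < numPartitions) {
      return regionIndex;
    }
    return (Integer.toString(regionIndex).hashCode() & Integer.MAX_VALUE) % numPartitions;
  }

  // Stand-in for the RegionLocator lookup: the region holding a row is the one with
  // the greatest start key that is <= the row.
  static int regionIndexFor(String row, String[] sortedStartKeys) {
    int index = 0;
    for (int i = 0; i < sortedStartKeys.length; i++) {
      if (row.compareTo(sortedStartKeys[i]) >= 0) {
        index = i;
      }
    }
    return index;
  }

  public static void main(String[] args) {
    String[] startKeys = { "", "g", "m", "t" }; // a table with four regions
    int numReducers = 2;                        // fewer reducers than regions
    for (String row : new String[] { "apple", "horse", "zebra" }) {
      int region = regionIndexFor(row, startKeys);
      System.out.println(row + " -> region " + region + " -> partition "
        + partitionFor(region, numReducers));
    }
  }
}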
*/ void selectPartitions(Pair regionStartEndKeys) { List startKeys = new ArrayList<>(); @@ -232,13 +232,15 @@ void selectPartitions(Pair regionStartEndKeys) { // if scan begins after this region, or starts before this region, then drop this region // in other words: - // IF (scan begins before the end of this region - // AND scan ends before the start of this region) - // THEN include this region - if ((isTableStartRow(startRow) || isTableEndRow(regionEndKey) + // IF (scan begins before the end of this region + // AND scan ends before the start of this region) + // THEN include this region + if ( + (isTableStartRow(startRow) || isTableEndRow(regionEndKey) || Bytes.compareTo(startRow, regionEndKey) < 0) - && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) - || Bytes.compareTo(stopRow, regionStartKey) > 0)) { + && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) + || Bytes.compareTo(stopRow, regionStartKey) > 0) + ) { startKeys.add(regionStartKey); } } @@ -267,8 +269,8 @@ void selectPartitions(Pair regionStartEndKeys) { void writePartitionFile(Configuration conf, Path path) throws IOException { FileSystem fs = path.getFileSystem(conf); @SuppressWarnings("deprecation") - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, path, ImmutableBytesWritable.class, NullWritable.class); + SequenceFile.Writer writer = + SequenceFile.createWriter(fs, conf, path, ImmutableBytesWritable.class, NullWritable.class); for (int i = 0; i < partitions.size(); i++) { writer.append(partitions.get(i), NullWritable.get()); @@ -277,7 +279,7 @@ void writePartitionFile(Configuration conf, Path path) throws IOException { } private void readPartitionFile(FileSystem fs, Configuration conf, Path path) - throws IOException { + throws IOException { @SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf); ImmutableBytesWritable key = new ImmutableBytesWritable(); @@ -328,11 +330,10 @@ static String getDataFileName(int hashFileIndex) { } /** - * Open a TableHash.Reader starting at the first hash at or after the given key. - * @throws IOException + * Open a TableHash.Reader starting at the first hash at or after the given key. n */ public Reader newReader(Configuration conf, ImmutableBytesWritable startKey) - throws IOException { + throws IOException { return new Reader(conf, startKey); } @@ -351,15 +352,15 @@ public class Reader implements java.io.Closeable { int partitionIndex = Collections.binarySearch(partitions, startKey); if (partitionIndex >= 0) { // if the key is equal to a partition, then go the file after that partition - hashFileIndex = partitionIndex+1; + hashFileIndex = partitionIndex + 1; } else { // if the key is between partitions, then go to the file between those partitions - hashFileIndex = -1-partitionIndex; + hashFileIndex = -1 - partitionIndex; } openHashFile(); // MapFile's don't make it easy to seek() so that the subsequent next() returns - // the desired key/value pair. So we cache it for the first call of next(). + // the desired key/value pair. So we cache it for the first call of next(). hash = new ImmutableBytesWritable(); key = (ImmutableBytesWritable) mapFileReader.getClosest(startKey, hash); if (key == null) { @@ -371,8 +372,8 @@ public class Reader implements java.io.Closeable { } /** - * Read the next key/hash pair. - * Returns true if such a pair exists and false when at the end of the data. + * Read the next key/hash pair. Returns true if such a pair exists and false when at the end + * of the data. 
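One detail worth calling out in the TableHash.Reader constructor reformatted above is how it chooses the hash file to open: it binary-searches the sorted partition boundary keys and then turns the binarySearch result into a file index. A small sketch of just that arithmetic (Strings stand in for ImmutableBytesWritable and the class name is made up):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class HashFileLookup {

  // partitions holds the sorted boundary keys between consecutive hash files,
  // so a table hashed into n files has n - 1 boundaries.
  static int hashFileIndexFor(String startKey, List<String> partitions) {
    int partitionIndex = Collections.binarySearch(partitions, startKey);
    if (partitionIndex >= 0) {
      // The key equals a boundary: read the file just after that partition.
      return partitionIndex + 1;
    }
    // The key falls between boundaries: binarySearch returns -(insertionPoint) - 1,
    // so -1 - partitionIndex recovers the insertion point, which is the file index.
    return -1 - partitionIndex;
  }

  public static void main(String[] args) {
    List<String> partitions = Arrays.asList("f", "m", "t"); // four hash files, 0..3
    for (String key : new String[] { "a", "f", "g", "z" }) {
      System.out.println(key + " -> hash file " + hashFileIndexFor(key, partitions));
    }
    // prints: a -> 0, f -> 1, g -> 1, z -> 3
  }
}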
*/ public boolean next() throws IOException { if (cachedNext) { @@ -443,19 +444,19 @@ public Job createSubmittableJob(String[] args) throws IOException { generatePartitions(partitionsPath); Job job = Job.getInstance(getConf(), - getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); + getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); Configuration jobConf = job.getConfiguration(); jobConf.setLong(HASH_BATCH_SIZE_CONF_KEY, tableHash.batchSize); jobConf.setBoolean(IGNORE_TIMESTAMPS, tableHash.ignoreTimestamps); job.setJarByClass(HashTable.class); TableMapReduceUtil.initTableMapperJob(tableHash.tableName, tableHash.initScan(), - HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); // use a TotalOrderPartitioner and reducers to group region output into hash files job.setPartitionerClass(TotalOrderPartitioner.class); TotalOrderPartitioner.setPartitionFile(jobConf, partitionsPath); - job.setReducerClass(Reducer.class); // identity reducer + job.setReducerClass(Reducer.class); // identity reducer job.setNumReduceTasks(tableHash.numHashFiles); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(ImmutableBytesWritable.class); @@ -467,8 +468,8 @@ public Job createSubmittableJob(String[] args) throws IOException { private void generatePartitions(Path partitionsPath) throws IOException { Connection connection = ConnectionFactory.createConnection(getConf()); - Pair regionKeys - = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); + Pair regionKeys = + connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); connection.close(); tableHash.selectPartitions(regionKeys); @@ -565,18 +566,17 @@ public static class HashMapper @Override protected void setup(Context context) throws IOException, InterruptedException { - targetBatchSize = context.getConfiguration() - .getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); + targetBatchSize = + context.getConfiguration().getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); hasher = new ResultHasher(); - hasher.ignoreTimestamps = context.getConfiguration(). 
- getBoolean(IGNORE_TIMESTAMPS, false); + hasher.ignoreTimestamps = context.getConfiguration().getBoolean(IGNORE_TIMESTAMPS, false); TableSplit split = (TableSplit) context.getInputSplit(); hasher.startBatch(new ImmutableBytesWritable(split.getStartRow())); } @Override protected void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (currentRow == null || !currentRow.equals(key)) { currentRow = new ImmutableBytesWritable(key); // not immutable @@ -612,6 +612,7 @@ private void completeManifest() throws IOException { } private static final int NUM_ARGS = 2; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -646,10 +647,10 @@ private static void printUsage(final String errorMsg) { System.err.println(); System.err.println("Examples:"); System.err.println(" To hash 'TestTable' in 32kB batches for a 1 hour window into 50 files:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" - + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3" - + " TestTable /hashes/testTable"); + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" + + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3" + + " TestTable /hashes/testTable"); } private boolean doCommandLine(final String[] args) { @@ -659,8 +660,8 @@ private boolean doCommandLine(final String[] args) { } try { - tableHash.tableName = args[args.length-2]; - destPath = new Path(args[args.length-1]); + tableHash.tableName = args[args.length - 2]; + destPath = new Path(args[args.length - 1]); for (int i = 0; i < args.length - NUM_ARGS; i++) { String cmd = args[i]; @@ -731,18 +732,20 @@ private boolean doCommandLine(final String[] args) { final String ignoreTimestampsKey = "--ignoreTimestamps="; if (cmd.startsWith(ignoreTimestampsKey)) { - tableHash.ignoreTimestamps = Boolean. - parseBoolean(cmd.substring(ignoreTimestampsKey.length())); + tableHash.ignoreTimestamps = + Boolean.parseBoolean(cmd.substring(ignoreTimestampsKey.length())); continue; } printUsage("Invalid argument '" + cmd + "'"); return false; } - if ((tableHash.startTime != 0 || tableHash.endTime != 0) - && (tableHash.startTime >= tableHash.endTime)) { - printUsage("Invalid time range filter: starttime=" - + tableHash.startTime + " >= endtime=" + tableHash.endTime); + if ( + (tableHash.startTime != 0 || tableHash.endTime != 0) + && (tableHash.startTime >= tableHash.endTime) + ) { + printUsage("Invalid time range filter: starttime=" + tableHash.startTime + " >= endtime=" + + tableHash.endTime); return false; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java index 831607c730c5..0b27a8822402 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,48 +18,43 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to the reduce phase. */ @InterfaceAudience.Public -public class IdentityTableMapper -extends TableMapper { +public class IdentityTableMapper extends TableMapper { /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table name. - * @param scan The scan with the columns to scan. - * @param mapper The mapper class. - * @param job The job configuration. + * @param scan The scan with the columns to scan. + * @param mapper The mapper class. + * @param job The job configuration. * @throws IOException When setting up the job fails. */ @SuppressWarnings("rawtypes") - public static void initJob(String table, Scan scan, - Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + public static void initJob(String table, Scan scan, Class mapper, Job job) + throws IOException { + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); } /** * Pass the key, value to reduce. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. - * @throws IOException When writing the record fails. + * @param key The current key. + * @param value The current value. + * @param context The current context. + * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { context.write(key, value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java index 876953c862b3..e3e63b14eb93 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,60 +18,50 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.io.Writable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.io.Writable; /** * Convenience class that simply writes all values (which must be - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} instances) - * passed to it out to the configured HBase table. 
This works in combination - * with {@link TableOutputFormat} which actually does the writing to HBase.

    - * - * Keys are passed along but ignored in TableOutputFormat. However, they can - * be used to control how your values will be divided up amongst the specified - * number of reducers.

    - * - * You can also use the {@link TableMapReduceUtil} class to set up the two - * classes in one step: + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} instances) passed to it out to the configured HBase table. This works in combination with + * {@link TableOutputFormat} which actually does the writing to HBase. + *

    + * Keys are passed along but ignored in TableOutputFormat. However, they can be used to control how + * your values will be divided up amongst the specified number of reducers. + *

+ * You can also use the {@link TableMapReduceUtil} class to set up the two classes in one step: *

* TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job); - *
    - * This will also set the proper {@link TableOutputFormat} which is given the - * table parameter. The - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} define the - * row and columns implicitly. + * This will also set the proper {@link TableOutputFormat} which is given the + * table parameter. The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} define the row and columns implicitly. */ @InterfaceAudience.Public -public class IdentityTableReducer -extends TableReducer { +public class IdentityTableReducer extends TableReducer { @SuppressWarnings("unused") private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReducer.class); /** - * Writes each given record, consisting of the row key and the given values, - * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}. - * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put} - * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. - * - * @param key The current row key. + * Writes each given record, consisting of the row key and the given values, to the configured + * {@link org.apache.hadoop.mapreduce.OutputFormat}. It is emitting the row key and each + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} as separate pairs. + * @param key The current row key. * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given - * row. - * @param context The context of the reduce. - * @throws IOException When writing the record fails. + * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given row. + * @param context The context of the reduce. + * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ @Override public void reduce(Writable key, Iterable values, Context context) - throws IOException, InterruptedException { - for(Mutation putOrDelete : values) { + throws IOException, InterruptedException { + for (Mutation putOrDelete : values) { context.write(key, putOrDelete); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 30071fdfd809..6605c6783ba8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +31,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -41,15 +39,12 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.util.MapReduceExtendedCell; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -63,7 +58,9 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; @@ -77,11 +74,11 @@ import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Import data written by {@link Export}. 
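Because Import extends Configured and implements Tool, it can be driven programmatically as well as from the hbase command line. A hedged sketch of such a driver (the table name and input directory are placeholders, and the argument order should be checked against the tool's own usage output):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.Import;
import org.apache.hadoop.util.ToolRunner;

public class RunImport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The directory is expected to hold SequenceFiles previously written by Export.
    String[] importArgs = { "myTable", "/export/myTable" };
    System.exit(ToolRunner.run(conf, new Import(), importArgs));
  }
}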
*/ @@ -95,16 +92,16 @@ public class Import extends Configured implements Tool { public final static String FILTER_ARGS_CONF_KEY = "import.filter.args"; public final static String TABLE_NAME = "import.table.name"; public final static String WAL_DURABILITY = "import.wal.durability"; - public final static String HAS_LARGE_RESULT= "import.bulk.hasLargeResult"; + public final static String HAS_LARGE_RESULT = "import.bulk.hasLargeResult"; private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; public static class CellWritableComparablePartitioner - extends Partitioner { + extends Partitioner { private static CellWritableComparable[] START_KEYS = null; + @Override - public int getPartition(CellWritableComparable key, Cell value, - int numPartitions) { + public int getPartition(CellWritableComparable key, Cell value, int numPartitions) { for (int i = 0; i < START_KEYS.length; ++i) { if (key.compareTo(START_KEYS[i]) <= 0) { return i; @@ -115,15 +112,13 @@ public int getPartition(CellWritableComparable key, Cell value, } - public static class CellWritableComparable - implements WritableComparable { + public static class CellWritableComparable implements WritableComparable { private Cell kv = null; static { // register this comparator - WritableComparator.define(CellWritableComparable.class, - new CellWritableComparator()); + WritableComparator.define(CellWritableComparable.class, new CellWritableComparator()); } public CellWritableComparable() { @@ -172,48 +167,42 @@ public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { } public static class CellReducer - extends - Reducer { - protected void reduce( - CellWritableComparable row, - Iterable kvs, - Reducer.Context context) - throws java.io.IOException, InterruptedException { + extends Reducer { + protected void reduce(CellWritableComparable row, Iterable kvs, + Reducer.Context context) + throws java.io.IOException, InterruptedException { int index = 0; for (Cell kv : kvs) { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), new MapReduceExtendedCell(kv)); - if (++index % 100 == 0) - context.setStatus("Wrote " + index + " KeyValues, " - + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); + if (++index % 100 == 0) context.setStatus("Wrote " + index + " KeyValues, " + + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); } } } - public static class CellSortImporter - extends TableMapper { + public static class CellSortImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null || !filter.filterRowKey( - PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) + ) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -236,16 +225,15 @@ public void setup(Context context) throws IOException { Configuration conf = context.getConfiguration(); TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME)); try (Connection conn = ConnectionFactory.createConnection(conf); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { byte[][] startKeys = regionLocator.getStartKeys(); if (startKeys.length != reduceNum) { throw new IOException("Region split after job initialization"); } - CellWritableComparable[] startKeyWraps = - new CellWritableComparable[startKeys.length - 1]; + CellWritableComparable[] startKeyWraps = new CellWritableComparable[startKeys.length - 1]; for (int i = 1; i < startKeys.length; ++i) { startKeyWraps[i - 1] = - new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); + new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); } CellWritableComparablePartitioner.START_KEYS = startKeyWraps; } @@ -255,31 +243,30 @@ public void setup(Context context) throws IOException { /** * A mapper that just writes out KeyValues. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", - justification="Writables are going away and this has been this way forever") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_COMPARETO_USE_OBJECT_EQUALS", + justification = "Writables are going away and this has been this way forever") public static class CellImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), - (short) row.getLength()))) { + if ( + filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) + ) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -310,15 +297,13 @@ public static class Importer extends TableMapper cfRenameMap) { - if(cfRenameMap != null) { + if (cfRenameMap != null) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer - kv.getRowOffset(), // row offset - kv.getRowLength(), // row length - newCfName, // CF buffer - 0, // CF offset - newCfName.length, // CF length - kv.getQualifierArray(), // qualifier buffer - kv.getQualifierOffset(), // qualifier offset - kv.getQualifierLength(), // qualifier length - kv.getTimestamp(), // timestamp - KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type - kv.getValueArray(), // value buffer - kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? null: tags); + kv.getRowOffset(), // row offset + kv.getRowLength(), // row length + newCfName, // CF buffer + 0, // CF offset + newCfName.length, // CF length + kv.getQualifierArray(), // qualifier buffer + kv.getQualifierOffset(), // qualifier offset + kv.getQualifierLength(), // qualifier length + kv.getTimestamp(), // timestamp + KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type + kv.getValueArray(), // value buffer + kv.getValueOffset(), // value offset + kv.getValueLength(), // value length + tags.size() == 0 ? null : tags); } } return kv; @@ -537,16 +525,16 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { private static Map createCfRenameMap(Configuration conf) { Map cfRenameMap = null; String allMappingsPropVal = conf.get(CF_RENAME_PROP); - if(allMappingsPropVal != null) { + if (allMappingsPropVal != null) { // The conf value format should be sourceCf1:destCf1,sourceCf2:destCf2,... String[] allMappings = allMappingsPropVal.split(","); - for (String mapping: allMappings) { - if(cfRenameMap == null) { - cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (String mapping : allMappings) { + if (cfRenameMap == null) { + cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); } - String [] srcAndDest = mapping.split(":"); - if(srcAndDest.length != 2) { - continue; + String[] srcAndDest = mapping.split(":"); + if (srcAndDest.length != 2) { + continue; } cfRenameMap.put(Bytes.toBytes(srcAndDest[0]), Bytes.toBytes(srcAndDest[1])); } @@ -555,32 +543,36 @@ private static Map createCfRenameMap(Configuration conf) { } /** - *

    Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells - * the mapper how to rename column families. - * - *

Alternately, instead of calling this function, you could set the configuration key + *

    + * Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells the mapper + * how to rename column families. + *

    + * Alternately, instead of calling this function, you could set the configuration key * {@link #CF_RENAME_PROP} yourself. The value should look like - *

srcCf1:destCf1,srcCf2:destCf2,....
    . This would have the same effect on - * the mapper behavior. * - * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be - * set + *
    +   * srcCf1:destCf1,srcCf2:destCf2,....
    +   * 
    + * + * . This would have the same effect on the mapper behavior. + * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be set * @param renameMap a mapping from source CF names to destination CF names */ - static public void configureCfRenaming(Configuration conf, - Map renameMap) { + static public void configureCfRenaming(Configuration conf, Map renameMap) { StringBuilder sb = new StringBuilder(); - for(Map.Entry entry: renameMap.entrySet()) { + for (Map.Entry entry : renameMap.entrySet()) { String sourceCf = entry.getKey(); String destCf = entry.getValue(); - if(sourceCf.contains(":") || sourceCf.contains(",") || - destCf.contains(":") || destCf.contains(",")) { - throw new IllegalArgumentException("Illegal character in CF names: " - + sourceCf + ", " + destCf); + if ( + sourceCf.contains(":") || sourceCf.contains(",") || destCf.contains(":") + || destCf.contains(",") + ) { + throw new IllegalArgumentException( + "Illegal character in CF names: " + sourceCf + ", " + destCf); } - if(sb.length() != 0) { + if (sb.length() != 0) { sb.append(","); } sb.append(sourceCf + ":" + destCf); @@ -590,12 +582,12 @@ static public void configureCfRenaming(Configuration conf, /** * Add a Filter to be instantiated on import - * @param conf Configuration to update (will be passed to the job) - * @param clazz {@link Filter} subclass to instantiate on the server. + * @param conf Configuration to update (will be passed to the job) + * @param clazz {@link Filter} subclass to instantiate on the server. * @param filterArgs List of arguments to pass to the filter on instantiation */ public static void addFilterAndArguments(Configuration conf, Class clazz, - List filterArgs) throws IOException { + List filterArgs) throws IOException { conf.set(Import.FILTER_CLASS_CONF_KEY, clazz.getName()); conf.setStrings(Import.FILTER_ARGS_CONF_KEY, filterArgs.toArray(new String[filterArgs.size()])); } @@ -607,8 +599,7 @@ public static void addFilterAndArguments(Configuration conf, Class 0) { @@ -690,29 +680,28 @@ private static void usage(final String errorMsg) { System.err.println("HFiles of data to prepare for a bulk data load, pass the option:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println("If there is a large result that includes too much Cell " - + "whitch can occur OOME caused by the memery sort in reducer, pass the option:"); + + "whitch can occur OOME caused by the memery sort in reducer, pass the option:"); System.err.println(" -D" + HAS_LARGE_RESULT + "=true"); System.err - .println(" To apply a generic org.apache.hadoop.hbase.filter.Filter to the input, use"); + .println(" To apply a generic org.apache.hadoop.hbase.filter.Filter to the input, use"); System.err.println(" -D" + FILTER_CLASS_CONF_KEY + "="); System.err.println(" -D" + FILTER_ARGS_CONF_KEY + "="); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false\n" + + " -D" + WAL_DURABILITY + "="); } /** @@ -721,15 +710,17 @@ private static void usage(final String errorMsg) { * present in the Write Ahead Log to replay in scenarios of a crash. 
This method flushes all the * regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL} */ - public static void flushRegionsIfNecessary(Configuration conf) throws IOException, - InterruptedException { + public static void flushRegionsIfNecessary(Configuration conf) + throws IOException, InterruptedException { String tableName = conf.get(TABLE_NAME); Admin hAdmin = null; Connection connection = null; String durability = conf.get(WAL_DURABILITY); // Need to flush if the data is written to hbase and skip wal is enabled. - if (conf.get(BULK_OUTPUT_CONF_KEY) == null && durability != null - && Durability.SKIP_WAL.name().equalsIgnoreCase(durability)) { + if ( + conf.get(BULK_OUTPUT_CONF_KEY) == null && durability != null + && Durability.SKIP_WAL.name().equalsIgnoreCase(durability) + ) { LOG.info("Flushing all data that skipped the WAL."); try { connection = ConnectionFactory.createConnection(conf); @@ -758,7 +749,7 @@ public int run(String[] args) throws Exception { } Job job = createSubmittableJob(getConf(), args); boolean isJobSuccessful = job.waitForCompletion(true); - if(isJobSuccessful){ + if (isJobSuccessful) { // Flush all the regions of the table flushRegionsIfNecessary(getConf()); } @@ -767,8 +758,8 @@ public int run(String[] args) throws Exception { if (outputRecords < inputRecords) { System.err.println("Warning, not all records were imported (maybe filtered out)."); if (outputRecords == 0) { - System.err.println("If the data was exported from HBase 0.94 "+ - "consider using -Dhbase.import.version=0.94."); + System.err.println("If the data was exported from HBase 0.94 " + + "consider using -Dhbase.import.version=0.94."); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index 2e94a906f289..2bf6e6b5a048 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,11 +66,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Tool to import data from a TSV file. - * - * This tool is rather simplistic - it doesn't do any quoting or - * escaping, but is useful for many data loads. - * + * Tool to import data from a TSV file. This tool is rather simplistic - it doesn't do any quoting + * or escaping, but is useful for many data loads. * @see ImportTsv#usage(String) */ @InterfaceAudience.Public @@ -95,8 +91,8 @@ public class ImportTsv extends Configured implements Tool { public final static String COLUMNS_CONF_KEY = "importtsv.columns"; public final static String SEPARATOR_CONF_KEY = "importtsv.separator"; public final static String ATTRIBUTE_SEPERATOR_CONF_KEY = "attributes.seperator"; - //This config is used to propagate credentials from parent MR jobs which launch - //ImportTSV jobs. SEE IntegrationTestImportTsv. + // This config is used to propagate credentials from parent MR jobs which launch + // ImportTSV jobs. SEE IntegrationTestImportTsv. 
public final static String CREDENTIALS_LOCATION = "credentials_location"; final static String DEFAULT_SEPARATOR = "\t"; final static String DEFAULT_ATTRIBUTES_SEPERATOR = "=>"; @@ -105,8 +101,8 @@ public class ImportTsv extends Configured implements Tool { public final static String CREATE_TABLE_CONF_KEY = "create.table"; public final static String NO_STRICT_COL_FAMILY = "no.strict"; /** - * If table didn't exist and was created in dry-run mode, this flag is - * flipped to delete it when MR ends. + * If table didn't exist and was created in dry-run mode, this flag is flipped to delete it when + * MR ends. */ private static boolean DRY_RUN_TABLE_CREATED; @@ -151,9 +147,8 @@ public static class TsvParser { private int cellTTLColumnIndex = DEFAULT_CELL_TTL_COLUMN_INDEX; /** - * @param columnsSpecification the list of columns to parser out, comma separated. - * The row key should be the special token TsvParser.ROWKEY_COLUMN_SPEC - * @param separatorStr + * @param columnsSpecification the list of columns to parser out, comma separated. The row key + * should be the special token TsvParser.ROWKEY_COLUMN_SPEC n */ public TsvParser(String columnsSpecification, String separatorStr) { // Configure separator @@ -163,8 +158,8 @@ public TsvParser(String columnsSpecification, String separatorStr) { separatorByte = separator[0]; // Configure columns - ArrayList columnStrings = Lists.newArrayList( - Splitter.on(',').trimResults().split(columnsSpecification)); + ArrayList columnStrings = + Lists.newArrayList(Splitter.on(',').trimResults().split(columnsSpecification)); maxColumnCount = columnStrings.size(); families = new byte[maxColumnCount][]; @@ -242,12 +237,12 @@ public int getRowKeyColumnIndex() { public byte[] getFamily(int idx) { return families[idx]; } + public byte[] getQualifier(int idx) { return qualifiers[idx]; } - public ParsedLine parse(byte[] lineBytes, int length) - throws BadTsvLineException { + public ParsedLine parse(byte[] lineBytes, int length) throws BadTsvLineException { // Enumerate separator offsets ArrayList tabOffsets = new ArrayList<>(maxColumnCount); for (int i = 0; i < length; i++) { @@ -265,8 +260,7 @@ public ParsedLine parse(byte[] lineBytes, int length) throw new BadTsvLineException("Excessive columns"); } else if (tabOffsets.size() <= getRowKeyColumnIndex()) { throw new BadTsvLineException("No row key"); - } else if (hasTimestamp() - && tabOffsets.size() <= getTimestampKeyColumnIndex()) { + } else if (hasTimestamp() && tabOffsets.size() <= getTimestampKeyColumnIndex()) { throw new BadTsvLineException("No timestamp"); } else if (hasAttributes() && tabOffsets.size() <= getAttributesKeyColumnIndex()) { throw new BadTsvLineException("No attributes specified"); @@ -290,6 +284,7 @@ class ParsedLine { public int getRowKeyOffset() { return getColumnOffset(rowKeyColumnIndex); } + public int getRowKeyLength() { return getColumnLength(rowKeyColumnIndex); } @@ -300,9 +295,8 @@ public long getTimestamp(long ts) throws BadTsvLineException { return ts; } - String timeStampStr = Bytes.toString(lineBytes, - getColumnOffset(timestampKeyColumnIndex), - getColumnLength(timestampKeyColumnIndex)); + String timeStampStr = Bytes.toString(lineBytes, getColumnOffset(timestampKeyColumnIndex), + getColumnLength(timestampKeyColumnIndex)); try { return Long.parseLong(timeStampStr); } catch (NumberFormatException nfe) { @@ -316,7 +310,7 @@ private String getAttributes() { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(attrKeyColumnIndex), - getColumnLength(attrKeyColumnIndex)); + 
getColumnLength(attrKeyColumnIndex)); } } @@ -366,7 +360,7 @@ public String getCellVisibility() { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(cellVisibilityColumnIndex), - getColumnLength(cellVisibilityColumnIndex)); + getColumnLength(cellVisibilityColumnIndex)); } } @@ -391,22 +385,23 @@ public long getCellTTL() { return 0; } else { return Bytes.toLong(lineBytes, getColumnOffset(cellTTLColumnIndex), - getColumnLength(cellTTLColumnIndex)); + getColumnLength(cellTTLColumnIndex)); } } public int getColumnOffset(int idx) { - if (idx > 0) - return tabOffsets.get(idx - 1) + 1; - else - return 0; + if (idx > 0) return tabOffsets.get(idx - 1) + 1; + else return 0; } + public int getColumnLength(int idx) { return tabOffsets.get(idx) - getColumnOffset(idx); } + public int getColumnCount() { return tabOffsets.size(); } + public byte[] getLineBytes() { return lineBytes; } @@ -416,18 +411,16 @@ public static class BadTsvLineException extends Exception { public BadTsvLineException(String err) { super(err); } + private static final long serialVersionUID = 1L; } /** - * Return starting position and length of row key from the specified line bytes. - * @param lineBytes - * @param length - * @return Pair of row key offset and length. - * @throws BadTsvLineException + * Return starting position and length of row key from the specified line bytes. nn * @return + * Pair of row key offset and length. n */ public Pair parseRowKey(byte[] lineBytes, int length) - throws BadTsvLineException { + throws BadTsvLineException { int rkColumnIndex = 0; int startPos = 0, endPos = 0; for (int i = 0; i <= length; i++) { @@ -443,9 +436,8 @@ public Pair parseRowKey(byte[] lineBytes, int length) } } if (i == length) { - throw new BadTsvLineException( - "Row key does not exist as number of columns in the line" - + " are less than row key position."); + throw new BadTsvLineException("Row key does not exist as number of columns in the line" + + " are less than row key position."); } } return new Pair<>(startPos, endPos - startPos + 1); @@ -454,14 +446,13 @@ public Pair parseRowKey(byte[] lineBytes, int length) /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ protected static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException, ClassNotFoundException { + throws IOException, ClassNotFoundException { Job job = null; boolean isDryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false); try (Connection connection = ConnectionFactory.createConnection(conf)) { @@ -471,16 +462,17 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) String actualSeparator = conf.get(SEPARATOR_CONF_KEY); if (actualSeparator != null) { conf.set(SEPARATOR_CONF_KEY, - Bytes.toString(Base64.getEncoder().encode(Bytes.toBytes(actualSeparator)))); + Bytes.toString(Base64.getEncoder().encode(Bytes.toBytes(actualSeparator)))); } // See if a non-default Mapper was set String mapperClassName = conf.get(MAPPER_CONF_KEY); - Class mapperClass = mapperClassName != null? Class.forName(mapperClassName): DEFAULT_MAPPER; + Class mapperClass = + mapperClassName != null ? 
Class.forName(mapperClassName) : DEFAULT_MAPPER; TableName tableName = TableName.valueOf(args[0]); Path inputDir = new Path(args[1]); - String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); + String jobName = conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName.getNameAsString()); job = Job.getInstance(conf, jobName); job.setJarByClass(mapperClass); FileInputFormat.setInputPaths(job, inputDir); @@ -489,7 +481,7 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); String[] columns = conf.getStrings(COLUMNS_CONF_KEY); - if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { + if (StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { String fileLoc = conf.get(CREDENTIALS_LOCATION); Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); job.getCredentials().addAll(cred); @@ -509,37 +501,34 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) } } } else { - String errorMsg = - format("Table '%s' does not exist and '%s' is set to no.", tableName, - CREATE_TABLE_CONF_KEY); + String errorMsg = format("Table '%s' does not exist and '%s' is set to no.", + tableName, CREATE_TABLE_CONF_KEY); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } } try (Table table = connection.getTable(tableName); - RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + RegionLocator regionLocator = connection.getRegionLocator(tableName)) { boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false); // if no.strict is false then check column family - if(!noStrict) { + if (!noStrict) { ArrayList unmatchedFamilies = new ArrayList<>(); Set cfSet = getColumnFamilies(columns); TableDescriptor tDesc = table.getDescriptor(); for (String cf : cfSet) { - if(!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { + if (!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { unmatchedFamilies.add(cf); } } - if(unmatchedFamilies.size() > 0) { + if (unmatchedFamilies.size() > 0) { ArrayList familyNames = new ArrayList<>(); for (ColumnFamilyDescriptor family : table.getDescriptor().getColumnFamilies()) { familyNames.add(family.getNameAsString()); } - String msg = - "Column Families " + unmatchedFamilies + " specified in " + COLUMNS_CONF_KEY - + " does not match with any of the table " + tableName - + " column families " + familyNames + ".\n" - + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY - + "=true.\n"; + String msg = "Column Families " + unmatchedFamilies + " specified in " + + COLUMNS_CONF_KEY + " does not match with any of the table " + tableName + + " column families " + familyNames + ".\n" + + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY + "=true.\n"; usage(msg); System.exit(-1); } @@ -556,7 +545,7 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), - regionLocator); + regionLocator); } } } else { @@ -567,9 +556,8 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) } if (mapperClass.equals(TsvImporterTextMapper.class)) { usage(TsvImporterTextMapper.class.toString() - + " should not be used for non bulkloading case. 
use " - + TsvImporterMapper.class.toString() - + " or custom mapper whose value type is Put."); + + " should not be used for non bulkloading case. use " + + TsvImporterMapper.class.toString() + " or custom mapper whose value type is Put."); System.exit(-1); } if (!isDryRun) { @@ -582,35 +570,36 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) if (isDryRun) { job.setOutputFormatClass(NullOutputFormat.class); job.getConfiguration().setStrings("io.serializations", - job.getConfiguration().get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + job.getConfiguration().get("io.serializations"), MutationSerialization.class.getName(), + ResultSerialization.class.getName(), CellSerialization.class.getName()); } TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Function.class /* Guava used by TsvParser */); + org.apache.hbase.thirdparty.com.google.common.base.Function.class /* + * Guava used by + * TsvParser + */); } } return job; } private static void createTable(Admin admin, TableName tableName, String[] columns) - throws IOException { - TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName); + throws IOException { + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); Set cfSet = getColumnFamilies(columns); for (String cf : cfSet) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)); } - LOG.warn(format("Creating table '%s' with '%s' columns and default descriptors.", - tableName, cfSet)); + LOG.warn( + format("Creating table '%s' with '%s' columns and default descriptors.", tableName, cfSet)); admin.createTable(builder.build()); } private static void deleteTable(Configuration conf, String[] args) { TableName tableName = TableName.valueOf(args[0]); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { try { admin.disableTable(tableName); } catch (TableNotEnabledException e) { @@ -618,8 +607,7 @@ private static void deleteTable(Configuration conf, String[] args) { } admin.deleteTable(tableName); } catch (IOException e) { - LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, - e.toString())); + LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, e.toString())); return; } LOG.info(format("Dry run: Deleted table '%s'.", tableName)); @@ -628,12 +616,13 @@ private static void deleteTable(Configuration conf, String[] args) { private static Set getColumnFamilies(String[] columns) { Set cfSet = new HashSet<>(); for (String aColumn : columns) { - if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn) + if ( + TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn) || TsvParser.TIMESTAMPKEY_COLUMN_SPEC.equals(aColumn) || TsvParser.CELL_VISIBILITY_COLUMN_SPEC.equals(aColumn) || TsvParser.CELL_TTL_COLUMN_SPEC.equals(aColumn) - || TsvParser.ATTRIBUTES_COLUMN_SPEC.equals(aColumn)) - continue; + || TsvParser.ATTRIBUTES_COLUMN_SPEC.equals(aColumn) + ) continue; // we are only concerned with the first one (in case this is a cf:cq) cfSet.add(aColumn.split(":", 2)[0]); } @@ -641,64 +630,56 @@ private static Set getColumnFamilies(String[] columns) { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. 
*/ private static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - String usage = - "Usage: " + NAME + " -D"+ COLUMNS_CONF_KEY + "=a,b,c \n" + - "\n" + - "Imports the given input directory of TSV data into the specified table.\n" + - "\n" + - "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + "\n" + - "option. This option takes the form of comma-separated column names, where each\n" + - "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + - "column name " + TsvParser.ROWKEY_COLUMN_SPEC + " is used to designate that this column should be used\n" + - "as the row key for each imported record. You must specify exactly one column\n" + - "to be the row key, and you must specify a column name for every column that exists in the\n" + - "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + - " designates that this column should be\n" + - "used as timestamp for each record. Unlike " + TsvParser.ROWKEY_COLUMN_SPEC + ", " + - TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + "\n" + - "You must specify at most one column as timestamp key for each imported record.\n" + - "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + - "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" + - "\n" + - "Other special columns that can be specified are " + TsvParser.CELL_TTL_COLUMN_SPEC + - " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + - TsvParser.CELL_TTL_COLUMN_SPEC + " designates that this column will be used " + - "as a Cell's Time To Live (TTL) attribute.\n" + - TsvParser.CELL_VISIBILITY_COLUMN_SPEC + " designates that this column contains the " + - "visibility label expression.\n" + - "\n" + - TsvParser.ATTRIBUTES_COLUMN_SPEC+" can be used to specify Operation Attributes per record.\n"+ - " Should be specified as key=>value where "+TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX+ " is used \n"+ - " as the seperator. Note that more than one OperationAttributes can be specified.\n"+ - "By default importtsv will load data directly into HBase. To instead generate\n" + - "HFiles of data to prepare for a bulk data load, pass the option:\n" + - " -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + - " Note: if you do not use this option, then the target table must already exist in HBase\n" + - "\n" + - "Other options that may be specified with -D include:\n" + - " -D" + DRY_RUN_CONF_KEY + "=true - Dry run mode. Data is not actually populated into" + - " table. 
If table does not exist, it is created but deleted in the end.\n" + - " -D" + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + - " -D" + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + - " -D" + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + - " '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + - " -D" + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + - " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + - DEFAULT_MAPPER.getName() + "\n" + - " -D" + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import\n" + - " -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + - " Note: if you set this to 'no', then the target table must already exist in HBase\n" + - " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + - "Default is false\n\n" + - "For performance consider the following options:\n" + - " -Dmapreduce.map.speculative=false\n" + - " -Dmapreduce.reduce.speculative=false"; + String usage = "Usage: " + NAME + " -D" + COLUMNS_CONF_KEY + "=a,b,c \n" + + "\n" + "Imports the given input directory of TSV data into the specified table.\n" + "\n" + + "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + "\n" + + "option. This option takes the form of comma-separated column names, where each\n" + + "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + + "column name " + TsvParser.ROWKEY_COLUMN_SPEC + + " is used to designate that this column should be used\n" + + "as the row key for each imported record. You must specify exactly one column\n" + + "to be the row key, and you must specify a column name for every column that exists in the\n" + + "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + + " designates that this column should be\n" + "used as timestamp for each record. Unlike " + + TsvParser.ROWKEY_COLUMN_SPEC + ", " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + + "\n" + "You must specify at most one column as timestamp key for each imported record.\n" + + "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + + "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" + + "\n" + "Other special columns that can be specified are " + TsvParser.CELL_TTL_COLUMN_SPEC + + " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + TsvParser.CELL_TTL_COLUMN_SPEC + + " designates that this column will be used " + "as a Cell's Time To Live (TTL) attribute.\n" + + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + " designates that this column contains the " + + "visibility label expression.\n" + "\n" + TsvParser.ATTRIBUTES_COLUMN_SPEC + + " can be used to specify Operation Attributes per record.\n" + + " Should be specified as key=>value where " + TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX + + " is used \n" + + " as the seperator. Note that more than one OperationAttributes can be specified.\n" + + "By default importtsv will load data directly into HBase. 
To instead generate\n" + + "HFiles of data to prepare for a bulk data load, pass the option:\n" + " -D" + + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + + " Note: if you do not use this option, then the target table must already exist in HBase\n" + + "\n" + "Other options that may be specified with -D include:\n" + " -D" + DRY_RUN_CONF_KEY + + "=true - Dry run mode. Data is not actually populated into" + + " table. If table does not exist, it is created but deleted in the end.\n" + " -D" + + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + " -D" + + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + " -D" + + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + " '-D" + + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + " -D" + + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + + " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + + DEFAULT_MAPPER.getName() + "\n" + " -D" + JOB_NAME_CONF_KEY + + "=jobName - use the specified mapreduce job name for the import\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + + " Note: if you set this to 'no', then the target table must already exist in HBase\n" + + " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + + "Default is false\n\n" + "For performance consider the following options:\n" + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"; System.err.println(usage); } @@ -718,8 +699,7 @@ public int run(String[] args) throws Exception { // Make sure columns are specified String[] columns = getConf().getStrings(COLUMNS_CONF_KEY); if (columns == null) { - usage("No columns specified. Please specify with -D" + - COLUMNS_CONF_KEY+"=..."); + usage("No columns specified. 
Please specify with -D" + COLUMNS_CONF_KEY + "=..."); return -1; } @@ -736,30 +716,27 @@ public int run(String[] args) throws Exception { // Make sure we have at most one column as the timestamp key int tskeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) - tskeysFound++; + if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) tskeysFound++; } if (tskeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); return -1; } int attrKeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) - attrKeysFound++; + if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) attrKeysFound++; } if (attrKeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.ATTRIBUTES_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.ATTRIBUTES_COLUMN_SPEC); return -1; } // Make sure one or more columns are specified excluding rowkey and // timestamp key if (columns.length - (rowkeysFound + tskeysFound + attrKeysFound) < 1) { - usage("One or more columns in addition to the row key and timestamp(optional) are required"); + usage( + "One or more columns in addition to the row key and timestamp(optional) are required"); return -1; } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java index 0127b51ab3fe..76c64e79780c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.mapreduce; @@ -38,18 +37,16 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Finds the Jar for a class. If the class is in a directory in the - * classpath, it creates a Jar on the fly with the contents of the directory - * and returns the path to that Jar. If a Jar is created, it is created in - * the system temporary directory. - * - * This file was forked from hadoop/common/branches/branch-2@1377176. + * Finds the Jar for a class. If the class is in a directory in the classpath, it creates a Jar on + * the fly with the contents of the directory and returns the path to that Jar. If a Jar is created, + * it is created in the system temporary directory. This file was forked from + * hadoop/common/branches/branch-2@1377176. 
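The ImportTsv usage text reformatted above lists the -D keys the tool understands (column spec, bulk output, dry run, separator, and so on). A minimal sketch of driving the tool programmatically is below; the table name, input path, column family "d", and output directory are hypothetical, and the property names are written out literally rather than through the ImportTsv constants, so verify them against the class before relying on this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;
import org.apache.hadoop.util.ToolRunner;

public class ImportTsvDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Column spec: first field is the row key, remaining fields map to family:qualifier.
    conf.set("importtsv.columns", "HBASE_ROW_KEY,d:qual1,d:qual2");
    // Optional: emit HFiles for a later bulk load instead of writing Puts directly.
    conf.set("importtsv.bulk.output", "/tmp/importtsv-out");
    // Optional: split on pipes instead of tabs.
    conf.set("importtsv.separator", "|");
    // Positional arguments are <tablename> <inputdir>, as in the usage text above.
    int exitCode = ToolRunner.run(conf, new ImportTsv(), new String[] { "myTable", "/tmp/tsv-in" });
    System.exit(exitCode);
  }
}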
*/ @InterfaceAudience.Private public final class JarFinder { - private static void copyToZipStream(File file, ZipEntry entry, - ZipOutputStream zos) throws IOException { + private static void copyToZipStream(File file, ZipEntry entry, ZipOutputStream zos) + throws IOException { InputStream is = new FileInputStream(file); try { zos.putNextEntry(entry); @@ -68,8 +65,7 @@ private static void copyToZipStream(File file, ZipEntry entry, } } - public static void jarDir(File dir, String relativePath, ZipOutputStream zos) - throws IOException { + public static void jarDir(File dir, String relativePath, ZipOutputStream zos) throws IOException { Preconditions.checkNotNull(relativePath, "relativePath"); Preconditions.checkNotNull(zos, "zos"); @@ -89,8 +85,8 @@ public static void jarDir(File dir, String relativePath, ZipOutputStream zos) zos.close(); } - private static void zipDir(File dir, String relativePath, ZipOutputStream zos, - boolean start) throws IOException { + private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start) + throws IOException { String[] dirList = dir.list(); if (dirList == null) { return; @@ -107,8 +103,7 @@ private static void zipDir(File dir, String relativePath, ZipOutputStream zos, String filePath = f.getPath(); File file = new File(filePath); zipDir(file, relativePath + f.getName() + "/", zos, false); - } - else { + } else { String path = relativePath + f.getName(); if (!path.equals(JarFile.MANIFEST_NAME)) { ZipEntry anEntry = new ZipEntry(path); @@ -125,22 +120,18 @@ private static void createJar(File dir, File jarFile) throws IOException { File jarDir = jarFile.getParentFile(); if (!jarDir.exists()) { if (!jarDir.mkdirs()) { - throw new IOException(MessageFormat.format("could not create dir [{0}]", - jarDir)); + throw new IOException(MessageFormat.format("could not create dir [{0}]", jarDir)); } } try (FileOutputStream fos = new FileOutputStream(jarFile); - JarOutputStream jos = new JarOutputStream(fos)) { + JarOutputStream jos = new JarOutputStream(fos)) { jarDir(dir, "", jos); } } /** - * Returns the full path to the Jar containing the class. It always return a - * JAR. - * + * Returns the full path to the Jar containing the class. It always return a JAR. * @param klass class. - * * @return path to the Jar containing the class. 
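To illustrate the getJar contract documented just above: a job driver could resolve the jar for one of its own classes and attach it to the job. This is only a sketch; JarFinder is audience-private to HBase, and the class and job names below are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.JarFinder;
import org.apache.hadoop.mapreduce.Job;

public class JarFinderSketch {
  public static void main(String[] args) throws Exception {
    // Resolve the jar containing this class; if the class only exists as a .class file in a
    // classpath directory, JarFinder packages that directory into a temporary jar on the fly.
    String jar = JarFinder.getJar(JarFinderSketch.class);
    Job job = Job.getInstance(new Configuration(), "jarfinder-sketch");
    if (jar != null) {
      job.setJar(jar); // ship the resolved jar with the submitted job
    }
    System.out.println("Resolved jar: " + jar);
  }
}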
*/ public static String getJar(Class klass) { @@ -149,8 +140,7 @@ public static String getJar(Class klass) { if (loader != null) { String class_file = klass.getName().replaceAll("\\.", "/") + ".class"; try { - for (Enumeration itr = loader.getResources(class_file); - itr.hasMoreElements(); ) { + for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) { URL url = (URL) itr.nextElement(); String path = url.getPath(); if (path.startsWith("file:")) { @@ -160,8 +150,7 @@ public static String getJar(Class klass) { if ("jar".equals(url.getProtocol())) { path = URLDecoder.decode(path, "UTF-8"); return path.replaceAll("!.*$", ""); - } - else if ("file".equals(url.getProtocol())) { + } else if ("file".equals(url.getProtocol())) { String klassName = klass.getName(); klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); @@ -178,13 +167,13 @@ else if ("file".equals(url.getProtocol())) { return tempJar.getAbsolutePath(); } } - } - catch (IOException e) { + } catch (IOException e) { throw new RuntimeException(e); } } return null; } - private JarFinder() {} + private JarFinder() { + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java index 6410bf8726c6..fb42e3328337 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,42 +6,33 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0
    - *

    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.List; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Create 3 level tree directory, first level is using table name as parent - * directory and then use family name as child directory, and all related HFiles - * for one family are under child directory - * -tableName1 - * -columnFamilyName1 - * -columnFamilyName2 - * -HFiles - * -tableName2 - * -columnFamilyName1 - * -HFiles - * -columnFamilyName2 + * Create 3 level tree directory, first level is using table name as parent directory and then use + * family name as child directory, and all related HFiles for one family are under child directory + * -tableName1 -columnFamilyName1 -columnFamilyName2 -HFiles -tableName2 -columnFamilyName1 -HFiles + * -columnFamilyName2 */ @InterfaceAudience.Public public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { @@ -50,13 +41,11 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { /** * Creates a composite key to use as a mapper output key when using * MultiTableHFileOutputFormat.configureIncrementaLoad to set up bulk ingest job - * * @param tableName Name of the Table - Eg: TableName.getNameAsString() * @param suffix Usually represents a rowkey when creating a mapper key or column family - * @return byte[] representation of composite key + * @return byte[] representation of composite key */ - public static byte[] createCompositeKey(byte[] tableName, - byte[] suffix) { + public static byte[] createCompositeKey(byte[] tableName, byte[] suffix) { return combineTableNameSuffix(tableName, suffix); } @@ -64,8 +53,7 @@ public static byte[] createCompositeKey(byte[] tableName, * Alternate api which accepts an ImmutableBytesWritable for the suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(byte[] tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(byte[] tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName, suffix.get()); } @@ -74,26 +62,22 @@ public static byte[] createCompositeKey(byte[] tableName, * suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(String tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName.getBytes(Charset.forName("UTF-8")), suffix.get()); } /** * Analogous to - * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, - * this function 
will configure the requisite number of reducers to write HFiles for multple - * tables simultaneously - * + * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, this + * function will configure the requisite number of reducers to write HFiles for multple tables + * simultaneously * @param job See {@link org.apache.hadoop.mapreduce.Job} - * @param multiTableDescriptors Table descriptor and region locator pairs - * @throws IOException + * @param multiTableDescriptors Table descriptor and region locator pairs n */ - public static void configureIncrementalLoad(Job job, List - multiTableDescriptors) - throws IOException { + public static void configureIncrementalLoad(Job job, List multiTableDescriptors) + throws IOException { MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors, - MultiTableHFileOutputFormat.class); + MultiTableHFileOutputFormat.class); } final private static int validateCompositeKey(byte[] keyBytes) { @@ -102,8 +86,8 @@ final private static int validateCompositeKey(byte[] keyBytes) { // Either the separator was not found or a tablename wasn't present or a key wasn't present if (separatorIdx == -1) { - throw new IllegalArgumentException("Invalid format for composite key [" + Bytes - .toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); + throw new IllegalArgumentException("Invalid format for composite key [" + + Bytes.toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); } return separatorIdx; } @@ -115,6 +99,6 @@ protected static byte[] getTableName(byte[] keyBytes) { protected static byte[] getSuffix(byte[] keyBytes) { int separatorIdx = validateCompositeKey(keyBytes); - return Bytes.copy(keyBytes, separatorIdx+1, keyBytes.length - separatorIdx - 1); + return Bytes.copy(keyBytes, separatorIdx + 1, keyBytes.length - separatorIdx - 1); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java index b69b486ba277..847d4e2ffb88 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,16 +20,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Scan; +import org.apache.yetus.audience.InterfaceAudience; /** - * Convert HBase tabular data from multiple scanners into a format that - * is consumable by Map/Reduce. - * + * Convert HBase tabular data from multiple scanners into a format that is consumable by Map/Reduce. *
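Stepping back to the MultiTableHFileOutputFormat hunks just above: createCompositeKey prefixes the row-key suffix with the destination table name so that a single bulk-load job can feed several tables. A hedged sketch of forming such a mapper output key follows; the table name and row key are hypothetical.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultiTableHFileOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;

public class CompositeKeySketch {
  public static void main(String[] args) {
    byte[] tableName = Bytes.toBytes("table1");   // hypothetical destination table
    byte[] rowKey = Bytes.toBytes("row-0001");    // hypothetical row key (the "suffix")
    // The composite key becomes the mapper output key once the job has been configured with
    // MultiTableHFileOutputFormat.configureIncrementalLoad(...).
    byte[] composite = MultiTableHFileOutputFormat.createCompositeKey(tableName, rowKey);
    ImmutableBytesWritable mapperKey = new ImmutableBytesWritable(composite);
    System.out.println(Bytes.toStringBinary(mapperKey.get()));
  }
}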

    * Usage example *

    @@ -49,13 +46,12 @@ * scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, table2); * scans.add(scan2); * - * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, - * IntWritable.class, job); + * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, IntWritable.class, + * job); * */ @InterfaceAudience.Public -public class MultiTableInputFormat extends MultiTableInputFormatBase implements - Configurable { +public class MultiTableInputFormat extends MultiTableInputFormatBase implements Configurable { /** Job parameter that specifies the scan list. */ public static final String SCANS = "hbase.mapreduce.scans"; @@ -65,7 +61,6 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -75,20 +70,17 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to set the details for the tables to - * be scanned. - * + * Sets the configuration. This is used to set the details for the tables to be scanned. * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] rawScans = conf.getStrings(SCANS); if (rawScans.length <= 0) { - throw new IllegalArgumentException("There must be at least 1 scan configuration set to : " - + SCANS); + throw new IllegalArgumentException( + "There must be at least 1 scan configuration set to : " + SCANS); } List scans = new ArrayList<>(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java index 57b7c537adba..a36fe716f011 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -47,13 +46,12 @@ import org.slf4j.LoggerFactory; /** - * A base for {@link MultiTableInputFormat}s. Receives a list of - * {@link Scan} instances that define the input tables and - * filters etc. Subclasses may use other TableRecordReader implementations. + * A base for {@link MultiTableInputFormat}s. Receives a list of {@link Scan} instances that define + * the input tables and filters etc. Subclasses may use other TableRecordReader implementations. 
*/ @InterfaceAudience.Public -public abstract class MultiTableInputFormatBase extends - InputFormat { +public abstract class MultiTableInputFormatBase + extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(MultiTableInputFormatBase.class); @@ -64,27 +62,24 @@ public abstract class MultiTableInputFormatBase extends private TableRecordReader tableRecordReader = null; /** - * Builds a TableRecordReader. If no TableRecordReader was provided, uses the - * default. - * - * @param split The split to work with. + * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default. + * @param split The split to work with. * @param context The current context. * @return The newly created record reader. - * @throws IOException When creating the reader fails. + * @throws IOException When creating the reader fails. * @throws InterruptedException when record reader initialization fails * @see InputFormat#createRecordReader(InputSplit, TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { TableSplit tSplit = (TableSplit) split; LOG.info(MessageFormat.format("Input split length: {0} bytes.", tSplit.getLength())); if (tSplit.getTable() == null) { throw new IOException("Cannot create a record reader because of a" - + " previous error. Please look at the previous logs lines from" - + " the task's full log for more details."); + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."); } final Connection connection = ConnectionFactory.createConnection(context.getConfiguration()); Table table = connection.getTable(tSplit.getTable()); @@ -125,7 +120,7 @@ public float getProgress() throws IOException, InterruptedException { @Override public void initialize(InputSplit inputsplit, TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { trr.initialize(inputsplit, context); } @@ -144,9 +139,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. - * + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. 
@@ -161,8 +155,7 @@ public List getSplits(JobContext context) throws IOException { Map> tableMaps = new HashMap<>(); for (Scan scan : scans) { byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME); - if (tableNameBytes == null) - throw new IOException("A scan object did not have a table name"); + if (tableNameBytes == null) throw new IOException("A scan object did not have a table name"); TableName tableName = TableName.valueOf(tableNameBytes); @@ -183,14 +176,14 @@ public List getSplits(JobContext context) throws IOException { TableName tableName = entry.getKey(); List scanList = entry.getValue(); try (Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { - RegionSizeCalculator sizeCalculator = new RegionSizeCalculator( - regionLocator, conn.getAdmin()); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionSizeCalculator sizeCalculator = + new RegionSizeCalculator(regionLocator, conn.getAdmin()); Pair keys = regionLocator.getStartEndKeys(); for (Scan scan : scanList) { if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { - throw new IOException("Expecting at least one region for table : " - + tableName.getNameAsString()); + throw new IOException( + "Expecting at least one region for table : " + tableName.getNameAsString()); } int count = 0; @@ -202,29 +195,28 @@ public List getSplits(JobContext context) throws IOException { continue; } - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || Bytes.compareTo(stopRow, - keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; - - HRegionLocation hregionLocation = regionLocator.getRegionLocation( - keys.getFirst()[i], false); + if ( + (startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0) + ) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; + + HRegionLocation hregionLocation = + regionLocator.getRegionLocation(keys.getFirst()[i], false); String regionHostname = hregionLocation.getHostname(); RegionInfo regionInfo = hregionLocation.getRegion(); String encodedRegionName = regionInfo.getEncodedName(); - long regionSize = sizeCalculator.getRegionSize( - regionInfo.getRegionName()); + long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName()); - TableSplit split = new TableSplit(table.getName(), - scan, splitStart, splitStop, regionHostname, - encodedRegionName, regionSize); + TableSplit split = new TableSplit(table.getName(), scan, splitStart, splitStop, + regionHostname, encodedRegionName, regionSize); splits.add(split); @@ -242,29 +234,25 @@ public List getSplits(JobContext context) throws IOException { } /** - * Test if the given region is to be included in the InputSplit while - * splitting the regions of a table. 
+ * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *

    - * This optimization is effective when there is a specific reasoning to - * exclude an entire region from the M-R job, (and hence, not contributing to - * the InputSplit), given the start and end keys of the same.
    - * Useful when we need to remember the last-processed top record and revisit - * the [last, current) interval for M-R processing, continuously. In addition - * to reducing InputSplits, reduces the load on the region server as well, due - * to the ordering of the keys.
    + * This optimization is effective when there is a specific reasoning to exclude an entire region + * from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys + * of the same.
    + * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
    + *
    + * Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
    - * Note: It is possible that endKey.length() == 0 , for the last - * (recent) region.
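The includeRegionInSplit hook whose javadoc is being re-wrapped in this hunk is, as the surrounding text notes, the override point for dropping whole regions from a job. A minimal sketch of such a subclass follows; the key-prefix rule is entirely hypothetical.

import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixFilteredMultiTableInputFormat extends MultiTableInputFormat {
  // Hypothetical rule: only regions whose start key begins with "2024" contribute splits.
  private static final byte[] WANTED_PREFIX = Bytes.toBytes("2024");

  @Override
  protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
    // The first region of a table has an empty start key; keep it so that range is still covered.
    if (startKey.length == 0) {
      return true;
    }
    return Bytes.startsWith(startKey, WANTED_PREFIX);
  }
}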
    - * Override this method, if you want to bulk exclude regions altogether from - * M-R. By default, no region is excluded( i.e. all regions are included). - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region - * @param endKey End key of the region - * @return true, if this region needs to be included as part of the input - * (default). + * @param endKey End key of the region + * @return true, if this region needs to be included as part of the input (default). */ - protected boolean includeRegionInSplit(final byte[] startKey, - final byte[] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -277,7 +265,6 @@ protected List getScans() { /** * Allows subclasses to set the list of {@link Scan} objects. - * * @param scans The list of {@link Scan} used to define the input */ protected void setScans(List scans) { @@ -286,9 +273,7 @@ protected void setScans(List scans) { /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java index 2a4fae944095..5a5d11497552 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -32,9 +27,9 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.JobContext; @@ -42,21 +37,22 @@ import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** *

    - * Hadoop output format that writes to one or more HBase tables. The key is - * taken to be the table name while the output value must be either a - * {@link Put} or a {@link Delete} instance. All tables must already exist, and - * all Puts and Deletes must reference only valid column families. + * Hadoop output format that writes to one or more HBase tables. The key is taken to be the table + * name while the output value must be either a {@link Put} or a {@link Delete} instance. + * All tables must already exist, and all Puts and Deletes must reference only valid column + * families. *

    - * *

    - * Write-ahead logging (WAL) for Puts can be disabled by setting - * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}. - * Note that disabling write-ahead logging is only appropriate for jobs where - * loss of data due to region server failure can be tolerated (for example, - * because it is easy to rerun a bulk import). + * Write-ahead logging (WAL) for Puts can be disabled by setting {@link #WAL_PROPERTY} to + * {@link #WAL_OFF}. Default value is {@link #WAL_ON}. Note that disabling write-ahead logging is + * only appropriate for jobs where loss of data due to region server failure can be tolerated (for + * example, because it is easy to rerun a bulk import). *
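To ground the contract described in this javadoc: the mapper or reducer emits the destination table name as the output key and a Put or Delete as the value. Below is a hedged sketch of a reducer that routes detail rows to one table and a summary row to another; the table names, family "d", and qualifiers are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MultiTableRoutingReducer
    extends Reducer<Text, Text, ImmutableBytesWritable, Put> {

  private static final ImmutableBytesWritable EVENTS_TABLE =
      new ImmutableBytesWritable(Bytes.toBytes("events"));          // hypothetical table
  private static final ImmutableBytesWritable SUMMARY_TABLE =
      new ImmutableBytesWritable(Bytes.toBytes("events_summary"));  // hypothetical table

  @Override
  protected void reduce(Text key, Iterable<Text> values, Context context)
      throws IOException, InterruptedException {
    long count = 0;
    for (Text value : values) {
      Put detail = new Put(Bytes.toBytes(key.toString() + "-" + count));
      detail.addColumn(Bytes.toBytes("d"), Bytes.toBytes("raw"), Bytes.toBytes(value.toString()));
      context.write(EVENTS_TABLE, detail);   // key = destination table, value = mutation
      count++;
    }
    Put summary = new Put(Bytes.toBytes(key.toString()));
    summary.addColumn(Bytes.toBytes("d"), Bytes.toBytes("count"), Bytes.toBytes(count));
    context.write(SUMMARY_TABLE, summary);
  }
}

The driver would pair this with job.setOutputFormatClass(MultiTableOutputFormat.class); switching the WAL off via the WAL_PROPERTY key, as the javadoc warns, is only appropriate when the job can simply be rerun after a region server failure.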

    */ @InterfaceAudience.Public @@ -67,11 +63,12 @@ public class MultiTableOutputFormat extends OutputFormat { + protected static class MultiTableRecordWriter + extends RecordWriter { private static final Logger LOG = LoggerFactory.getLogger(MultiTableRecordWriter.class); Connection connection; Map mutatorMap = new HashMap<>(); @@ -79,36 +76,29 @@ protected static class MultiTableRecordWriter extends boolean useWriteAheadLogging; /** - * @param conf - * HBaseConfiguration to used - * @param useWriteAheadLogging - * whether to use write ahead logging. This can be turned off ( - * false) to improve performance when bulk loading data. + * n * HBaseConfiguration to used n * whether to use write ahead logging. This can be turned off + * ( false) to improve performance when bulk loading data. */ - public MultiTableRecordWriter(Configuration conf, - boolean useWriteAheadLogging) throws IOException { - LOG.debug("Created new MultiTableRecordReader with WAL " - + (useWriteAheadLogging ? "on" : "off")); + public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging) + throws IOException { + LOG.debug( + "Created new MultiTableRecordReader with WAL " + (useWriteAheadLogging ? "on" : "off")); this.conf = conf; this.useWriteAheadLogging = useWriteAheadLogging; } /** - * @param tableName - * the name of the table, as a string - * @return the named mutator - * @throws IOException - * if there is a problem opening a table + * n * the name of the table, as a string + * @return the named mutator n * if there is a problem opening a table */ BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException { - if(this.connection == null){ + if (this.connection == null) { this.connection = ConnectionFactory.createConnection(conf); } if (!mutatorMap.containsKey(tableName)) { - LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing"); + LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get()) + "\" for writing"); - BufferedMutator mutator = - connection.getBufferedMutator(TableName.valueOf(tableName.get())); + BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(tableName.get())); mutatorMap.put(tableName, mutator); } return mutatorMap.get(tableName); @@ -125,14 +115,8 @@ public void close(TaskAttemptContext context) throws IOException { } /** - * Writes an action (Put or Delete) to the specified table. - * - * @param tableName - * the table being updated. - * @param action - * the update, either a put or a delete. - * @throws IllegalArgumentException - * if the action is not a put or a delete. + * Writes an action (Put or Delete) to the specified table. n * the table being updated. n * the + * update, either a put or a delete. n * if the action is not a put or a delete. */ @Override public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException { @@ -140,37 +124,33 @@ public void write(ImmutableBytesWritable tableName, Mutation action) throws IOEx // The actions are not immutable, so we defensively copy them if (action instanceof Put) { Put put = new Put((Put) action); - put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL - : Durability.SKIP_WAL); + put.setDurability(useWriteAheadLogging ? 
Durability.SYNC_WAL : Durability.SKIP_WAL); mutator.mutate(put); } else if (action instanceof Delete) { Delete delete = new Delete((Delete) action); mutator.mutate(delete); - } else - throw new IllegalArgumentException( - "action must be either Delete or Put"); + } else throw new IllegalArgumentException("action must be either Delete or Put"); } } @Override - public void checkOutputSpecs(JobContext context) throws IOException, - InterruptedException { + public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException { // we can't know ahead of time if it's going to blow up when the user // passes a table name that doesn't exist, so nothing useful here. } @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableOutputCommitter(); } @Override public RecordWriter getRecordWriter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); return new MultiTableRecordWriter(HBaseConfiguration.create(conf), - conf.getBoolean(WAL_PROPERTY, WAL_ON)); + conf.getBoolean(WAL_PROPERTY, WAL_ON)); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java index 07c0820c1aea..8b15140b46a7 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,43 +15,35 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; +import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * MultiTableSnapshotInputFormat generalizes - * {@link TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link TableSnapshotInputFormat} - * and thus has the same performance advantages; - * see {@link TableSnapshotInputFormat} for - * more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. 
For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan - * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob - * (Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, Path)} + * MultiTableSnapshotInputFormat generalizes {@link TableSnapshotInputFormat} allowing a MapReduce + * job to run over one or more table snapshots, with one or more scans configured for each. + * Internally, the input format delegates to {@link TableSnapshotInputFormat} and thus has the same + * performance advantages; see {@link TableSnapshotInputFormat} for more details. Usage is similar + * to TableSnapshotInputFormat, with the following exception: initMultiTableSnapshotMapperJob takes + * in a map from snapshot name to a collection of scans. For each snapshot in the map, each + * corresponding scan will be applied; the overall dataset for the job is defined by the + * concatenation of the regions and tables included in each snapshot/scan pair. + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob (Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, Path)} * can be used to configure the job. - *
    {@code
    + *
    + * 
    + * {@code
      * Job job = new Job(conf);
      * Map> snapshotScans = ImmutableMap.of(
      *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
    @@ -63,14 +55,11 @@
      *      MyMapOutputValueWritable.class, job, true, restoreDir);
      * }
      * 
    - * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link TableSnapshotInputFormat} for more notes on - * permissioning; the same caveats apply here. * + * Internally, this input format restores each snapshot into a subdirectory of the given tmp + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link TableSnapshotInputFormat} for more notes on permissioning; the same caveats apply here. * @see TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ @@ -85,9 +74,9 @@ public MultiTableSnapshotInputFormat() { @Override public List getSplits(JobContext jobContext) - throws IOException, InterruptedException { + throws IOException, InterruptedException { List splits = - delegate.getSplits(jobContext.getConfiguration()); + delegate.getSplits(jobContext.getConfiguration()); List rtn = Lists.newArrayListWithCapacity(splits.size()); for (TableSnapshotInputFormatImpl.InputSplit split : splits) { @@ -98,7 +87,7 @@ public List getSplits(JobContext jobContext) } public static void setInput(Configuration configuration, - Map> snapshotScans, Path tmpRestoreDir) throws IOException { + Map> snapshotScans, Path tmpRestoreDir) throws IOException { new MultiTableSnapshotInputFormatImpl().setInput(configuration, snapshotScans, tmpRestoreDir); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java index 3e9e377a27d1..3fc992235305 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; @@ -42,8 +41,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * Shared implementation of mapreduce code over multiple table snapshots. - * Utilized by both mapreduce + * Shared implementation of mapreduce code over multiple table snapshots. Utilized by both mapreduce * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat} and mapred * {@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations. 
*/ @@ -51,12 +49,12 @@ @InterfaceStability.Evolving public class MultiTableSnapshotInputFormatImpl { private static final Logger LOG = - LoggerFactory.getLogger(MultiTableSnapshotInputFormatImpl.class); + LoggerFactory.getLogger(MultiTableSnapshotInputFormatImpl.class); public static final String RESTORE_DIRS_KEY = - "hbase.MultiTableSnapshotInputFormat.restore.snapshotDirMapping"; + "hbase.MultiTableSnapshotInputFormat.restore.snapshotDirMapping"; public static final String SNAPSHOT_TO_SCANS_KEY = - "hbase.MultiTableSnapshotInputFormat.snapshotsToScans"; + "hbase.MultiTableSnapshotInputFormat.snapshotsToScans"; /** * Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of @@ -65,13 +63,13 @@ public class MultiTableSnapshotInputFormatImpl { * Sets: {@link #RESTORE_DIRS_KEY}, {@link #SNAPSHOT_TO_SCANS_KEY} */ public void setInput(Configuration conf, Map> snapshotScans, - Path restoreDir) throws IOException { + Path restoreDir) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); setSnapshotToScans(conf, snapshotScans); Map restoreDirs = - generateSnapshotToRestoreDirMapping(snapshotScans.keySet(), restoreDir); + generateSnapshotToRestoreDirMapping(snapshotScans.keySet(), restoreDir); setSnapshotDirs(conf, restoreDirs); restoreSnapshots(conf, restoreDirs, fs); } @@ -79,13 +77,11 @@ public void setInput(Configuration conf, Map> snapshotS /** * Return the list of splits extracted from the scans/snapshots pushed to conf by * {@link #setInput(Configuration, Map, Path)} - * * @param conf Configuration to determine splits from - * @return Return the list of splits extracted from the scans/snapshots pushed to conf - * @throws IOException + * @return Return the list of splits extracted from the scans/snapshots pushed to conf n */ public List getSplits(Configuration conf) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); @@ -99,13 +95,13 @@ public List getSplits(Configuration con Path restoreDir = snapshotsToRestoreDirs.get(snapshotName); SnapshotManifest manifest = - TableSnapshotInputFormatImpl.getSnapshotManifest(conf, snapshotName, rootDir, fs); + TableSnapshotInputFormatImpl.getSnapshotManifest(conf, snapshotName, rootDir, fs); List regionInfos = - TableSnapshotInputFormatImpl.getRegionInfosFromManifest(manifest); + TableSnapshotInputFormatImpl.getRegionInfosFromManifest(manifest); for (Scan scan : entry.getValue()) { List splits = - TableSnapshotInputFormatImpl.getSplits(scan, manifest, regionInfos, restoreDir, conf); + TableSnapshotInputFormatImpl.getSplits(scan, manifest, regionInfos, restoreDir, conf); rtn.addAll(splits); } } @@ -115,17 +111,15 @@ public List getSplits(Configuration con /** * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by * {@link #setSnapshotToScans(Configuration, Map)} - * * @param conf Configuration to extract name -> list<scan> mappings from. 
- * @return the snapshot name -> list<scan> mapping pushed to configuration - * @throws IOException + * @return the snapshot name -> list<scan> mapping pushed to configuration n */ public Map> getSnapshotsToScans(Configuration conf) throws IOException { Map> rtn = Maps.newHashMap(); - for (Map.Entry entry : ConfigurationUtil - .getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) { + for (Map.Entry entry : ConfigurationUtil.getKeyValues(conf, + SNAPSHOT_TO_SCANS_KEY)) { String snapshotName = entry.getKey(); String scan = entry.getValue(); @@ -142,14 +136,10 @@ public Map> getSnapshotsToScans(Configuration conf) thr } /** - * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) - * - * @param conf - * @param snapshotScans - * @throws IOException + * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn */ public void setSnapshotToScans(Configuration conf, Map> snapshotScans) - throws IOException { + throws IOException { // flatten out snapshotScans for serialization to the job conf List> snapshotToSerializedScans = Lists.newArrayList(); @@ -160,7 +150,7 @@ public void setSnapshotToScans(Configuration conf, Map> // serialize all scans and map them to the appropriate snapshot for (Scan scan : scans) { snapshotToSerializedScans.add(new AbstractMap.SimpleImmutableEntry<>(snapshotName, - TableMapReduceUtil.convertScanToString(scan))); + TableMapReduceUtil.convertScanToString(scan))); } } @@ -170,10 +160,8 @@ public void setSnapshotToScans(Configuration conf, Map> /** * Retrieve the directories into which snapshots have been restored from * ({@link #RESTORE_DIRS_KEY}) - * * @param conf Configuration to extract restore directories from - * @return the directories into which snapshots have been restored from - * @throws IOException + * @return the directories into which snapshots have been restored from n */ public Map getSnapshotDirs(Configuration conf) throws IOException { List> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY); @@ -197,20 +185,19 @@ public void setSnapshotDirs(Configuration conf, Map snapshotDirs) } /** - * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and - * return a map from the snapshot to the restore directory. - * + * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a + * map from the snapshot to the restore directory. 
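Tying the Impl plumbing above together: callers normally go through TableMapReduceUtil.initMultiTableSnapshotMapperJob, but the lower-level entry point is MultiTableSnapshotInputFormat.setInput, which persists the snapshot-to-scan and restore-directory mappings this class later reads back. A hedged sketch, with hypothetical snapshot names, key ranges, and restore directory:

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotScanSetupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical snapshots, each scanned over a hypothetical key range.
    Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
    snapshotScans.put("snapshot1",
        Arrays.asList(new Scan().withStartRow(Bytes.toBytes("a")).withStopRow(Bytes.toBytes("b"))));
    snapshotScans.put("snapshot2",
        Arrays.asList(new Scan().withStartRow(Bytes.toBytes("1")).withStopRow(Bytes.toBytes("2"))));
    // Directory under which each snapshot is restored into its own random subdirectory.
    Path restoreDir = new Path("/tmp/snapshot-restore");
    MultiTableSnapshotInputFormat.setInput(conf, snapshotScans, restoreDir);
  }
}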
* @param snapshots collection of snapshot names to restore * @param baseRestoreDir base directory under which all snapshots in snapshots will be restored * @return a mapping from snapshot name to the directory in which that snapshot has been restored */ private Map generateSnapshotToRestoreDirMapping(Collection snapshots, - Path baseRestoreDir) { + Path baseRestoreDir) { Map rtn = Maps.newHashMap(); for (String snapshotName : snapshots) { Path restoreSnapshotDir = - new Path(baseRestoreDir, snapshotName + "__" + UUID.randomUUID().toString()); + new Path(baseRestoreDir, snapshotName + "__" + UUID.randomUUID().toString()); rtn.put(snapshotName, restoreSnapshotDir); } @@ -219,13 +206,12 @@ private Map generateSnapshotToRestoreDirMapping(Collection /** * Restore each (snapshot name, restore directory) pair in snapshotToDir - * * @param conf configuration to restore with * @param snapshotToDir mapping from snapshot names to restore directories * @param fs filesystem to do snapshot restoration on */ public void restoreSnapshots(Configuration conf, Map snapshotToDir, FileSystem fs) - throws IOException { + throws IOException { // TODO: restore from record readers to parallelize. Path rootDir = CommonFSUtils.getRootDir(conf); @@ -233,13 +219,13 @@ public void restoreSnapshots(Configuration conf, Map snapshotToDir String snapshotName = entry.getKey(); Path restoreDir = entry.getValue(); LOG.info("Restoring snapshot " + snapshotName + " into " + restoreDir - + " for MultiTableSnapshotInputFormat"); + + " for MultiTableSnapshotInputFormat"); restoreSnapshot(conf, snapshotName, rootDir, restoreDir, fs); } } void restoreSnapshot(Configuration conf, String snapshotName, Path rootDir, Path restoreDir, - FileSystem fs) throws IOException { + FileSystem fs) throws IOException { RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java index 4ad1935f37a1..04f4dbf960cb 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,25 +42,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Multithreaded implementation for @link org.apache.hbase.mapreduce.TableMapper *

- * It can be used instead when the Map operation is not CPU
- * bound in order to improve throughput.
+ * It can be used instead when the Map operation is not CPU bound in order to improve throughput.
  * <p>
  * Mapper implementations using this MapRunnable must be thread-safe.
  * <p>
- * The Map-Reduce job has to be configured with the mapper to use via
- * {@link #setMapperClass} and the number of thread the thread-pool can use with the
- * {@link #getNumberOfThreads} method. The default value is 10 threads.
+ * The Map-Reduce job has to be configured with the mapper to use via {@link #setMapperClass} and
+ * the number of thread the thread-pool can use with the {@link #getNumberOfThreads} method. The
+ * default value is 10 threads.
  * <p>
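A minimal sketch of the configuration this javadoc describes, assuming an existing table named "myTable" and the stock IdentityTableMapper as the delegate; the table name, scan and thread count are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
    import org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class MultithreadedMapperDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "multithreaded-table-map");
        // MultithreadedTableMapper is the mapper the framework runs; the real work is
        // done by the delegate mapper registered below.
        TableMapReduceUtil.initTableMapperJob("myTable", new Scan(),
          MultithreadedTableMapper.class, ImmutableBytesWritable.class, Result.class, job);
        MultithreadedTableMapper.setMapperClass(job, IdentityTableMapper.class);
        MultithreadedTableMapper.setNumberOfThreads(job, 20); // default is 10
        job.setNumReduceTasks(0);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }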

    */ @InterfaceAudience.Private public class MultithreadedTableMapper extends TableMapper { private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTableMapper.class); - private Class> mapClass; + private Class> mapClass; private Context outer; private ExecutorService executor; public static final String NUMBER_OF_THREADS = "hbase.mapreduce.multithreadedmapper.threads"; @@ -72,51 +70,46 @@ public class MultithreadedTableMapper extends TableMapper { * @return the number of threads */ public static int getNumberOfThreads(JobContext job) { - return job.getConfiguration(). - getInt(NUMBER_OF_THREADS, 10); + return job.getConfiguration().getInt(NUMBER_OF_THREADS, 10); } /** * Set the number of threads in the pool for running maps. - * @param job the job to modify + * @param job the job to modify * @param threads the new number of threads */ public static void setNumberOfThreads(Job job, int threads) { - job.getConfiguration().setInt(NUMBER_OF_THREADS, - threads); + job.getConfiguration().setInt(NUMBER_OF_THREADS, threads); } /** * Get the application's mapper class. * @param the map's output key type * @param the map's output value type - * @param job the job + * @param job the job * @return the mapper class to run */ @SuppressWarnings("unchecked") - public static - Class> getMapperClass(JobContext job) { - return (Class>) - job.getConfiguration().getClass( MAPPER_CLASS, - Mapper.class); + public static Class> + getMapperClass(JobContext job) { + return (Class>) job.getConfiguration() + .getClass(MAPPER_CLASS, Mapper.class); } /** * Set the application's mapper class. * @param the map output key type * @param the map output value type - * @param job the job to modify - * @param cls the class to use as the mapper + * @param job the job to modify + * @param cls the class to use as the mapper */ - public static - void setMapperClass(Job job, - Class> cls) { + public static void setMapperClass(Job job, + Class> cls) { if (MultithreadedTableMapper.class.isAssignableFrom(cls)) { - throw new IllegalArgumentException("Can't have recursive " + - "MultithreadedTableMapper instances."); + throw new IllegalArgumentException( + "Can't have recursive " + "MultithreadedTableMapper instances."); } - job.getConfiguration().setClass(MAPPER_CLASS, - cls, Mapper.class); + job.getConfiguration().setClass(MAPPER_CLASS, cls, Mapper.class); } /** @@ -128,11 +121,10 @@ public void run(Context context) throws IOException, InterruptedException { int numberOfThreads = getNumberOfThreads(context); mapClass = getMapperClass(context); if (LOG.isDebugEnabled()) { - LOG.debug("Configuring multithread runner to use " + numberOfThreads + - " threads"); + LOG.debug("Configuring multithread runner to use " + numberOfThreads + " threads"); } executor = Executors.newFixedThreadPool(numberOfThreads); - for(int i=0; i < numberOfThreads; ++i) { + for (int i = 0; i < numberOfThreads; ++i) { MapRunner thread = new MapRunner(context); executor.execute(thread); } @@ -143,8 +135,7 @@ public void run(Context context) throws IOException, InterruptedException { } } - private class SubMapRecordReader - extends RecordReader { + private class SubMapRecordReader extends RecordReader { private ImmutableBytesWritable key; private Result value; private Configuration conf; @@ -159,9 +150,8 @@ public float getProgress() throws IOException, InterruptedException { } @Override - public void initialize(InputSplit split, - TaskAttemptContext context - ) throws IOException, InterruptedException { + public void initialize(InputSplit split, 
TaskAttemptContext context) + throws IOException, InterruptedException { conf = context.getConfiguration(); } @@ -171,8 +161,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException { if (!outer.nextKeyValue()) { return false; } - key = ReflectionUtils.copy(outer.getConfiguration(), - outer.getCurrentKey(), key); + key = ReflectionUtils.copy(outer.getConfiguration(), outer.getCurrentKey(), key); value = ReflectionUtils.copy(conf, outer.getCurrentValue(), value); return true; } @@ -188,16 +177,14 @@ public Result getCurrentValue() { } } - private class SubMapRecordWriter extends RecordWriter { + private class SubMapRecordWriter extends RecordWriter { @Override - public void close(TaskAttemptContext context) throws IOException, - InterruptedException { + public void close(TaskAttemptContext context) throws IOException, InterruptedException { } @Override - public void write(K2 key, V2 value) throws IOException, - InterruptedException { + public void write(K2 key, V2 value) throws IOException, InterruptedException { synchronized (outer) { outer.write(key, value); } @@ -235,56 +222,34 @@ public float getProgress() { justification = "Don't understand why FB is complaining about this one." + " We do throw exception") private class MapRunner implements Runnable { - private Mapper mapper; + private Mapper mapper; private Context subcontext; @SuppressWarnings({ "rawtypes", "unchecked" }) MapRunner(Context context) throws IOException, InterruptedException { - mapper = ReflectionUtils.newInstance(mapClass, - context.getConfiguration()); + mapper = ReflectionUtils.newInstance(mapClass, context.getConfiguration()); try { - Constructor c = context.getClass().getConstructor( - Mapper.class, - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = context.getClass().getConstructor(Mapper.class, Configuration.class, + TaskAttemptID.class, RecordReader.class, RecordWriter.class, OutputCommitter.class, + StatusReporter.class, InputSplit.class); c.setAccessible(true); - subcontext = (Context) c.newInstance( - mapper, - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); + subcontext = (Context) c.newInstance(mapper, outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); } catch (Exception e) { try { - Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl").getConstructor( - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl") + .getConstructor(Configuration.class, TaskAttemptID.class, RecordReader.class, + RecordWriter.class, OutputCommitter.class, StatusReporter.class, InputSplit.class); c.setAccessible(true); - MapContext mc = (MapContext) c.newInstance( - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); - Class wrappedMapperClass = Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); + MapContext mc = (MapContext) 
c.newInstance(outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); + Class wrappedMapperClass = + Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); Method getMapContext = wrappedMapperClass.getMethod("getMapContext", MapContext.class); - subcontext = (Context) getMapContext.invoke( - wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); + subcontext = (Context) getMapContext + .invoke(wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); } catch (Exception ee) { // FindBugs: REC_CATCH_EXCEPTION // rethrow as IOE throw new IOException(e); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java index 7859afa496c4..63ed8d1fdc15 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,17 +20,17 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; @InterfaceAudience.Public public class MutationSerialization implements Serialization { @@ -69,6 +69,7 @@ public void open(InputStream in) throws IOException { } } + private static class MutationSerializer implements Serializer { private OutputStream out; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 317b328df782..5ab4e5a292e9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,21 +19,19 @@ import java.io.IOException; import java.util.List; -import java.util.Map.Entry; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Combine Puts. Merges Put instances grouped by K into a single - * instance. + * Combine Puts. Merges Put instances grouped by K into a single instance. * @see TableMapReduceUtil */ @InterfaceAudience.Public @@ -43,14 +40,14 @@ public class PutCombiner extends Reducer { @Override protected void reduce(K row, Iterable vals, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { // Using HeapSize to create an upper bound on the memory size of // the puts and flush some portion of the content while looping. This // flush could result in multiple Puts for a single rowkey. That is // acceptable because Combiner is run as an optimization and it's not // critical that all Puts are grouped perfectly. - long threshold = context.getConfiguration().getLong( - "putcombiner.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putcombiner.row.threshold", 1L * (1 << 30)); int cnt = 0; long curSize = 0; Put put = null; @@ -61,8 +58,7 @@ protected void reduce(K row, Iterable vals, Context context) put = p; familyMap = put.getFamilyCellMap(); } else { - for (Entry> entry : p.getFamilyCellMap() - .entrySet()) { + for (Entry> entry : p.getFamilyCellMap().entrySet()) { List cells = familyMap.get(entry.getKey()); List kvs = (cells != null) ? (List) cells : null; for (Cell cell : entry.getValue()) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index f4ad1f25fe4b..90905090f89d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Iterator; import java.util.List; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +31,6 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -41,40 +38,35 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Puts. 
- * Reads in all Puts from passed Iterator, sorts them, then emits - * Puts in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Puts. Reads in all Puts from passed Iterator, sorts them, then emits Puts in sorted + * order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 * @see CellSortReducer */ @InterfaceAudience.Public -public class PutSortReducer extends - Reducer { +public class PutSortReducer + extends Reducer { // the cell creator private CellCreator kvCreator; @Override protected void - setup(Reducer.Context context) - throws IOException, InterruptedException { + setup(Reducer.Context context) + throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); this.kvCreator = new CellCreator(conf); } @Override - protected void reduce( - ImmutableBytesWritable row, - java.lang.Iterable puts, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "putsortreducer.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30)); Iterator iter = puts.iterator(); while (iter.hasNext()) { TreeSet map = new TreeSet<>(CellComparator.getInstance()); @@ -100,15 +92,15 @@ protected void reduce( if (cellVisibility != null) { // add the visibility labels if any tags.addAll(kvCreator.getVisibilityExpressionResolver() - .createVisibilityExpTags(cellVisibility.getExpression())); + .createVisibilityExpTags(cellVisibility.getExpression())); } } catch (DeserializationException e) { // We just throw exception here. Should we allow other mutations to proceed by // just ignoring the bad one? throw new IOException("Invalid visibility expression found in mutation " + p, e); } - for (List cells: p.getFamilyCellMap().values()) { - for (Cell cell: cells) { + for (List cells : p.getFamilyCellMap().values()) { + for (Cell cell : cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. KeyValue kv = null; @@ -128,13 +120,12 @@ protected void reduce( } } } - context.setStatus("Read " + map.size() + " entries of " + map.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : map) { context.write(row, kv); - if (++index % 100 == 0) - context.setStatus("Wrote " + index); + if (++index % 100 == 0) context.setStatus("Wrote " + index); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java index 40cd34f3844a..4d027196a8fe 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,9 +38,9 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Computes size of each region for given table and given column families. - * The value is used by MapReduce for better scheduling. - * */ + * Computes size of each region for given table and given column families. The value is used by + * MapReduce for better scheduling. + */ @InterfaceAudience.Private public class RegionSizeCalculator { @@ -48,7 +48,7 @@ public class RegionSizeCalculator { /** * Maps each region to its size in bytes. - * */ + */ private final Map sizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable"; @@ -56,13 +56,12 @@ public class RegionSizeCalculator { /** * Computes size of each region for table and given column families. - * */ + */ public RegionSizeCalculator(RegionLocator regionLocator, Admin admin) throws IOException { init(regionLocator, admin); } - private void init(RegionLocator regionLocator, Admin admin) - throws IOException { + private void init(RegionLocator regionLocator, Admin admin) throws IOException { if (!enabled(admin.getConfiguration())) { LOG.info("Region size calculation disabled."); return; @@ -79,12 +78,12 @@ private void init(RegionLocator regionLocator, Admin admin) Set tableServers = getRegionServersOfTable(regionLocator); for (ServerName tableServerName : tableServers) { - for (RegionMetrics regionLoad : admin.getRegionMetrics( - tableServerName,regionLocator.getName())) { + for (RegionMetrics regionLoad : admin.getRegionMetrics(tableServerName, + regionLocator.getName())) { byte[] regionId = regionLoad.getRegionName(); - long regionSizeBytes - = ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; + long regionSizeBytes = + ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; sizeMap.put(regionId, regionSizeBytes); @@ -96,8 +95,7 @@ private void init(RegionLocator regionLocator, Admin admin) LOG.debug("Region sizes calculated"); } - private Set getRegionServersOfTable(RegionLocator regionLocator) - throws IOException { + private Set getRegionServersOfTable(RegionLocator regionLocator) throws IOException { Set tableServers = Sets.newHashSet(); for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) { @@ -112,7 +110,7 @@ boolean enabled(Configuration configuration) { /** * Returns size of given region in bytes. Returns 0 if region was not found. - * */ + */ public long getRegionSize(byte[] regionId) { Long size = sizeMap.get(regionId); if (size == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index 9fdaa7b78f75..782621e120af 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
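A small sketch of how the RegionSizeCalculator covered just above might be queried directly, for example to inspect the per-region sizes an InputFormat would use for scheduling; note the class is marked audience-private, and the table name here is a placeholder.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;

    public class RegionSizeExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin();
            RegionLocator locator = conn.getRegionLocator(TableName.valueOf("myTable"))) {
          RegionSizeCalculator calculator = new RegionSizeCalculator(locator, admin);
          for (HRegionLocation location : locator.getAllRegionLocations()) {
            byte[] regionName = location.getRegion().getRegionName();
            // Size is 0 for regions the calculator did not find.
            System.out.println(location.getRegion().getRegionNameAsString() + " ~ "
              + calculator.getRegionSize(regionName) + " bytes");
          }
        }
      }
    }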
See the NOTICE file * distributed with this work for additional information @@ -24,21 +24,21 @@ import java.io.OutputStream; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @InterfaceAudience.Public public class ResultSerialization extends Configured implements Serialization { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java index 2427e909ff23..81eacb440997 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java @@ -50,11 +50,11 @@ public class RoundRobinTableInputFormat extends TableInputFormat { private Boolean hbaseRegionsizecalculatorEnableOriginalValue = null; /** * Boolean config for whether superclass should produce InputSplits with 'lengths'. If true, TIF - * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will - * be used the the InputSplit length. If false, we skip this query and the super-classes - * returned InputSplits will have lenghths of zero. This override will set the flag to false. - * All returned lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort - * returned by this override will prevail. Thats what we want. + * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will be + * used the the InputSplit length. If false, we skip this query and the super-classes returned + * InputSplits will have lenghths of zero. This override will set the flag to false. All returned + * lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort returned by this + * override will prevail. Thats what we want. */ static String HBASE_REGIONSIZECALCULATOR_ENABLE = "hbase.regionsizecalculator.enable"; @@ -116,26 +116,26 @@ List roundRobin(List inputs) throws IOException { } /** - * Adds a configuration to the Context disabling remote rpc'ing to figure Region size - * when calculating InputSplits. See up in super-class TIF where we rpc to every server to find - * the size of all involved Regions. Here we disable this super-class action. This means - * InputSplits will have a length of zero. If all InputSplits have zero-length InputSplits, the - * ordering done in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will - * ask every node for the current size of each of the participating Table Regions. 
It does this - * because it wants to schedule the biggest Regions first (This fixation comes of hadoop itself - * -- see JobSubmitter where it sorts inputs by size). This extra diligence takes time and is of - * no utility in this RRTIF where spread is of more import than size-first. Also, if a rolling - * restart is happening when we go to launch the job, the job launch may fail because the request - * for Region size fails -- even after retries -- because rolled RegionServer may take a while to - * come online: e.g. it takes java 90 seconds to allocate a 160G. RegionServer is offline during - * this time. The job launch will fail with 'Connection rejected'. So, we set - * 'hbase.regionsizecalculator.enable' to false here in RRTIF. + * Adds a configuration to the Context disabling remote rpc'ing to figure Region size when + * calculating InputSplits. See up in super-class TIF where we rpc to every server to find the + * size of all involved Regions. Here we disable this super-class action. This means InputSplits + * will have a length of zero. If all InputSplits have zero-length InputSplits, the ordering done + * in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will ask every node + * for the current size of each of the participating Table Regions. It does this because it wants + * to schedule the biggest Regions first (This fixation comes of hadoop itself -- see JobSubmitter + * where it sorts inputs by size). This extra diligence takes time and is of no utility in this + * RRTIF where spread is of more import than size-first. Also, if a rolling restart is happening + * when we go to launch the job, the job launch may fail because the request for Region size fails + * -- even after retries -- because rolled RegionServer may take a while to come online: e.g. it + * takes java 90 seconds to allocate a 160G. RegionServer is offline during this time. The job + * launch will fail with 'Connection rejected'. So, we set 'hbase.regionsizecalculator.enable' to + * false here in RRTIF. * @see #unconfigure() */ void configure() { if (getConf().get(HBASE_REGIONSIZECALCULATOR_ENABLE) != null) { - this.hbaseRegionsizecalculatorEnableOriginalValue = getConf(). - getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); + this.hbaseRegionsizecalculatorEnableOriginalValue = + getConf().getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); } getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, false); } @@ -165,7 +165,7 @@ public static void main(String[] args) throws IOException { configuration.set(TableInputFormat.INPUT_TABLE, args[0]); tif.setConf(configuration); List splits = tif.getSplits(new JobContextImpl(configuration, new JobID())); - for (InputSplit split: splits) { + for (InputSplit split : splits) { System.out.println(split); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java index 9c3ab4801f56..9228daf4fb42 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,37 +18,37 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import java.util.List; import java.util.ArrayList; - +import java.util.List; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.AbstractHBaseTool; -import org.apache.hbase.thirdparty.com.google.common.base.Splitter; -import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; /** - * A job with a just a map phase to count rows. Map outputs table rows IF the - * input row has columns that have content. + * A job with a just a map phase to count rows. Map outputs table rows IF the input row has columns + * that have content. */ @InterfaceAudience.Public public class RowCounter extends AbstractHBaseTool { @@ -77,25 +76,23 @@ public class RowCounter extends AbstractHBaseTool { /** * Mapper that runs the count. */ - static class RowCounterMapper - extends TableMapper { + static class RowCounterMapper extends TableMapper { /** Counter enumeration to count the actual rows. */ - public static enum Counters {ROWS} + public static enum Counters { + ROWS + } /** * Maps the data. - * - * @param row The current table row key. + * @param row The current table row key. * @param values The columns. - * @param context The current context. + * @param context The current context. * @throws IOException When something is broken with the data. 
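A hedged sketch of invoking RowCounter programmatically with the --range option that the usage text below documents; the table name and row keys are placeholders, and the same arguments work on the command line via `hbase rowcounter`.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.RowCounter;
    import org.apache.hadoop.util.ToolRunner;

    public class RowCounterDriver {
      public static void main(String[] args) throws Exception {
        // Count rows of "myTable" whose keys fall in [rowA, rowZ).
        int exitCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(),
          new String[] { "myTable", "--range=rowA,rowZ" });
        System.exit(exitCode);
      }
    }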
* @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context) */ @Override - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { // Count every row containing data, whether it's in qualifiers or values context.getCounter(Counters.ROWS).increment(1); } @@ -103,8 +100,7 @@ public void map(ImmutableBytesWritable row, Result values, /** * Sets up the actual job. - * - * @param conf The current configuration. + * @param conf The current configuration. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -125,30 +121,28 @@ public Job createSubmittableJob(Configuration conf) throws IOException { } } - if(this.expectedCount >= 0) { + if (this.expectedCount >= 0) { conf.setLong(EXPECTED_COUNT_KEY, this.expectedCount); } scan.setTimeRange(startTime, endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. * @deprecated as of release 2.3.0. Will be removed on 4.0.0. Please use main method instead. */ @Deprecated - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; List rowRangeList = null; long startTime = 0; @@ -166,7 +160,7 @@ public static Job createSubmittableJob(Configuration conf, String[] args) if (args[i].startsWith(rangeSwitch)) { try { rowRangeList = parseRowRangeParameter( - args[i].substring(args[1].indexOf(rangeSwitch)+rangeSwitch.length())); + args[i].substring(args[1].indexOf(rangeSwitch) + rangeSwitch.length())); } catch (IllegalArgumentException e) { return null; } @@ -206,60 +200,55 @@ public static Job createSubmittableJob(Configuration conf, String[] args) if (StringUtils.isBlank(qualifier)) { scan.addFamily(Bytes.toBytes(family)); - } - else { + } else { scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier)); } } } scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** - * Prints usage without error message. - * Note that we don't document --expected-count, because it's intended for test. + * Prints usage without error message. Note that we don't document --expected-count, because it's + * intended for test. 
*/ private static void printUsage(String errorMessage) { System.err.println("ERROR: " + errorMessage); - System.err.println("Usage: hbase rowcounter [options] " - + "[--starttime= --endtime=] " - + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); + System.err.println( + "Usage: hbase rowcounter [options] " + "[--starttime= --endtime=] " + + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); System.err.println("For performance consider the following options:\n" - + "-Dhbase.client.scanner.caching=100\n" - + "-Dmapreduce.map.speculative=false"); + + "-Dhbase.client.scanner.caching=100\n" + "-Dmapreduce.map.speculative=false"); } private static List parseRowRangeParameter(String arg) { final List rangesSplit = Splitter.on(";").splitToList(arg); final List rangeList = new ArrayList<>(); for (String range : rangesSplit) { - if(range!=null && !range.isEmpty()) { + if (range != null && !range.isEmpty()) { List startEnd = Splitter.on(",").splitToList(range); if (startEnd.size() != 2 || startEnd.get(1).contains(",")) { throw new IllegalArgumentException("Wrong range specification: " + range); } String startKey = startEnd.get(0); String endKey = startEnd.get(1); - rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), - true, Bytes.toBytesBinary(endKey), false)); + rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), true, + Bytes.toBytesBinary(endKey), false)); } } return rangeList; } /** - * Sets filter {@link FilterBase} to the {@link Scan} instance. - * If provided rowRangeList contains more than one element, - * method sets filter which is instance of {@link MultiRowRangeFilter}. - * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. - * If rowRangeList contains exactly one element, startRow and stopRow are set to the scan. - * @param scan - * @param rowRangeList + * Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains + * more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}. + * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList + * contains exactly one element, startRow and stopRow are set to the scan. nn */ private static void setScanFilter(Scan scan, List rowRangeList) { final int size = rowRangeList == null ? 
0 : rowRangeList.size(); @@ -268,8 +257,8 @@ private static void setScanFilter(Scan scan, List } if (size == 1) { MultiRowRangeFilter.RowRange range = rowRangeList.get(0); - scan.withStartRow(range.getStartRow()); //inclusive - scan.withStopRow(range.getStopRow()); //exclusive + scan.withStartRow(range.getStartRow()); // inclusive + scan.withStopRow(range.getStopRow()); // exclusive } else if (size > 1) { scan.setFilter(new MultiRowRangeFilter(rowRangeList)); } @@ -281,13 +270,13 @@ protected void printUsage() { footerBuilder.append("For performance, consider the following configuration properties:\n"); footerBuilder.append("-Dhbase.client.scanner.caching=100\n"); footerBuilder.append("-Dmapreduce.map.speculative=false\n"); - printUsage("hbase rowcounter [options] [ ...]", - "Options:", footerBuilder.toString()); + printUsage("hbase rowcounter [options] [ ...]", "Options:", + footerBuilder.toString()); } @Override protected void printUsage(final String usageStr, final String usageHeader, - final String usageFooter) { + final String usageFooter) { HelpFormatter helpFormatter = new HelpFormatter(); helpFormatter.setWidth(120); helpFormatter.setOptionComparator(new AbstractHBaseTool.OptionsOrderComparator()); @@ -297,15 +286,15 @@ protected void printUsage(final String usageStr, final String usageHeader, @Override protected void addOptions() { - Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); - Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("end time filter limit, to only count rows up to this timestamp."). - longOpt(OPT_END_TIME).build(); - Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); - Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); + Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); + Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("end time filter limit, to only count rows up to this timestamp.").longOpt(OPT_END_TIME) + .build(); + Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); + Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); addOption(startTimeOption); addOption(endTimeOption); addOption(rangeOption); @@ -313,28 +302,31 @@ protected void addOptions() { } @Override - protected void processOptions(CommandLine cmd) throws IllegalArgumentException{ + protected void processOptions(CommandLine cmd) throws IllegalArgumentException { this.tableName = cmd.getArgList().get(0); - if(cmd.getOptionValue(OPT_RANGE)!=null) { + if (cmd.getOptionValue(OPT_RANGE) != null) { this.rowRangeList = parseRowRangeParameter(cmd.getOptionValue(OPT_RANGE)); } - this.endTime = cmd.getOptionValue(OPT_END_TIME) == null ? HConstants.LATEST_TIMESTAMP : - Long.parseLong(cmd.getOptionValue(OPT_END_TIME)); - this.expectedCount = cmd.getOptionValue(OPT_EXPECTED_COUNT) == null ? 
Long.MIN_VALUE : - Long.parseLong(cmd.getOptionValue(OPT_EXPECTED_COUNT)); - this.startTime = cmd.getOptionValue(OPT_START_TIME) == null ? 0 : - Long.parseLong(cmd.getOptionValue(OPT_START_TIME)); - - for(int i=1; ihbase.simpletotalorder.start - * and hbase.simpletotalorder.end. The end key needs to be - * exclusive; i.e. one larger than the biggest key in your key space. - * You may be surprised at how this class partitions the space; it may not - * align with preconceptions; e.g. a start key of zero and an end key of 100 - * divided in ten will not make regions whose range is 0-10, 10-20, and so on. - * Make your own partitioner if you need the region spacing to come out a + * A partitioner that takes start and end keys and uses bigdecimal to figure which reduce a key + * belongs to. Pass the start and end keys in the Configuration using + * hbase.simpletotalorder.start and hbase.simpletotalorder.end. The end + * key needs to be exclusive; i.e. one larger than the biggest key in your key space. You may be + * surprised at how this class partitions the space; it may not align with preconceptions; e.g. a + * start key of zero and an end key of 100 divided in ten will not make regions whose range is 0-10, + * 10-20, and so on. Make your own partitioner if you need the region spacing to come out a * particular way. * @param * @see #START @@ -46,7 +42,7 @@ */ @InterfaceAudience.Public public class SimpleTotalOrderPartitioner extends Partitioner -implements Configurable { + implements Configurable { private final static Logger LOG = LoggerFactory.getLogger(SimpleTotalOrderPartitioner.class); /** @@ -67,9 +63,9 @@ public class SimpleTotalOrderPartitioner extends Partitioner= 0) { @@ -289,8 +300,8 @@ protected void map(ImmutableBytesWritable key, Result value, Context context) } /** - * If there is an open hash batch, complete it and sync if there are diffs. - * Start a new batch, and seek to read the + * If there is an open hash batch, complete it and sync if there are diffs. Start a new batch, + * and seek to read the */ private void moveToNextBatch(Context context) throws IOException, InterruptedException { if (targetHasher.isBatchStarted()) { @@ -303,12 +314,11 @@ private void moveToNextBatch(Context context) throws IOException, InterruptedExc } /** - * Finish the currently open hash batch. - * Compare the target hash to the given source hash. - * If they do not match, then sync the covered key range. + * Finish the currently open hash batch. Compare the target hash to the given source hash. If + * they do not match, then sync the covered key range. */ private void finishBatchAndCompareHashes(Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { targetHasher.finishBatch(); context.getCounter(Counter.BATCHES).increment(1); if (targetHasher.getBatchSize() == 0) { @@ -321,33 +331,33 @@ private void finishBatchAndCompareHashes(Context context) context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1); ImmutableBytesWritable stopRow = nextSourceKey == null - ? new ImmutableBytesWritable(sourceTableHash.stopRow) - : nextSourceKey; + ? new ImmutableBytesWritable(sourceTableHash.stopRow) + : nextSourceKey; if (LOG.isDebugEnabled()) { - LOG.debug("Hash mismatch. Key range: " + toHex(targetHasher.getBatchStartKey()) - + " to " + toHex(stopRow) - + " sourceHash: " + toHex(currentSourceHash) - + " targetHash: " + toHex(targetHash)); + LOG.debug("Hash mismatch. 
Key range: " + toHex(targetHasher.getBatchStartKey()) + " to " + + toHex(stopRow) + " sourceHash: " + toHex(currentSourceHash) + " targetHash: " + + toHex(targetHash)); } syncRange(context, targetHasher.getBatchStartKey(), stopRow); } } + private static String toHex(ImmutableBytesWritable bytes) { return Bytes.toHex(bytes.get(), bytes.getOffset(), bytes.getLength()); } - private static final CellScanner EMPTY_CELL_SCANNER - = new CellScanner(Collections.emptyIterator()); + private static final CellScanner EMPTY_CELL_SCANNER = + new CellScanner(Collections. emptyIterator()); /** - * Rescan the given range directly from the source and target tables. - * Count and log differences, and if this is not a dry run, output Puts and Deletes - * to make the target table match the source table for this range + * Rescan the given range directly from the source and target tables. Count and log differences, + * and if this is not a dry run, output Puts and Deletes to make the target table match the + * source table for this range */ private void syncRange(Context context, ImmutableBytesWritable startRow, - ImmutableBytesWritable stopRow) throws IOException, InterruptedException { + ImmutableBytesWritable stopRow) throws IOException, InterruptedException { Scan scan = sourceTableHash.initScan(); scan.withStartRow(startRow.copyBytes()); scan.withStopRow(stopRow.copyBytes()); @@ -361,7 +371,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, boolean rangeMatched = true; byte[] nextSourceRow = sourceCells.nextRow(); byte[] nextTargetRow = targetCells.nextRow(); - while(nextSourceRow != null || nextTargetRow != null) { + while (nextSourceRow != null || nextTargetRow != null) { boolean rowMatched; int rowComparison = compareRowKeys(nextSourceRow, nextTargetRow); if (rowComparison < 0) { @@ -371,7 +381,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, context.getCounter(Counter.TARGETMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextSourceRow, sourceCells, EMPTY_CELL_SCANNER); - nextSourceRow = sourceCells.nextRow(); // advance only source to next row + nextSourceRow = sourceCells.nextRow(); // advance only source to next row } else if (rowComparison > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Source missing row: " + Bytes.toString(nextTargetRow)); @@ -379,7 +389,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, context.getCounter(Counter.SOURCEMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextTargetRow, EMPTY_CELL_SCANNER, targetCells); - nextTargetRow = targetCells.nextRow(); // advance only target to next row + nextTargetRow = targetCells.nextRow(); // advance only target to next row } else { // current row is the same on both sides, compare cell by cell rowMatched = syncRowCells(context, nextSourceRow, sourceCells, targetCells); @@ -413,8 +423,7 @@ public CellScanner(Iterator results) { } /** - * Advance to the next row and return its row key. - * Returns null iff there are no more rows. + * Advance to the next row and return its row key. Returns null iff there are no more rows. 
*/ public byte[] nextRow() { if (nextRowResult == null) { @@ -422,9 +431,10 @@ public byte[] nextRow() { while (results.hasNext()) { nextRowResult = results.next(); Cell nextCell = nextRowResult.rawCells()[0]; - if (currentRow == null - || !Bytes.equals(currentRow, 0, currentRow.length, nextCell.getRowArray(), - nextCell.getRowOffset(), nextCell.getRowLength())) { + if ( + currentRow == null || !Bytes.equals(currentRow, 0, currentRow.length, + nextCell.getRowArray(), nextCell.getRowOffset(), nextCell.getRowLength()) + ) { // found next row break; } else { @@ -464,8 +474,10 @@ public Cell nextCellInRow() { if (results.hasNext()) { Result result = results.next(); Cell cell = result.rawCells()[0]; - if (Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())) { + if ( + Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()) + ) { // result is part of current row currentRowResult = result; nextCellInRow = 0; @@ -484,31 +496,29 @@ public Cell nextCellInRow() { } } - private Cell checkAndResetTimestamp(Cell sourceCell){ + private Cell checkAndResetTimestamp(Cell sourceCell) { if (ignoreTimestamp) { - sourceCell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(sourceCell.getType()) - .setRow(sourceCell.getRowArray(), - sourceCell.getRowOffset(), sourceCell.getRowLength()) - .setFamily(sourceCell.getFamilyArray(), - sourceCell.getFamilyOffset(), sourceCell.getFamilyLength()) - .setQualifier(sourceCell.getQualifierArray(), - sourceCell.getQualifierOffset(), sourceCell.getQualifierLength()) - .setTimestamp(EnvironmentEdgeManager.currentTime()) - .setValue(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength()).build(); + sourceCell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(sourceCell.getType()) + .setRow(sourceCell.getRowArray(), sourceCell.getRowOffset(), sourceCell.getRowLength()) + .setFamily(sourceCell.getFamilyArray(), sourceCell.getFamilyOffset(), + sourceCell.getFamilyLength()) + .setQualifier(sourceCell.getQualifierArray(), sourceCell.getQualifierOffset(), + sourceCell.getQualifierLength()) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setValue(sourceCell.getValueArray(), + sourceCell.getValueOffset(), sourceCell.getValueLength()) + .build(); } return sourceCell; } /** - * Compare the cells for the given row from the source and target tables. - * Count and log any differences. - * If not a dry run, output a Put and/or Delete needed to sync the target table - * to match the source table. + * Compare the cells for the given row from the source and target tables. Count and log any + * differences. If not a dry run, output a Put and/or Delete needed to sync the target table to + * match the source table. 
*/ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceCells, - CellScanner targetCells) throws IOException, InterruptedException { + CellScanner targetCells) throws IOException, InterruptedException { Put put = null; Delete delete = null; long matchingCells = 0; @@ -546,8 +556,8 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC delete = new Delete(rowKey); } // add a tombstone to exactly match the target cell that is missing on the source - delete.addColumn(CellUtil.cloneFamily(targetCell), - CellUtil.cloneQualifier(targetCell), targetCell.getTimestamp()); + delete.addColumn(CellUtil.cloneFamily(targetCell), CellUtil.cloneQualifier(targetCell), + targetCell.getTimestamp()); } targetCell = targetCells.nextCellInRow(); @@ -558,12 +568,12 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC } else { if (LOG.isDebugEnabled()) { LOG.debug("Different values: "); - LOG.debug(" source cell: " + sourceCell - + " value: " + Bytes.toString(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength())); - LOG.debug(" target cell: " + targetCell - + " value: " + Bytes.toString(targetCell.getValueArray(), - targetCell.getValueOffset(), targetCell.getValueLength())); + LOG.debug(" source cell: " + sourceCell + " value: " + + Bytes.toString(sourceCell.getValueArray(), sourceCell.getValueOffset(), + sourceCell.getValueLength())); + LOG.debug(" target cell: " + targetCell + " value: " + + Bytes.toString(targetCell.getValueArray(), targetCell.getValueOffset(), + targetCell.getValueLength())); } context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1); matchingRow = false; @@ -615,12 +625,11 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC } /** - * Compare row keys of the given Result objects. - * Nulls are after non-nulls + * Compare row keys of the given Result objects. Nulls are after non-nulls */ private static int compareRowKeys(byte[] r1, byte[] r2) { if (r1 == null) { - return 1; // source missing row + return 1; // source missing row } else if (r2 == null) { return -1; // target missing row } else { @@ -631,11 +640,10 @@ private static int compareRowKeys(byte[] r1, byte[] r2) { } /** - * Compare families, qualifiers, and timestamps of the given Cells. - * They are assumed to be of the same row. - * Nulls are after non-nulls. + * Compare families, qualifiers, and timestamps of the given Cells. They are assumed to be of + * the same row. Nulls are after non-nulls. 
*/ - private int compareCellKeysWithinRow(Cell c1, Cell c2) { + private int compareCellKeysWithinRow(Cell c1, Cell c2) { if (c1 == null) { return 1; // source missing cell } @@ -662,8 +670,7 @@ private int compareCellKeysWithinRow(Cell c1, Cell c2) { } @Override - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { if (mapperException == null) { try { finishRemainingHashRanges(context); @@ -693,23 +700,26 @@ protected void cleanup(Context context) } } - private void finishRemainingHashRanges(Context context) throws IOException, - InterruptedException { + private void finishRemainingHashRanges(Context context) + throws IOException, InterruptedException { TableSplit split = (TableSplit) context.getInputSplit(); byte[] splitEndRow = split.getEndRow(); boolean reachedEndOfTable = HashTable.isTableEndRow(splitEndRow); // if there are more hash batches that begin before the end of this split move to them - while (nextSourceKey != null - && (nextSourceKey.compareTo(splitEndRow) < 0 || reachedEndOfTable)) { + while ( + nextSourceKey != null && (nextSourceKey.compareTo(splitEndRow) < 0 || reachedEndOfTable) + ) { moveToNextBatch(context); } if (targetHasher.isBatchStarted()) { // need to complete the final open hash batch - if ((nextSourceKey != null && nextSourceKey.compareTo(splitEndRow) > 0) - || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow))) { + if ( + (nextSourceKey != null && nextSourceKey.compareTo(splitEndRow) > 0) + || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow)) + ) { // the open hash range continues past the end of this region // add a scan to complete the current hash range Scan scan = sourceTableHash.initScan(); @@ -739,6 +749,7 @@ private void finishRemainingHashRanges(Context context) throws IOException, } private static final int NUM_ARGS = 3; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -772,10 +783,9 @@ private static void printUsage(final String errorMsg) { System.err.println("Examples:"); System.err.println(" For a dry run SyncTable of tableA from a remote source cluster"); System.err.println(" to a local target cluster:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" - + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase" - + " hdfs://nn:9000/hashes/tableA tableA tableA"); + System.err.println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" + + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase" + + " hdfs://nn:9000/hashes/tableA tableA tableA"); } private boolean doCommandLine(final String[] args) { @@ -835,7 +845,6 @@ private boolean doCommandLine(final String[] args) { return false; } - } catch (Exception e) { LOG.error("Failed to parse commandLine arguments", e); printUsage("Can't start because " + e.getMessage()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index c2351b91fe66..b02eb81a2f4a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * 
Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,31 +21,29 @@ import java.util.Collections; import java.util.List; import java.util.Locale; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase -implements Configurable { +public class TableInputFormat extends TableInputFormatBase implements Configurable { @SuppressWarnings("hiding") private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); @@ -54,12 +51,13 @@ public class TableInputFormat extends TableInputFormatBase /** Job parameter that specifies the input table. */ public static final String INPUT_TABLE = "hbase.mapreduce.inputtable"; /** - * If specified, use start keys of this table to split. - * This is useful when you are preparing data for bulkload. + * If specified, use start keys of this table to split. This is useful when you are preparing data + * for bulkload. */ private static final String SPLIT_TABLE = "hbase.mapreduce.splittable"; - /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. - * See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. + /** + * Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. See + * {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. */ public static final String SCAN = "hbase.mapreduce.scan"; /** Scan start row */ @@ -92,7 +90,6 @@ public class TableInputFormat extends TableInputFormatBase /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -102,16 +99,13 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to set the details for the table to - * be scanned. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to set the details for the table to be scanned. + * @param configuration The configuration to set. 
+ * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Intentional") public void setConf(Configuration configuration) { this.conf = configuration; @@ -127,7 +121,7 @@ public void setConf(Configuration configuration) { try { scan = createScanFromConfiguration(conf); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error(StringUtils.stringifyException(e)); } } @@ -135,13 +129,13 @@ public void setConf(Configuration configuration) { } /** - * Sets up a {@link Scan} instance, applying settings from the configuration property - * constants defined in {@code TableInputFormat}. This allows specifying things such as: + * Sets up a {@link Scan} instance, applying settings from the configuration property constants + * defined in {@code TableInputFormat}. This allows specifying things such as: *

<ul>
- *   <li>start and stop rows</li>
- *   <li>column qualifiers or families</li>
- *   <li>timestamps or timerange</li>
- *   <li>scanner caching and batch size</li>
+ * <li>start and stop rows</li>
+ * <li>column qualifiers or families</li>
+ * <li>timestamps or timerange</li>
+ * <li>scanner caching and batch size</li>
 * </ul>
    */ public static Scan createScanFromConfiguration(Configuration conf) throws IOException { @@ -168,9 +162,8 @@ public static Scan createScanFromConfiguration(Configuration conf) throws IOExce } if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) { - scan.setTimeRange( - Long.parseLong(conf.get(SCAN_TIMERANGE_START)), - Long.parseLong(conf.get(SCAN_TIMERANGE_END))); + scan.setTimeRange(Long.parseLong(conf.get(SCAN_TIMERANGE_START)), + Long.parseLong(conf.get(SCAN_TIMERANGE_END))); } if (conf.get(SCAN_MAXVERSIONS) != null) { @@ -204,16 +197,14 @@ protected void initialize(JobContext context) throws IOException { } /** - * Parses a combined family and qualifier and adds either both or just the - * family in case there is no qualifier. This assumes the older colon - * divided notation, e.g. "family:qualifier". - * - * @param scan The Scan to update. + * Parses a combined family and qualifier and adds either both or just the family in case there is + * no qualifier. This assumes the older colon divided notation, e.g. "family:qualifier". + * @param scan The Scan to update. * @param familyAndQualifier family and qualifier * @throws IllegalArgumentException When familyAndQualifier is invalid. */ private static void addColumn(Scan scan, byte[] familyAndQualifier) { - byte [][] fq = CellUtil.parseColumn(familyAndQualifier); + byte[][] fq = CellUtil.parseColumn(familyAndQualifier); if (fq.length == 1) { scan.addFamily(fq[0]); } else if (fq.length == 2) { @@ -228,31 +219,31 @@ private static void addColumn(Scan scan, byte[] familyAndQualifier) { *
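For reference, a minimal sketch (not part of this patch) of building a Scan purely from the configuration properties that createScanFromConfiguration() documents; the row keys, column family, and caching value are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

    public class ScanFromConf {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set(TableInputFormat.SCAN_ROW_START, "row-000");   // start row
        conf.set(TableInputFormat.SCAN_ROW_STOP, "row-999");    // stop row
        conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf");    // family to scan
        conf.set(TableInputFormat.SCAN_CACHEDROWS, "500");      // scanner caching
        Scan scan = TableInputFormat.createScanFromConfiguration(conf);
        System.out.println(scan);
      }
    }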

    * Overrides previous calls to {@link Scan#addColumn(byte[], byte[])}for any families in the * input. - * - * @param scan The Scan to update. + * @param scan The Scan to update. * @param columns array of columns, formatted as family:qualifier * @see Scan#addColumn(byte[], byte[]) */ - public static void addColumns(Scan scan, byte [][] columns) { + public static void addColumns(Scan scan, byte[][] columns) { for (byte[] column : columns) { addColumn(scan, column); } } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. Splits are shuffled if - * required. - * @param context The current job context. + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. Splits are shuffled if required. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { List splits = super.getSplits(context); - if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { + if ( + (conf.get(SHUFFLE_MAPS) != null) + && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT)) + ) { Collections.shuffle(splits); } return splits; @@ -260,9 +251,8 @@ public List getSplits(JobContext context) throws IOException { /** * Convenience method to parse a string representation of an array of column specifiers. - * - * @param scan The Scan to update. - * @param columns The columns to parse. + * @param scan The Scan to update. + * @param columns The columns to parse. */ private static void addColumns(Scan scan, String columns) { String[] cols = columns.split(" "); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 265d30068d3f..efd872263b16 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,19 +52,18 @@ import org.slf4j.LoggerFactory; /** - * A base for {@link TableInputFormat}s. Receives a {@link Connection}, a {@link TableName}, - * an {@link Scan} instance that defines the input columns etc. Subclasses may use - * other TableRecordReader implementations. - * - * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to - * function properly. Each of the entry points to this class used by the MapReduce framework, - * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, - * will call {@link #initialize(JobContext)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. - * + * A base for {@link TableInputFormat}s. 
Receives a {@link Connection}, a {@link TableName}, an + * {@link Scan} instance that defines the input columns etc. Subclasses may use other + * TableRecordReader implementations. Subclasses MUST ensure initializeTable(Connection, TableName) + * is called for an instance to function properly. Each of the entry points to this class used by + * the MapReduce framework, {@link #createRecordReader(InputSplit, TaskAttemptContext)} and + * {@link #getSplits(JobContext)}, will call {@link #initialize(JobContext)} as a convenient + * centralized location to handle retrieving the necessary configuration information. If your + * subclass overrides either of these methods, either call the parent version or call initialize + * yourself. *

    * An example of a subclass: + * *

      *   class ExampleTIF extends TableInputFormatBase {
      *
    @@ -92,42 +90,43 @@
      *   }
      * 
    * - * - * The number of InputSplits(mappers) match the number of regions in a table by default. - * Set "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set - * this property will disable autobalance below.\ - * Set "hbase.mapreduce.tif.input.autobalance" to enable autobalance, hbase will assign mappers - * based on average region size; For regions, whose size larger than average region size may assigned - * more mappers, and for smaller one, they may group together to use one mapper. If actual average - * region size is too big, like 50G, it is not good to only assign 1 mapper for those large regions. - * Use "hbase.mapreduce.tif.ave.regionsize" to set max average region size when enable "autobalanece", - * default mas average region size is 8G. + * The number of InputSplits(mappers) match the number of regions in a table by default. Set + * "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set this + * property will disable autobalance below.\ Set "hbase.mapreduce.tif.input.autobalance" to enable + * autobalance, hbase will assign mappers based on average region size; For regions, whose size + * larger than average region size may assigned more mappers, and for smaller one, they may group + * together to use one mapper. If actual average region size is too big, like 50G, it is not good to + * only assign 1 mapper for those large regions. Use "hbase.mapreduce.tif.ave.regionsize" to set max + * average region size when enable "autobalanece", default mas average region size is 8G. */ @InterfaceAudience.Public -public abstract class TableInputFormatBase - extends InputFormat { +public abstract class TableInputFormatBase extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class); - private static final String NOT_INITIALIZED = "The input format instance has not been properly " + - "initialized. Ensure you call initializeTable either in your constructor or initialize " + - "method"; - private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + - " previous error. Please look at the previous logs lines from" + - " the task's full log for more details."; + private static final String NOT_INITIALIZED = "The input format instance has not been properly " + + "initialized. Ensure you call initializeTable either in your constructor or initialize " + + "method"; + private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."; /** Specify if we enable auto-balance to set number of mappers in M/R jobs. */ public static final String MAPREDUCE_INPUT_AUTOBALANCE = "hbase.mapreduce.tif.input.autobalance"; - /** In auto-balance, we split input by ave region size, if calculated region size is too big, we can set it. */ + /** + * In auto-balance, we split input by ave region size, if calculated region size is too big, we + * can set it. + */ public static final String MAX_AVERAGE_REGION_SIZE = "hbase.mapreduce.tif.ave.regionsize"; /** Set the number of Mappers for each region, all regions have same number of Mappers */ - public static final String NUM_MAPPERS_PER_REGION = "hbase.mapreduce.tableinput.mappers.per.region"; + public static final String NUM_MAPPERS_PER_REGION = + "hbase.mapreduce.tableinput.mappers.per.region"; - - /** Holds the details for the internal scanner. 
- * - * @see Scan */ + /** + * Holds the details for the internal scanner. + * @see Scan + */ private Scan scan = null; /** The {@link Admin}. */ private Admin admin; @@ -142,27 +141,22 @@ public abstract class TableInputFormatBase /** Used to generate splits based on region size. */ private RegionSizeCalculator regionSizeCalculator; - /** The reverse DNS lookup cache mapping: IPAddress => HostName */ - private HashMap reverseDNSCacheMap = - new HashMap<>(); + private HashMap reverseDNSCacheMap = new HashMap<>(); /** - * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses - * the default. - * - * @param split The split to work with. - * @param context The current context. + * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses the + * default. + * @param split The split to work with. + * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { // Just in case a subclass is relying on JobConfigurable magic. if (table == null) { initialize(context); @@ -179,7 +173,7 @@ public RecordReader createRecordReader( TableSplit tSplit = (TableSplit) split; LOG.info("Input split length: " + StringUtils.humanReadableInt(tSplit.getLength()) + " bytes."); final TableRecordReader trr = - this.tableRecordReader != null ? this.tableRecordReader : new TableRecordReader(); + this.tableRecordReader != null ? this.tableRecordReader : new TableRecordReader(); Scan sc = new Scan(this.scan); sc.withStartRow(tSplit.getStartRow()); sc.withStopRow(tSplit.getEndRow()); @@ -209,8 +203,8 @@ public float getProgress() throws IOException, InterruptedException { } @Override - public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { trr.initialize(inputsplit, context); } @@ -221,17 +215,16 @@ public boolean nextKeyValue() throws IOException, InterruptedException { }; } - protected Pair getStartEndKeys() throws IOException { + protected Pair getStartEndKeys() throws IOException { return getRegionLocator().getStartEndKeys(); } /** * Calculates the splits that will serve as input for the map tasks. - * @param context The current job context. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { @@ -267,10 +260,10 @@ public List getSplits(JobContext context) throws IOException { return res; } - //The default value of "hbase.mapreduce.input.autobalance" is false. + // The default value of "hbase.mapreduce.input.autobalance" is false. 
if (context.getConfiguration().getBoolean(MAPREDUCE_INPUT_AUTOBALANCE, false)) { - long maxAveRegionSize = context.getConfiguration() - .getLong(MAX_AVERAGE_REGION_SIZE, 8L*1073741824); //8GB + long maxAveRegionSize = + context.getConfiguration().getLong(MAX_AVERAGE_REGION_SIZE, 8L * 1073741824); // 8GB return calculateAutoBalancedSplits(splits, maxAveRegionSize); } @@ -285,7 +278,6 @@ public List getSplits(JobContext context) throws IOException { /** * Create one InputSplit per region - * * @return The list of InputSplit for all the regions * @throws IOException throws IOException */ @@ -299,10 +291,9 @@ private List oneInputSplitPerRegion() throws IOException { TableName tableName = getTable().getName(); Pair keys = getStartEndKeys(); - if (keys == null || keys.getFirst() == null || - keys.getFirst().length == 0) { + if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { HRegionLocation regLoc = - getRegionLocator().getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false); + getRegionLocator().getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false); if (null == regLoc) { throw new IOException("Expecting at least one region."); } @@ -311,9 +302,9 @@ private List oneInputSplitPerRegion() throws IOException { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc - .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); + TableSplit split = + new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + regLoc.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); return splits; } @@ -326,17 +317,18 @@ private List oneInputSplitPerRegion() throws IOException { byte[] startRow = scan.getStartRow(); byte[] stopRow = scan.getStopRow(); // determine if the given start an stop key fall into the region - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || - Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; + if ( + (startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0) + ) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; HRegionLocation location = getRegionLocator().getRegionLocation(keys.getFirst()[i], false); // The below InetSocketAddress creation does a name resolution. 
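For reference, a minimal sketch (not part of this patch) of the split-generation knobs described in the class comment, set on the job configuration before getSplits() runs; the values are illustrative and the 'job' variable is assumed to be an org.apache.hadoop.mapreduce.Job being configured.

    Configuration conf = job.getConfiguration();
    // Either pin a fixed number of mappers per region (this disables autobalance)...
    // conf.setInt("hbase.mapreduce.tableinput.mappers.per.region", 2);
    // ...or balance splits by average region size, capping the average used at 4 GB.
    conf.setBoolean("hbase.mapreduce.tif.input.autobalance", true);
    conf.setLong("hbase.mapreduce.tif.ave.regionsize", 4L * 1024 * 1024 * 1024);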
@@ -354,8 +346,8 @@ private List oneInputSplitPerRegion() throws IOException { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - splitStart, splitStop, regionLocation, encodedRegionName, regionSize); + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, + encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { LOG.debug("getSplits: split -> " + i + " -> " + split); @@ -368,19 +360,19 @@ private List oneInputSplitPerRegion() throws IOException { /** * Create n splits for one InputSplit, For now only support uniform distribution * @param split A TableSplit corresponding to a range of rowkeys - * @param n Number of ranges after splitting. Pass 1 means no split for the range - * Pass 2 if you want to split the range in two; + * @param n Number of ranges after splitting. Pass 1 means no split for the range Pass 2 if + * you want to split the range in two; * @return A list of TableSplit, the size of the list is n * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ protected List createNInputSplitsUniform(InputSplit split, int n) - throws IllegalArgumentIOException { + throws IllegalArgumentIOException { if (split == null || !(split instanceof TableSplit)) { throw new IllegalArgumentIOException( - "InputSplit for CreateNSplitsPerRegion can not be null + " - + "and should be instance of TableSplit"); + "InputSplit for CreateNSplitsPerRegion can not be null + " + + "and should be instance of TableSplit"); } - //if n < 1, then still continue using n = 1 + // if n < 1, then still continue using n = 1 n = n < 1 ? 1 : n; List res = new ArrayList<>(n); if (n == 1) { @@ -398,51 +390,48 @@ protected List createNInputSplitsUniform(InputSplit split, int n) byte[] endRow = ts.getEndRow(); // For special case: startRow or endRow is empty - if (startRow.length == 0 && endRow.length == 0){ + if (startRow.length == 0 && endRow.length == 0) { startRow = new byte[1]; endRow = new byte[1]; startRow[0] = 0; endRow[0] = -1; } - if (startRow.length == 0 && endRow.length != 0){ + if (startRow.length == 0 && endRow.length != 0) { startRow = new byte[1]; startRow[0] = 0; } - if (startRow.length != 0 && endRow.length == 0){ - endRow =new byte[startRow.length]; - for (int k = 0; k < startRow.length; k++){ + if (startRow.length != 0 && endRow.length == 0) { + endRow = new byte[startRow.length]; + for (int k = 0; k < startRow.length; k++) { endRow[k] = -1; } } // Split Region into n chunks evenly - byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); + byte[][] splitKeys = Bytes.split(startRow, endRow, true, n - 1); for (int i = 0; i < splitKeys.length - 1; i++) { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. 
See HBASE-25212 - //notice that the regionSize parameter may be not very accurate - TableSplit tsplit = - new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, - encodedRegionName, regionSize / n); + // notice that the regionSize parameter may be not very accurate + TableSplit tsplit = new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], + regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } return res; } + /** - * Calculates the number of MapReduce input splits for the map tasks. The number of - * MapReduce input splits depends on the average region size. - * Make it 'public' for testing - * - * @param splits The list of input splits before balance. + * Calculates the number of MapReduce input splits for the map tasks. The number of MapReduce + * input splits depends on the average region size. Make it 'public' for testing + * @param splits The list of input splits before balance. * @param maxAverageRegionSize max Average region size for one mapper * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - *org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ - public List calculateAutoBalancedSplits(List splits, long maxAverageRegionSize) - throws IOException { + public List calculateAutoBalancedSplits(List splits, + long maxAverageRegionSize) throws IOException { if (splits.size() == 0) { return splits; } @@ -455,15 +444,16 @@ public List calculateAutoBalancedSplits(List splits, lon long averageRegionSize = totalRegionSize / splits.size(); // totalRegionSize might be overflow, and the averageRegionSize must be positive. if (averageRegionSize <= 0) { - LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + - "set it to Long.MAX_VALUE " + splits.size()); + LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + + "set it to Long.MAX_VALUE " + splits.size()); averageRegionSize = Long.MAX_VALUE / splits.size(); } - //if averageRegionSize is too big, change it to default as 1 GB, + // if averageRegionSize is too big, change it to default as 1 GB, if (averageRegionSize > maxAverageRegionSize) { averageRegionSize = maxAverageRegionSize; } - // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' region + // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' + // region // set default as 16M = (default hdfs block size) / 4; if (averageRegionSize < 16 * 1048576) { return splits; @@ -477,7 +467,8 @@ public List calculateAutoBalancedSplits(List splits, lon if (regionSize >= averageRegionSize) { // make this region as multiple MapReduce input split. 
- int n = (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); + int n = + (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); List temp = createNInputSplitsUniform(ts, n); resultList.addAll(temp); } else { @@ -490,8 +481,10 @@ public List calculateAutoBalancedSplits(List splits, lon while (j < splits.size()) { TableSplit nextRegion = (TableSplit) splits.get(j); long nextRegionSize = nextRegion.getLength(); - if (totalSize + nextRegionSize <= averageRegionSize - && Bytes.equals(splitEndKey, nextRegion.getStartRow())) { + if ( + totalSize + nextRegionSize <= averageRegionSize + && Bytes.equals(splitEndKey, nextRegion.getStartRow()) + ) { totalSize = totalSize + nextRegionSize; splitEndKey = nextRegion.getEndRow(); j++; @@ -504,7 +497,7 @@ public List calculateAutoBalancedSplits(List splits, lon // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 TableSplit t = new TableSplit(tableName, null, splitStartKey, splitEndKey, regionLocation, - encodedRegionName, totalSize); + encodedRegionName, totalSize); resultList.add(t); } } @@ -533,26 +526,25 @@ String reverseDNS(InetAddress ipAddress) throws UnknownHostException { } /** - * Test if the given region is to be included in the InputSplit while splitting - * the regions of a table. + * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *
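As a worked example of the log-based formula just above: with the default 8 GB average and a 40 GB region, n = round(ln(40/8) + 1) = round(2.61) = 3, so that region is carved into three uniform input splits.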

    - * This optimization is effective when there is a specific reasoning to exclude an entire region from the M-R job, - * (and hence, not contributing to the InputSplit), given the start and end keys of the same.
    - * Useful when we need to remember the last-processed top record and revisit the [last, current) interval for M-R processing, - * continuously. In addition to reducing InputSplits, reduces the load on the region server as well, due to the ordering of the keys. - *
    + * This optimization is effective when there is a specific reasoning to exclude an entire region + * from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys + * of the same.
    + * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
    *
    * Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
    - * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no region is excluded( i.e. all regions are included). - * - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region - * @param endKey End key of the region + * @param endKey End key of the region * @return true, if this region needs to be included as part of the input (default). - * */ - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -588,15 +580,13 @@ protected Admin getAdmin() { /** * Allows subclasses to initialize the table information. - * - * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. - * @param tableName The {@link TableName} of the table to process. - * @throws IOException + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. n */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { - LOG.warn("initializeTable called multiple times. Overwriting connection and table " + - "reference; TableInputFormatBase will not close these old references when done."); + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); @@ -607,13 +597,12 @@ protected void initializeTable(Connection connection, TableName tableName) throw @InterfaceAudience.Private protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin) - throws IOException { + throws IOException { return new RegionSizeCalculator(locator, admin); } /** * Gets the scan defining the actual details like columns etc. - * * @return The internal scan instance. */ public Scan getScan() { @@ -623,8 +612,7 @@ public Scan getScan() { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -632,37 +620,29 @@ public void setScan(Scan scan) { /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; } /** - * Handle subclass specific set up. - * Each of the entry points used by the MapReduce framework, + * Handle subclass specific set up. Each of the entry points used by the MapReduce framework, * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, * will call {@link #initialize(JobContext)} as a convenient centralized location to handle * retrieving the necessary configuration information and calling - * {@link #initializeTable(Connection, TableName)}. - * - * Subclasses should implement their initialize call such that it is safe to call multiple times. 
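For reference, a minimal sketch (not part of this patch) of the override hook described above; the checkpoint row key is hypothetical, and an empty end key (the last region of the table) is always kept.

    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckpointedTableInputFormat extends TableInputFormat {
      // Hypothetical checkpoint: only regions that may still hold rows at or after it are scanned.
      private static final byte[] CHECKPOINT = Bytes.toBytes("row-000042");

      @Override
      protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
        // Keep the region if its end key is past the checkpoint, or if it is the last region.
        return endKey.length == 0 || Bytes.compareTo(endKey, CHECKPOINT) > 0;
      }
    }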
- * The current TableInputFormatBase implementation relies on a non-null table reference to decide - * if an initialize call is needed, but this behavior may change in the future. In particular, - * it is critical that initializeTable not be called multiple times since this will leak - * Connection instances. - * + * {@link #initializeTable(Connection, TableName)}. Subclasses should implement their initialize + * call such that it is safe to call multiple times. The current TableInputFormatBase + * implementation relies on a non-null table reference to decide if an initialize call is needed, + * but this behavior may change in the future. In particular, it is critical that initializeTable + * not be called multiple times since this will leak Connection instances. */ protected void initialize(JobContext context) throws IOException { } /** * Close the Table and related objects that were initialized via - * {@link #initializeTable(Connection, TableName)}. - * - * @throws IOException + * {@link #initializeTable(Connection, TableName)}. n */ protected void closeTable() throws IOException { close(admin, table, regionLocator, connection); @@ -675,7 +655,9 @@ protected void closeTable() throws IOException { private void close(Closeable... closables) throws IOException { for (Closeable c : closables) { - if(c != null) { c.close(); } + if (c != null) { + c.close(); + } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index ac7196e98eae..a1b1a8ce4df4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import com.codahale.metrics.MetricRegistry; import java.io.File; import java.io.IOException; import java.net.URL; @@ -33,24 +33,18 @@ import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.TokenUtil; @@ -61,8 +55,12 @@ import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.codahale.metrics.MetricRegistry; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** * Utility for {@link TableMapper} and {@link TableReducer} @@ -74,128 +72,98 @@ public class TableMapReduceUtil { public static final String TABLE_INPUT_CLASS_KEY = "hbase.table.input.class"; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, - job, true); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, true); } + /** + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. 
+ * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @throws IOException When setting up the details fails. + */ + public static void initTableMapperJob(TableName table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(table.getNameAsString(), scan, mapper, outputKeyClass, outputValueClass, job, + true); + } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table Binary representation of the table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(TableName table, - Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, - Job job) throws IOException { - initTableMapperJob(table.getNameAsString(), - scan, - mapper, - outputKeyClass, - outputValueClass, - job, - true); + public static void initTableMapperJob(byte[] table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + true); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. 
*/ - public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, - job, true); + public static void initTableMapperJob(String table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, true, inputFormatClass); } - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @throws IOException When setting up the details fails. - */ - public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true, inputFormatClass); - } - - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param initCredentials whether to initialize hbase auth credentials for the job - * @param inputFormatClass the input format + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param initCredentials whether to initialize hbase auth credentials for the job + * @param inputFormatClass the input format * @throws IOException When setting up the details fails. 
*/ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, boolean initCredentials, - Class inputFormatClass) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, boolean initCredentials, + Class inputFormatClass) throws IOException { job.setInputFormatClass(inputFormatClass); if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass); if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass); @@ -208,8 +176,8 @@ public static void initTableMapperJob(String table, Scan scan, conf.set(TableInputFormat.INPUT_TABLE, table); conf.set(TableInputFormat.SCAN, convertScanToString(scan)); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (addDependencyJars) { addDependencyJars(job); } @@ -219,120 +187,103 @@ public static void initTableMapperJob(String table, Scan scan, } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table Binary representation of the table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @param inputFormatClass The class of the input format + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param inputFormatClass The class of the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, inputFormatClass); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, inputFormatClass); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. 
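For reference, a minimal sketch (not part of this patch) of the single-table overload above in a map-only driver; MyTableMapper is a hypothetical TableMapper<ImmutableBytesWritable, Result> subclass.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ExampleScanJob {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "example-scan");
        job.setJarByClass(ExampleScanJob.class);
        Scan scan = new Scan();
        scan.setCaching(500);        // larger caching is typical for MapReduce scans
        scan.setCacheBlocks(false);  // avoid churning the region server block cache
        TableMapReduceUtil.initTableMapperJob("tableA", scan, MyTableMapper.class,
          ImmutableBytesWritable.class, Result.class, job);
        job.setNumReduceTasks(0);    // map-only job
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }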
- * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table Binary representation of the table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** * @return {@link TableInputFormat} .class unless Configuration has something else at - * {@link #TABLE_INPUT_CLASS_KEY}. + * {@link #TABLE_INPUT_CLASS_KEY}. */ private static Class getConfiguredInputFormat(Job job) { - return (Class)job.getConfiguration(). - getClass(TABLE_INPUT_CLASS_KEY, TableInputFormat.class); + return (Class) job.getConfiguration().getClass(TABLE_INPUT_CLASS_KEY, + TableInputFormat.class); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. 
*/ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** - * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on - * direct memory will likely cause the map tasks to OOM when opening the region. This - * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user - * wants to override this behavior in their job. + * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on direct + * memory will likely cause the map tasks to OOM when opening the region. This is done here + * instead of in TableSnapshotRegionRecordReader in case an advanced user wants to override this + * behavior in their job. */ public static void resetCacheConfig(Configuration conf) { - conf.setFloat( - HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f); conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); } /** - * Sets up the job for reading from one or more table snapshots, with one or more scans - * per snapshot. - * It bypasses hbase servers and read directly from snapshot files. - * + * Sets up the job for reading from one or more table snapshots, with one or more scans per + * snapshot. It bypasses hbase servers and read directly from snapshot files. * @param snapshotScans map of snapshot name to scans on that snapshot. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map> snapshotScans, - Class mapper, Class outputKeyClass, Class outputValueClass, - Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir); job.setInputFormatClass(MultiTableSnapshotInputFormat.class); @@ -357,27 +308,25 @@ public static void initMultiTableSnapshotMapperJob(Map> /** * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly * from snapshot files. - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param scan The scan instance with the columns, time range etc. 
- * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase - * configuration. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via - * the distributed cache (tmpjars). - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of - * rootdir. After the job is finished, restore directory can be deleted. + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir); initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); @@ -385,106 +334,87 @@ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * - * @param snapshotName The name of the snapshot (of a table) to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * - * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. - * @param splitAlgo algorithm to split + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. + * @param snapshotName The name of the snapshot (of a table) to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. 
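Reading from a snapshot instead of the live table only changes the setup call; a minimal sketch (not part of this patch), reusing the 'job' and 'MyTableMapper' assumptions from the previous example, with an illustrative snapshot name and restore directory (org.apache.hadoop.fs.Path).

    Path restoreDir = new Path("hdfs://nn:9000/tmp/snapshot-restore"); // writable, outside the hbase rootdir
    TableMapReduceUtil.initTableSnapshotMapperJob("tableA-snapshot", new Scan(), MyTableMapper.class,
      ImmutableBytesWritable.class, Result.class, job, true, restoreDir);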
+ * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restore + * directory can be deleted. + * @param splitAlgo algorithm to split * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException When setting up the details fails. * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, - int numSplitsPerRegion) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir, RegionSplitter.SplitAlgorithm splitAlgo, + int numSplitsPerRegion) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir, splitAlgo, - numSplitsPerRegion); - initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); + numSplitsPerRegion); + initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, false, TableSnapshotInputFormat.class); resetCacheConfig(job.getConfiguration()); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * - * @param scans The list of {@link Scan} objects to read from. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. + * @param scans The list of {@link Scan} objects to read from. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job) throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * - * @param scans The list of {@link Scan} objects to read from. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. 
- * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. + * @param scans The list of {@link Scan} objects to read from. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars) + throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, + true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * - * @param scans The list of {@link Scan} objects to read from. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). - * @param initCredentials whether to initialize hbase auth credentials for the job + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. + * @param scans The list of {@link Scan} objects to read from. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @param initCredentials whether to initialize hbase auth credentials for the job * @throws IOException When setting up the details fails. 
*/ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, - boolean initCredentials) throws IOException { + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars, + boolean initCredentials) throws IOException { job.setInputFormatClass(MultiTableInputFormat.class); if (outputValueClass != null) { job.setMapOutputValueClass(outputValueClass); @@ -518,7 +448,7 @@ public static void initCredentials(Job job) throws IOException { // propagate delegation related props from launcher job to MR job if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) { job.getConfiguration().set("mapreduce.job.credentials.binary", - System.getenv("HADOOP_TOKEN_FILE_LOCATION")); + System.getenv("HADOOP_TOKEN_FILE_LOCATION")); } } @@ -529,7 +459,7 @@ public static void initCredentials(Job job) throws IOException { User user = userProvider.getCurrent(); if (quorumAddress != null) { Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); + quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); Connection peerConn = ConnectionFactory.createConnection(peerConf); try { TokenUtil.addTokenForJob(peerConn, user, job); @@ -552,39 +482,33 @@ public static void initCredentials(Job job) throws IOException { } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * - * The quorumAddress is the key to the ZK ensemble, which contains: - * hbase.zookeeper.quorum, hbase.zookeeper.client.port and + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. The quorumAddress is the key to the ZK + * ensemble, which contains: hbase.zookeeper.quorum, hbase.zookeeper.client.port and * zookeeper.znode.parent - * - * @param job The job that requires the permission. + * @param job The job that requires the permission. * @param quorumAddress string that contains the 3 required configuratins * @throws IOException When the authentication token cannot be obtained. * @deprecated Since 1.2.0 and will be removed in 3.0.0. Use - * {@link #initCredentialsForCluster(Job, Configuration)} instead. + * {@link #initCredentialsForCluster(Job, Configuration)} instead. * @see #initCredentialsForCluster(Job, Configuration) * @see HBASE-14886 */ @Deprecated - public static void initCredentialsForCluster(Job job, String quorumAddress) - throws IOException { - Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress); + public static void initCredentialsForCluster(Job job, String quorumAddress) throws IOException { + Configuration peerConf = + HBaseConfiguration.createClusterConf(job.getConfiguration(), quorumAddress); initCredentialsForCluster(job, peerConf); } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * - * @param job The job that requires the permission. + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. + * @param job The job that requires the permission. 
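
A companion sketch for the multi-scan variant just shown: each Scan handed to MultiTableInputFormat is expected to carry its source table name as a scan attribute. Table and family names below are assumptions.

    // Assumes the usual org.apache.hadoop.hbase.* and mapreduce imports.
    Job job = Job.getInstance(HBaseConfiguration.create(), "multi-table-scan");
    List<Scan> scans = new ArrayList<>();
    for (String table : new String[] { "table_a", "table_b" }) {  // assumed table names
      Scan s = new Scan();
      s.addFamily(Bytes.toBytes("cf"));                           // assumed family
      // MultiTableInputFormat reads the source table from this attribute on each Scan.
      s.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(table));
      scans.add(s);
    }
    TableMapReduceUtil.initTableMapperJob(scans, MyMapper.class, Text.class, IntWritable.class, job);
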
* @param conf The configuration to use in connecting to the peer cluster * @throws IOException When the authentication token cannot be obtained. */ - public static void initCredentialsForCluster(Job job, Configuration conf) - throws IOException { + public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); if (userProvider.isHBaseSecurityEnabled()) { try { @@ -603,8 +527,7 @@ public static void initCredentialsForCluster(Job job, Configuration conf) /** * Writes the given scan into a Base64 encoded string. - * - * @param scan The scan to write out. + * @param scan The scan to write out. * @return The scan saved in a Base64 encoded string. * @throws IOException When writing the scan fails. */ @@ -615,110 +538,94 @@ public static String convertScanToString(Scan scan) throws IOException { /** * Converts the given Base64 string back into a Scan instance. - * - * @param base64 The scan details. + * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. */ public static Scan convertStringToScan(String base64) throws IOException { - byte [] decoded = Base64.getDecoder().decode(base64); + byte[] decoded = Base64.getDecoder().decode(base64); return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded)); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job) - throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job) throws IOException { initTableReducerJob(table, reducer, job, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner) throws IOException { initTableReducerJob(table, reducer, job, partitioner, null, null, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. 
- * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the + * zookeeper ensemble of an alternate remote cluster when you would have the + * reduce write a cluster that is other than the default; e.g. copying tables + * between clusters, the source would be designated by + * hbase-site.xml and this param would have the ensemble address + * of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> - * such as server,server2,server3:2181:/hbase. - * @param serverClass redefined hbase.regionserver.class - * @param serverImpl redefined hbase.regionserver.impl + * such as server,server2,server3:2181:/hbase. + * @param serverClass redefined hbase.regionserver.class + * @param serverImpl redefined hbase.regionserver.impl * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl) throws IOException { - initTableReducerJob(table, reducer, job, partitioner, quorumAddress, - serverClass, serverImpl, true); - } - - /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl) + throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress, serverClass, serverImpl, + true); + } + + /** + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. 
+ * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to + * the zookeeper ensemble of an alternate remote cluster when you would + * have the reduce write a cluster that is other than the default; e.g. + * copying tables between clusters, the source would be designated by + * hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> - * such as server,server2,server3:2181:/hbase. - * @param serverClass redefined hbase.regionserver.class - * @param serverImpl redefined hbase.regionserver.impl - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * such as server,server2,server3:2181:/hbase. + * @param serverClass redefined hbase.regionserver.class + * @param serverImpl redefined hbase.regionserver.impl + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl, boolean addDependencyJars) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, + boolean addDependencyJars) throws IOException { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); @@ -726,12 +633,12 @@ public static void initTableReducerJob(String table, if (reducer != null) job.setReducerClass(reducer); conf.set(TableOutputFormat.OUTPUT_TABLE, table); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); // If passed a quorum/ensemble address, pass it on to TableOutputFormat. if (quorumAddress != null) { // Calling this will validate the format ZKConfig.validateClusterKey(quorumAddress); - conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress); + conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress); } if (serverClass != null && serverImpl != null) { conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass); @@ -757,11 +664,10 @@ public static void initTableReducerJob(String table, } /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Ensures that the given number of reduce tasks for the given job configuration does not exceed + * the number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. 
*/ public static void limitNumReduceTasks(String table, Job job) throws IOException { @@ -772,11 +678,10 @@ public static void limitNumReduceTasks(String table, Job job) throws IOException } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Sets the number of reduce tasks for the given job configuration to the number of regions the + * given table has. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumReduceTasks(String table, Job job) throws IOException { @@ -784,13 +689,11 @@ public static void setNumReduceTasks(String table, Job job) throws IOException { } /** - * Sets the number of rows to return and cache with each scanner iteration. - * Higher caching values will enable faster mapreduce jobs at the expense of - * requiring more heap to contain the cached rows. - * - * @param job The current job to adjust. - * @param batchSize The number of rows to return in batch with each scanner - * iteration. + * Sets the number of rows to return and cache with each scanner iteration. Higher caching values + * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached + * rows. + * @param job The current job to adjust. + * @param batchSize The number of rows to return in batch with each scanner iteration. */ public static void setScannerCaching(Job job, int batchSize) { job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize); @@ -799,10 +702,9 @@ public static void setScannerCaching(Job job, int batchSize) { /** * Add HBase and its dependencies (only) to the job configuration. *
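
The remote-cluster form of initTableReducerJob described above expects the cluster key in the quorum:port:znode format; a brief sketch, with a made-up table name and ensemble address, together with the two sizing helpers:

    // job is the org.apache.hadoop.mapreduce.Job being configured; usual HBase imports assumed.
    TableMapReduceUtil.initTableReducerJob(
      "target_table",                        // assumed output table on the peer cluster
      MyReducer.class,                       // hypothetical TableReducer subclass
      job,
      null,                                  // default partitioner
      "zk1,zk2,zk3:2181:/hbase",             // peer cluster key: quorum:client-port:znode.parent
      null, null);                           // keep default region server class/impl
    TableMapReduceUtil.setNumReduceTasks("target_table", job);  // region count looked up via the job's configuration
    TableMapReduceUtil.setScannerCaching(job, 500);             // rows per scanner round trip on the map side
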

    - * This is intended as a low-level API, facilitating code reuse between this - * class and its mapred counterpart. It also of use to external tools that - * need to build a MapReduce job that interacts with HBase but want - * fine-grained control over the jars shipped to the cluster. + * This is intended as a low-level API, facilitating code reuse between this class and its mapred + * counterpart. It is also of use to external tools that need to build a MapReduce job that interacts + * with HBase but want fine-grained control over the jars shipped to the cluster. *

    * @param conf The Configuration object to extend with dependencies. * @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil @@ -811,33 +713,33 @@ public static void setScannerCaching(Job job, int batchSize) { public static void addHBaseDependencyJars(Configuration conf) throws IOException { addDependencyJarsForClasses(conf, // explicitly pull a class from each module - org.apache.hadoop.hbase.HConstants.class, // hbase-common + org.apache.hadoop.hbase.HConstants.class, // hbase-common org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded - org.apache.hadoop.hbase.client.Put.class, // hbase-client - org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server - org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat - org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat - org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce - org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics - org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api - org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication - org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http - org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure - org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper + org.apache.hadoop.hbase.client.Put.class, // hbase-client + org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server + org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat + org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat + org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce + org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics + org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api + org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication + org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http + org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure + org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper org.apache.hbase.thirdparty.com.google.common.collect.Lists.class, // hb-shaded-miscellaneous org.apache.hbase.thirdparty.com.google.gson.GsonBuilder.class, // hbase-shaded-gson org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.class, // hb-sh-protobuf - org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty - org.apache.zookeeper.ZooKeeper.class, // zookeeper - com.codahale.metrics.MetricRegistry.class, // metrics-core - org.apache.commons.lang3.ArrayUtils.class, // commons-lang - io.opentelemetry.api.trace.Span.class, // opentelemetry-api + org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty + org.apache.zookeeper.ZooKeeper.class, // zookeeper + com.codahale.metrics.MetricRegistry.class, // metrics-core + org.apache.commons.lang3.ArrayUtils.class, // commons-lang + io.opentelemetry.api.trace.Span.class, // opentelemetry-api io.opentelemetry.semconv.trace.attributes.SemanticAttributes.class); // opentelemetry-semconv } /** - * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. - * Also exposed to shell scripts via `bin/hbase mapredcp`. + * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. Also + * exposed to shell scripts via `bin/hbase mapredcp`. 
*/ public static String buildDependencyClasspath(Configuration conf) { if (conf == null) { @@ -859,63 +761,52 @@ public static String buildDependencyClasspath(Configuration conf) { } /** - * Add the HBase dependency jars as well as jars for any of the configured - * job classes to the job configuration, so that JobClient will ship them - * to the cluster and add them to the DistributedCache. + * Add the HBase dependency jars as well as jars for any of the configured job classes to the job + * configuration, so that JobClient will ship them to the cluster and add them to the + * DistributedCache. */ public static void addDependencyJars(Job job) throws IOException { addHBaseDependencyJars(job.getConfiguration()); try { addDependencyJarsForClasses(job.getConfiguration(), - // when making changes here, consider also mapred.TableMapReduceUtil - // pull job classes - job.getMapOutputKeyClass(), - job.getMapOutputValueClass(), - job.getInputFormatClass(), - job.getOutputKeyClass(), - job.getOutputValueClass(), - job.getOutputFormatClass(), - job.getPartitionerClass(), - job.getCombinerClass()); + // when making changes here, consider also mapred.TableMapReduceUtil + // pull job classes + job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getInputFormatClass(), + job.getOutputKeyClass(), job.getOutputValueClass(), job.getOutputFormatClass(), + job.getPartitionerClass(), job.getCombinerClass()); } catch (ClassNotFoundException e) { throw new IOException(e); } } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. * @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)} - * instead. + * instead. * @see #addDependencyJars(Job) * @see HBASE-8386 */ @Deprecated - public static void addDependencyJars(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { LOG.warn("The addDependencyJars(Configuration, Class...) method has been deprecated since it" - + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + - "instead. See HBASE-8386 for more details."); + + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + + "instead. See HBASE-8386 for more details."); addDependencyJarsForClasses(conf, classes); } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. - * - * N.B. that this method at most adds one jar per class given. If there is more than one - * jar available containing a class with the same name as a given class, we don't define - * which of those jars might be chosen. - * - * @param conf The Hadoop Configuration to modify + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. N.B. that this method at most + * adds one jar per class given. If there is more than one jar available containing a class with + * the same name as a given class, we don't define which of those jars might be chosen. 
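
As a usage note for the jar-shipping helpers above: the initTable*Job methods already call addDependencyJars(Job) unless told otherwise, so an explicit call is mainly for hand-assembled jobs, and the same jar list is what bin/hbase mapredcp prints for launcher scripts. A short sketch under those assumptions:

    // job is a hand-assembled org.apache.hadoop.mapreduce.Job; HBase jars plus the job's own
    // key/value/format classes are added to tmpjars for the distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
    // From a shell, a launcher can reuse the same list, e.g.
    //   HADOOP_CLASSPATH="$(bin/hbase mapredcp)" hadoop jar my-job.jar MyDriver ...
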
+ * @param conf The Hadoop Configuration to modify * @param classes will add just those dependencies needed to find the given classes * @throws IOException if an underlying library call fails. */ @InterfaceAudience.Private - public static void addDependencyJarsForClasses(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJarsForClasses(Configuration conf, Class... classes) + throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Set jars = new HashSet<>(); @@ -932,13 +823,11 @@ public static void addDependencyJarsForClasses(Configuration conf, Path path = findOrCreateJar(clazz, localFs, packagedClasses); if (path == null) { - LOG.warn("Could not find jar for class " + clazz + - " in order to ship it to the cluster."); + LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster."); continue; } if (!localFs.exists(path)) { - LOG.warn("Could not validate jar file " + path + " for class " - + clazz); + LOG.warn("Could not validate jar file " + path + " for class " + clazz); continue; } jars.add(path.toString()); @@ -949,21 +838,18 @@ public static void addDependencyJarsForClasses(Configuration conf, } /** - * Finds the Jar for a class or creates it if it doesn't exist. If the class is in - * a directory in the classpath, it creates a Jar on the fly with the - * contents of the directory and returns the path to that Jar. If a Jar is - * created, it is created in the system temporary directory. Otherwise, - * returns an existing jar that contains a class of the same name. Maintains - * a mapping from jar contents to the tmp jar created. - * @param my_class the class to find. - * @param fs the FileSystem with which to qualify the returned path. + * Finds the Jar for a class or creates it if it doesn't exist. If the class is in a directory in + * the classpath, it creates a Jar on the fly with the contents of the directory and returns the + * path to that Jar. If a Jar is created, it is created in the system temporary directory. + * Otherwise, returns an existing jar that contains a class of the same name. Maintains a mapping + * from jar contents to the tmp jar created. + * @param my_class the class to find. + * @param fs the FileSystem with which to qualify the returned path. * @param packagedClasses a map of class name to path. - * @return a jar file that contains the class. - * @throws IOException + * @return a jar file that contains the class. n */ private static Path findOrCreateJar(Class my_class, FileSystem fs, - Map packagedClasses) - throws IOException { + Map packagedClasses) throws IOException { // attempt to locate an existing jar for the class. String jar = findContainingJar(my_class, packagedClasses); if (null == jar || jar.isEmpty()) { @@ -980,12 +866,13 @@ private static Path findOrCreateJar(Class my_class, FileSystem fs, } /** - * Add entries to packagedClasses corresponding to class files - * contained in jar. - * @param jar The jar who's content to list. + * Add entries to packagedClasses corresponding to class files contained in + * jar. + * @param jar The jar who's content to list. 
* @param packagedClasses map[class -> jar] */ - private static void updateMap(String jar, Map packagedClasses) throws IOException { + private static void updateMap(String jar, Map packagedClasses) + throws IOException { if (null == jar || jar.isEmpty()) { return; } @@ -1004,16 +891,14 @@ private static void updateMap(String jar, Map packagedClasses) t } /** - * Find a jar that contains a class of the same name, if any. It will return - * a jar file, even if that is not the first thing on the class path that - * has a class with the same name. Looks first on the classpath and then in - * the packagedClasses map. + * Find a jar that contains a class of the same name, if any. It will return a jar file, even if + * that is not the first thing on the class path that has a class with the same name. Looks first + * on the classpath and then in the packagedClasses map. * @param my_class the class to find. - * @return a jar file that contains the class, or null. - * @throws IOException + * @return a jar file that contains the class, or null. n */ private static String findContainingJar(Class my_class, Map packagedClasses) - throws IOException { + throws IOException { ClassLoader loader = my_class.getClassLoader(); String class_file = my_class.getName().replaceAll("\\.", "/") + ".class"; @@ -1046,9 +931,8 @@ private static String findContainingJar(Class my_class, Map p } /** - * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job - * configuration contexts (HBASE-8140) and also for testing on MRv2. - * check if we have HADOOP-9426. + * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job configuration + * contexts (HBASE-8140) and also for testing on MRv2. check if we have HADOOP-9426. * @param my_class the class to find. * @return a jar file that contains the class, or null. */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java index 3a63bc60ab25..d561969c9a35 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,19 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the base Mapper class to add the required input key - * and value classes. - * - * @param The type of the key. - * @param The type of the value. + * Extends the base Mapper class to add the required input key and value classes. + * @param The type of the key. + * @param The type of the value. 
* @see org.apache.hadoop.mapreduce.Mapper */ @InterfaceAudience.Public public abstract class TableMapper -extends Mapper { + extends Mapper { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java index e02ba5f54357..a59659534913 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,10 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** * Small committer class that does not do anything. @@ -60,8 +58,6 @@ public boolean isRecoverySupported() { return true; } - public void recoverTask(TaskAttemptContext taskContext) - throws IOException - { + public void recoverTask(TaskAttemptContext taskContext) throws IOException { } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 8da8d83d9231..e8316c5016f4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,13 +42,11 @@ import org.slf4j.LoggerFactory; /** - * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored - * while the output value must be either a {@link Put} or a - * {@link Delete} instance. + * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored while the output + * value must be either a {@link Put} or a {@link Delete} instance. */ @InterfaceAudience.Public -public class TableOutputFormat extends OutputFormat -implements Configurable { +public class TableOutputFormat extends OutputFormat implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(TableOutputFormat.class); @@ -57,20 +54,19 @@ public class TableOutputFormat extends OutputFormat public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; /** - * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. - * For keys matching this prefix, the prefix is stripped, and the value is set in the - * configuration with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" - * would be set in the configuration as "key1 = value1". Use this to set properties - * which should only be applied to the {@code TableOutputFormat} configuration and not the - * input configuration. + * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. 
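
Since TableMapper only fixes the input key and value types, a minimal subclass is enough to show its shape; the class name, output types and the per-row cell count below are assumptions for illustration.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    /** Emits (row key, number of cells in the row) for every row of the scan. */
    public class CellCountMapper extends TableMapper<Text, IntWritable> {
      private final Text outKey = new Text();
      private final IntWritable outValue = new IntWritable();

      @Override
      protected void map(ImmutableBytesWritable row, Result value, Context context)
          throws IOException, InterruptedException {
        outKey.set(Bytes.toStringBinary(row.get()));   // printable form of the row key
        outValue.set(value.rawCells().length);         // cells returned for this row
        context.write(outKey, outValue);
      }
    }
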
For + * keys matching this prefix, the prefix is stripped, and the value is set in the configuration + * with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" would be set in the + * configuration as "key1 = value1". Use this to set properties which should only be applied to + * the {@code TableOutputFormat} configuration and not the input configuration. */ public static final String OUTPUT_CONF_PREFIX = "hbase.mapred.output."; /** - * Optional job parameter to specify a peer cluster. - * Used specifying remote cluster when copying between hbase clusters (the - * source is picked up from hbase-site.xml). - * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, Class, String, String, String) + * Optional job parameter to specify a peer cluster. Used specifying remote cluster when copying + * between hbase clusters (the source is picked up from hbase-site.xml). + * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, + * Class, String, String, String) */ public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum"; @@ -78,11 +74,9 @@ public class TableOutputFormat extends OutputFormat public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port"; /** Optional specification of the rs class name of the peer cluster */ - public static final String - REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; + public static final String REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; /** Optional specification of the rs impl name of the peer cluster */ - public static final String - REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; + public static final String REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; /** The configuration. */ private Configuration conf = null; @@ -90,26 +84,24 @@ public class TableOutputFormat extends OutputFormat /** * Writes the reducer output to an HBase table. */ - protected class TableRecordWriter - extends RecordWriter { + protected class TableRecordWriter extends RecordWriter { private Connection connection; private BufferedMutator mutator; /** - * @throws IOException - * + * n * */ public TableRecordWriter() throws IOException { String tableName = conf.get(OUTPUT_TABLE); this.connection = ConnectionFactory.createConnection(conf); this.mutator = connection.getBufferedMutator(TableName.valueOf(tableName)); - LOG.info("Created table instance for " + tableName); + LOG.info("Created table instance for " + tableName); } + /** * Closes the writer, in this case flush table commits. - * - * @param context The context. + * @param context The context. * @throws IOException When closing the writer fails. * @see RecordWriter#close(TaskAttemptContext) */ @@ -128,15 +120,13 @@ public void close(TaskAttemptContext context) throws IOException { /** * Writes a key/value pair into the table. - * - * @param key The key. - * @param value The value. + * @param key The key. + * @param value The value. * @throws IOException When writing fails. * @see RecordWriter#write(Object, Object) */ @Override - public void write(KEY key, Mutation value) - throws IOException { + public void write(KEY key, Mutation value) throws IOException { if (!(value instanceof Put) && !(value instanceof Delete)) { throw new IOException("Pass a Delete or a Put"); } @@ -145,29 +135,25 @@ public void write(KEY key, Mutation value) } /** - * Creates a new record writer. 
- * - * Be aware that the baseline javadoc gives the impression that there is a single - * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new + * Creates a new record writer. Be aware that the baseline javadoc gives the impression that there + * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new * RecordWriter per call of this method. You must close the returned RecordWriter when done. * Failure to do so will drop writes. - * - * @param context The current task context. + * @param context The current task context. * @return The newly created writer instance. - * @throws IOException When creating the writer fails. + * @throws IOException When creating the writer fails. * @throws InterruptedException When the job is cancelled. */ @Override public RecordWriter getRecordWriter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableRecordWriter(); } /** * Checks if the output table exists and is enabled. - * - * @param context The current context. - * @throws IOException When the check fails. + * @param context The current context. + * @throws IOException When the check fails. * @throws InterruptedException When the job is aborted. * @see OutputFormat#checkOutputSpecs(JobContext) */ @@ -182,29 +168,28 @@ public void checkOutputSpecs(JobContext context) throws IOException, Interrupted Admin admin = connection.getAdmin()) { TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE)); if (!admin.tableExists(tableName)) { - throw new TableNotFoundException("Can't write, table does not exist:" + - tableName.getNameAsString()); + throw new TableNotFoundException( + "Can't write, table does not exist:" + tableName.getNameAsString()); } if (!admin.isTableEnabled(tableName)) { - throw new TableNotEnabledException("Can't write, table is not enabled: " + - tableName.getNameAsString()); + throw new TableNotEnabledException( + "Can't write, table is not enabled: " + tableName.getNameAsString()); } } } /** * Returns the output committer. - * - * @param context The current context. + * @param context The current context. * @return The committer. - * @throws IOException When creating the committer fails. + * @throws IOException When creating the committer fails. * @throws InterruptedException When the job is aborted. 
* @see OutputFormat#getOutputCommitter(TaskAttemptContext) */ @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableOutputCommitter(); } @@ -216,7 +201,7 @@ public Configuration getConf() { @Override public void setConf(Configuration otherConf) { String tableName = otherConf.get(OUTPUT_TABLE); - if(tableName == null || tableName.length() <= 0) { + if (tableName == null || tableName.length() <= 0) { throw new IllegalArgumentException("Must specify table name"); } @@ -234,7 +219,7 @@ public void setConf(Configuration otherConf) { if (zkClientPort != 0) { this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort); } - } catch(IOException e) { + } catch (IOException e) { LOG.error(e.toString(), e); throw new RuntimeException(e); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 512c22f9cc9c..a0df98796b45 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -28,21 +25,19 @@ import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. */ @InterfaceAudience.Public -public class TableRecordReader -extends RecordReader { +public class TableRecordReader extends RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -58,8 +53,7 @@ public void setTable(Table table) { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.recordReaderImpl.setScan(scan); @@ -67,7 +61,6 @@ public void setScan(Scan scan) { /** * Closes the split. - * * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override @@ -77,23 +70,18 @@ public void close() { /** * Returns the current key. - * - * @return The current key. - * @throws IOException - * @throws InterruptedException When the job is aborted. + * @return The current key. n * @throws InterruptedException When the job is aborted. 
* @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey() */ @Override - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return this.recordReaderImpl.getCurrentKey(); } /** * Returns the current value. - * * @return The current value. - * @throws IOException When the value is faulty. + * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentValue() */ @@ -104,27 +92,23 @@ public Result getCurrentValue() throws IOException, InterruptedException { /** * Initializes the reader. - * - * @param inputsplit The split to work with. - * @param context The current task context. - * @throws IOException When setting up the reader fails. + * @param inputsplit The split to work with. + * @param context The current task context. + * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#initialize( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { this.recordReaderImpl.initialize(inputsplit, context); } /** * Positions the record reader to the next record. - * * @return true if there was another record. - * @throws IOException When reading the record failed. + * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. * @see org.apache.hadoop.mapreduce.RecordReader#nextKeyValue() */ @@ -135,7 +119,6 @@ public boolean nextKeyValue() throws IOException, InterruptedException { /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. * @see org.apache.hadoop.mapreduce.RecordReader#getProgress() */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 097b436f5664..7d0ffe02e6f0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -40,13 +40,11 @@ import org.slf4j.LoggerFactory; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. */ @InterfaceAudience.Public public class TableRecordReaderImpl { - public static final String LOG_PER_ROW_COUNT - = "hbase.mapreduce.log.scanner.rowcount"; + public static final String LOG_PER_ROW_COUNT = "hbase.mapreduce.log.scanner.rowcount"; private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); @@ -71,8 +69,7 @@ public class TableRecordReaderImpl { /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. 
*/ public void restart(byte[] firstRow) throws IOException { @@ -98,8 +95,8 @@ public void restart(byte[] firstRow) throws IOException { } /** - * In new mapreduce APIs, TaskAttemptContext has two getCounter methods - * Check if getCounter(String, String) method is available. + * In new mapreduce APIs, TaskAttemptContext has two getCounter methods Check if + * getCounter(String, String) method is available. * @return The getCounter method or null if not available. * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 */ @@ -108,8 +105,7 @@ protected static Method retrieveGetCounterWithStringsParams(TaskAttemptContext c throws IOException { Method m = null; try { - m = context.getClass().getMethod("getCounter", - new Class [] {String.class, String.class}); + m = context.getClass().getMethod("getCounter", new Class[] { String.class, String.class }); } catch (SecurityException e) { throw new IOException("Failed test for getCounter", e); } catch (NoSuchMethodException e) { @@ -131,8 +127,7 @@ public void setHTable(Table htable) { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -141,9 +136,8 @@ public void setScan(Scan scan) { /** * Build the scanner. Not done in constructor to allow for extension. */ - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { if (context != null) { this.context = context; } @@ -152,8 +146,6 @@ public void initialize(InputSplit inputsplit, /** * Closes the split. - * - * */ public void close() { if (this.scanner != null) { @@ -168,32 +160,27 @@ public void close() { /** * Returns the current key. - * * @return The current key. * @throws InterruptedException When the job is aborted. */ - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return key; } /** * Returns the current value. - * * @return The current value. - * @throws IOException When the value is faulty. + * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. */ public Result getCurrentValue() throws IOException, InterruptedException { return value; } - /** * Positions the record reader to the next record. - * * @return true if there was another record. - * @throws IOException When reading the record failed. + * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. 
*/ public boolean nextKeyValue() throws IOException, InterruptedException { @@ -210,7 +197,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException { numStale++; } if (logScannerActivity) { - rowcount ++; + rowcount++; if (rowcount >= logPerRowCount) { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); @@ -228,16 +215,16 @@ public boolean nextKeyValue() throws IOException, InterruptedException { // the scanner, if the second call fails, it will be rethrown LOG.info("recovered from " + StringUtils.stringifyException(e)); if (lastSuccessfulRow == null) { - LOG.warn("We are restarting the first next() invocation," + - " if your mapper has restarted a few other times like this" + - " then you should consider killing this job and investigate" + - " why it's taking so long."); + LOG.warn("We are restarting the first next() invocation," + + " if your mapper has restarted a few other times like this" + + " then you should consider killing this job and investigate" + + " why it's taking so long."); } if (lastSuccessfulRow == null) { restart(scan.getStartRow()); } else { restart(lastSuccessfulRow); - scanner.next(); // skip presumed already mapped row + scanner.next(); // skip presumed already mapped row } value = scanner.next(); if (value != null && value.isStale()) { @@ -267,8 +254,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); LOG.info(ioe.toString(), ioe); - String lastRow = lastSuccessfulRow == null ? - "null" : Bytes.toStringBinary(lastSuccessfulRow); + String lastRow = + lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow); LOG.info("lastSuccessfulRow=" + lastRow); } throw ioe; @@ -276,10 +263,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } /** - * If hbase runs on new version of mapreduce, RecordReader has access to - * counters thus can update counters based on scanMetrics. - * If hbase runs on old version of mapreduce, it won't be able to get - * access to counters and TableRecorderReader can't update counter values. + * If hbase runs on new version of mapreduce, RecordReader has access to counters thus can update + * counters based on scanMetrics. If hbase runs on old version of mapreduce, it won't be able to + * get access to counters and TableRecorderReader can't update counter values. */ private void updateCounters() { ScanMetrics scanMetrics = scanner.getScanMetrics(); @@ -291,45 +277,44 @@ private void updateCounters() { } /** - * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 - * Use {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. + * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 Use + * {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. 
*/ @Deprecated protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts, - Method getCounter, TaskAttemptContext context, long numStale) { + Method getCounter, TaskAttemptContext context, long numStale) { updateCounters(scanMetrics, numScannerRestarts, context, numStale); } protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts, - TaskAttemptContext context, long numStale) { + TaskAttemptContext context, long numStale) { // we can get access to counters only if hbase uses new mapreduce APIs if (context == null) { return; } - for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); - if (counter != null) { - counter.increment(entry.getValue()); - } + for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); + if (counter != null) { + counter.increment(entry.getValue()); } - if (numScannerRestarts != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); - if (counter != null) { - counter.increment(numScannerRestarts); - } + } + if (numScannerRestarts != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); + if (counter != null) { + counter.increment(numScannerRestarts); } - if (numStale != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); - if (counter != null) { - counter.increment(numStale); - } + } + if (numStale != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); + if (counter != null) { + counter.increment(numStale); } + } } /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. */ public float getProgress() { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java index 07e44cbc28be..7e1285537542 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,28 +17,26 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the basic Reducer class to add the required key and - * value input/output classes. While the input key and value as well as the - * output key can be anything handed in from the previous map phase the output - * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} - * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when - * using the {@link TableOutputFormat} class. + * Extends the basic Reducer class to add the required key and value input/output + * classes. 
While the input key and value as well as the output key can be anything handed in from + * the previous map phase the output value must be either a + * {@link org.apache.hadoop.hbase.client.Put Put} or a {@link org.apache.hadoop.hbase.client.Delete + * Delete} instance when using the {@link TableOutputFormat} class. *
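Editor's note: as a usage illustration for the Javadoc being reflowed here, a minimal TableReducer subclass that turns each incoming value into a Put. The class name, column family "f", and qualifier "q" are hypothetical placeholders, not part of the patch:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;

public class MyTableReducer extends TableReducer<Text, Text, ImmutableBytesWritable> {
  @Override
  protected void reduce(Text key, Iterable<Text> values, Context context)
    throws IOException, InterruptedException {
    // The value handed to TableOutputFormat must be a Put or a Delete.
    Put put = new Put(Bytes.toBytes(key.toString()));
    for (Text value : values) {
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(value.toString()));
    }
    context.write(new ImmutableBytesWritable(put.getRow()), put);
  }
}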

    - * This class is extended by {@link IdentityTableReducer} but can also be - * subclassed to implement similar features or any custom code needed. It has - * the advantage to enforce the output value to a specific basic type. - * - * @param The type of the input key. - * @param The type of the input value. + * This class is extended by {@link IdentityTableReducer} but can also be subclassed to implement + * similar features or any custom code needed. It has the advantage to enforce the output value to a + * specific basic type. + * @param The type of the input key. + * @param The type of the input value. * @param The type of the output key. * @see org.apache.hadoop.mapreduce.Reducer */ @InterfaceAudience.Public public abstract class TableReducer -extends Reducer { + extends Reducer { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java index 23a39a4192db..c71a42aea5d1 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.DataInput; @@ -41,40 +40,41 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job - * bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits, - * wals, etc) directly to provide maximum performance. The snapshot is not required to be - * restored to the live cluster or cloned. This also allows to run the mapreduce job from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, - * and this InputFormat can be used to run the mapreduce job directly over the snapshot files. - * The snapshot should not be deleted while there are jobs reading from snapshot files. + * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job bypasses + * HBase servers, and directly accesses the underlying files (hfile, recovered edits, wals, etc) + * directly to provide maximum performance. The snapshot is not required to be restored to the live + * cluster or cloned. This also allows to run the mapreduce job from an online or offline hbase + * cluster. The snapshot files can be exported by using the + * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, and this + * InputFormat can be used to run the mapreduce job directly over the snapshot files. The snapshot + * should not be deleted while there are jobs reading from snapshot files. *
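Editor's note: the overview above assumes a snapshot already exists. For completeness, a snapshot can be taken with the client Admin API before the job is configured; a brief sketch under that assumption, with hypothetical table and snapshot names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // The snapshot name is what later gets passed to TableSnapshotInputFormat.
      admin.snapshot("my_snapshot", TableName.valueOf("my_table"));
    }
  }
}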

    * Usage is similar to TableInputFormat, and * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)} * can be used to configure the job. - *

    {@code
    - * Job job = new Job(conf);
    - * Scan scan = new Scan();
    - * TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
    - *      scan, MyTableMapper.class, MyMapKeyOutput.class,
    - *      MyMapOutputValueWritable.class, job, true);
    + *
    + * <pre>
    + * {
    + *   @code
    + *   Job job = new Job(conf);
    + *   Scan scan = new Scan();
    + *   TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, MyTableMapper.class,
    + *     MyMapKeyOutput.class, MyMapOutputValueWritable.class, job, true);
      * }
      * </pre>
    *

    - * Internally, this input format restores the snapshot into the given tmp directory. By default, - * and similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you - * can run N mapper tasks per every region, in which case the region key range will be split to - * N sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading - * from each RecordReader. An internal RegionScanner is used to execute the + * Internally, this input format restores the snapshot into the given tmp directory. By default, and + * similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you can + * run N mapper tasks per every region, in which case the region key range will be split to N + * sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading from + * each RecordReader. An internal RegionScanner is used to execute the * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user. *
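Editor's note: to illustrate the "N mapper tasks per region" option described above, a sketch that configures the job through the setInput overload added in this file. The snapshot name, restore directory, and split count are placeholders, and the mapper/output wiring normally done by TableMapReduceUtil is omitted:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotJobSetup {
  public static void configure(Job job) throws IOException {
    // Must be writable by the current user and must not live under the HBase rootdir.
    Path restoreDir = new Path("/tmp/snapshot-restore");
    // Four input splits per region; sub-ranges are computed by UniformSplit.
    TableSnapshotInputFormat.setInput(job, "my_snapshot", restoreDir,
      new RegionSplitter.UniformSplit(), 4);
    job.setInputFormatClass(TableSnapshotInputFormat.class);
  }
}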

    * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from - * snapshot files and data files. - * To read from snapshot files directly from the file system, the user who is running the MR job - * must have sufficient permissions to access snapshot and reference files. - * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase - * user or the user must have group or other privileges in the filesystem (See HBASE-8369). + * snapshot files and data files. To read from snapshot files directly from the file system, the + * user who is running the MR job must have sufficient permissions to access snapshot and reference + * files. This means that to run mapreduce over snapshot files, the MR job has to be run as the + * HBase user or the user must have group or other privileges in the filesystem (See HBASE-8369). * Note that, given other users access to read from snapshot/data files will completely circumvent * the access control enforced by HBase. * @see org.apache.hadoop.hbase.client.TableSnapshotScanner @@ -95,9 +95,9 @@ public TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate } public TableSnapshotRegionSplit(TableDescriptor htd, RegionInfo regionInfo, - List locations, Scan scan, Path restoreDir) { + List locations, Scan scan, Path restoreDir) { this.delegate = - new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); + new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir); } @Override @@ -130,19 +130,17 @@ TableSnapshotInputFormatImpl.InputSplit getDelegate() { } @InterfaceAudience.Private - static class TableSnapshotRegionRecordReader extends - RecordReader { + static class TableSnapshotRegionRecordReader + extends RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate = new TableSnapshotInputFormatImpl.RecordReader(); private TaskAttemptContext context; @Override - public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { this.context = context; - delegate.initialize( - ((TableSnapshotRegionSplit) split).delegate, - context.getConfiguration()); + delegate.initialize(((TableSnapshotRegionSplit) split).delegate, context.getConfiguration()); } @Override @@ -179,16 +177,16 @@ public void close() throws IOException { } @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { return new TableSnapshotRegionRecordReader(); } @Override public List getSplits(JobContext job) throws IOException, InterruptedException { List results = new ArrayList<>(); - for (TableSnapshotInputFormatImpl.InputSplit split : - TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) { + for (TableSnapshotInputFormatImpl.InputSplit split : TableSnapshotInputFormatImpl + .getSplits(job.getConfiguration())) { results.add(new TableSnapshotRegionSplit(split)); } return results; @@ -196,38 +194,39 @@ public List getSplits(JobContext job) throws IOException, Interrupte /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. 
- * @param job the job to configure + * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootdir. After the job is finished, restoreDir can be + * deleted. * @throws IOException if an error occurs */ - public static void setInput(Job job, String snapshotName, Path restoreDir) - throws IOException { + public static void setInput(Job job, String snapshotName, Path restoreDir) throws IOException { TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param job the job to configure - * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. - * @param splitAlgo split algorithm to generate splits from region + * @param job the job to configure + * @param snapshotName the name of the snapshot to read from + * @param restoreDir a temporary directory to restore the snapshot into. Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restoreDir + * can be deleted. + * @param splitAlgo split algorithm to generate splits from region * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException if an error occurs */ - public static void setInput(Job job, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { - TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir, - splitAlgo, numSplitsPerRegion); - } + public static void setInput(Job job, String snapshotName, Path restoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { + TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir, + splitAlgo, numSplitsPerRegion); + } /** - * clean restore directory after snapshot scan job - * @param job the snapshot scan job + * clean restore directory after snapshot scan job + * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java index e454157da269..501209f1c902 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.ByteArrayOutputStream; @@ -84,34 +83,32 @@ public class TableSnapshotInputFormatImpl { private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f; /** - * For MapReduce jobs running multiple mappers per region, determines - * what split algorithm we should be using to find split points for scanners. + * For MapReduce jobs running multiple mappers per region, determines what split algorithm we + * should be using to find split points for scanners. */ public static final String SPLIT_ALGO = "hbase.mapreduce.split.algorithm"; /** - * For MapReduce jobs running multiple mappers per region, determines - * number of splits to generate per region. + * For MapReduce jobs running multiple mappers per region, determines number of splits to generate + * per region. */ public static final String NUM_SPLITS_PER_REGION = "hbase.mapreduce.splits.per.region"; /** - * Whether to calculate the block location for splits. Default to true. - * If the computing layer runs outside of HBase cluster, the block locality does not master. - * Setting this value to false could skip the calculation and save some time. - * - * Set access modifier to "public" so that these could be accessed by test classes of - * both org.apache.hadoop.hbase.mapred - * and org.apache.hadoop.hbase.mapreduce. + * Whether to calculate the block location for splits. Default to true. If the computing layer + * runs outside of HBase cluster, the block locality does not master. Setting this value to false + * could skip the calculation and save some time. Set access modifier to "public" so that these + * could be accessed by test classes of both org.apache.hadoop.hbase.mapred and + * org.apache.hadoop.hbase.mapreduce. */ - public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY = - "hbase.TableSnapshotInputFormat.locality.enabled"; + public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY = + "hbase.TableSnapshotInputFormat.locality.enabled"; public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT = true; /** - * Whether to calculate the Snapshot region location by region location from meta. - * It is much faster than computing block locations for splits. + * Whether to calculate the Snapshot region location by region location from meta. It is much + * faster than computing block locations for splits. 
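Editor's note: a short sketch of toggling the locality-related keys documented above on a job Configuration. The key strings are taken verbatim from this class; the values shown are illustrative, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SnapshotLocalityConfig {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Skip HDFS block-location computation entirely (useful when running off-cluster).
    conf.setBoolean("hbase.TableSnapshotInputFormat.locality.enabled", false);
    // Or keep locality but derive it from region locations in meta, which is cheaper.
    conf.setBoolean("hbase.TableSnapshotInputFormat.locality.by.region.location", true);
    return conf;
  }
}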
*/ - public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION = + public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION = "hbase.TableSnapshotInputFormat.locality.by.region.location"; public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT = false; @@ -120,12 +117,12 @@ public class TableSnapshotInputFormatImpl { * In some scenario, scan limited rows on each InputSplit for sampling data extraction */ public static final String SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT = - "hbase.TableSnapshotInputFormat.row.limit.per.inputsplit"; + "hbase.TableSnapshotInputFormat.row.limit.per.inputsplit"; /** * Whether to enable scan metrics on Scan, default to true */ - public static final String SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED = + public static final String SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED = "hbase.TableSnapshotInputFormat.scan_metrics.enabled"; public static final boolean SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true; @@ -135,7 +132,7 @@ public class TableSnapshotInputFormatImpl { * default STREAM. */ public static final String SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE = - "hbase.TableSnapshotInputFormat.scanner.readtype"; + "hbase.TableSnapshotInputFormat.scanner.readtype"; public static final ReadType SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT = ReadType.STREAM; /** @@ -150,10 +147,11 @@ public static class InputSplit implements Writable { private String restoreDir; // constructor for mapreduce framework / Writable - public InputSplit() {} + public InputSplit() { + } - public InputSplit(TableDescriptor htd, RegionInfo regionInfo, List locations, - Scan scan, Path restoreDir) { + public InputSplit(TableDescriptor htd, RegionInfo regionInfo, List locations, Scan scan, + Path restoreDir) { this.htd = htd; this.regionInfo = regionInfo; if (locations == null || locations.isEmpty()) { @@ -183,7 +181,7 @@ public String getRestoreDir() { } public long getLength() { - //TODO: We can obtain the file sizes of the snapshot here. + // TODO: We can obtain the file sizes of the snapshot here. 
return 0; } @@ -204,8 +202,7 @@ public RegionInfo getRegionInfo() { @Override public void write(DataOutput out) throws IOException { TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder() - .setTable(ProtobufUtil.toTableSchema(htd)) - .setRegion(ProtobufUtil.toRegionInfo(regionInfo)); + .setTable(ProtobufUtil.toTableSchema(htd)).setRegion(ProtobufUtil.toRegionInfo(regionInfo)); for (String location : locations) { builder.addLocations(location); @@ -265,7 +262,6 @@ public void initialize(InputSplit split, Configuration conf) throws IOException RegionInfo hri = this.split.getRegionInfo(); FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); - // region is immutable, this should be fine, // otherwise we have to set the thread read point scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -273,13 +269,13 @@ public void initialize(InputSplit split, Configuration conf) throws IOException scan.setCacheBlocks(false); scanner = - new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null); + new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null); } public boolean nextKeyValue() throws IOException { result = scanner.next(); if (result == null) { - //we are done + // we are done return false; } @@ -345,14 +341,13 @@ public static RegionSplitter.SplitAlgorithm getSplitAlgo(Configuration conf) thr } try { return Class.forName(splitAlgoClassName).asSubclass(RegionSplitter.SplitAlgorithm.class) - .getDeclaredConstructor().newInstance(); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + .getDeclaredConstructor().newInstance(); + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException("SplitAlgo class " + splitAlgoClassName + " is not found", e); } } - public static List getRegionInfosFromManifest(SnapshotManifest manifest) { List regionManifests = manifest.getRegionManifests(); if (regionManifests == null) { @@ -372,7 +367,7 @@ public static List getRegionInfosFromManifest(SnapshotManifest manif } public static SnapshotManifest getSnapshotManifest(Configuration conf, String snapshotName, - Path rootDir, FileSystem fs) throws IOException { + Path rootDir, FileSystem fs) throws IOException { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); return SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); @@ -394,31 +389,31 @@ public static Scan extractScanFromConf(Configuration conf) throws IOException { } if (scan.getReadType() == ReadType.DEFAULT) { - LOG.info("Provided Scan has DEFAULT ReadType," - + " updating STREAM for Snapshot-based InputFormat"); + LOG.info( + "Provided Scan has DEFAULT ReadType," + " updating STREAM for Snapshot-based InputFormat"); // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the default case. 
scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE, - SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); + SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); } return scan; } public static List getSplits(Scan scan, SnapshotManifest manifest, - List regionManifests, Path restoreDir, Configuration conf) throws IOException { + List regionManifests, Path restoreDir, Configuration conf) throws IOException { return getSplits(scan, manifest, regionManifests, restoreDir, conf, null, 1); } public static List getSplits(Scan scan, SnapshotManifest manifest, - List regionManifests, Path restoreDir, - Configuration conf, RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { + List regionManifests, Path restoreDir, Configuration conf, + RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { // load table descriptor TableDescriptor htd = manifest.getTableDescriptor(); Path tableDir = CommonFSUtils.getTableDir(restoreDir, htd.getTableName()); boolean localityEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean scanMetricsEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED, SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT); @@ -452,8 +447,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, if (localityEnabled) { if (regionLocator != null) { /* Get Location from the local cache */ - HRegionLocation - location = regionLocator.getRegionLocation(hri.getStartKey(), false); + HRegionLocation location = regionLocator.getRegionLocation(hri.getStartKey(), false); hosts = new ArrayList<>(1); hosts.add(location.getHostname()); @@ -465,8 +459,9 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, if (numSplits > 1) { byte[][] sp = sa.split(hri.getStartKey(), hri.getEndKey(), numSplits, true); for (int i = 0; i < sp.length - 1; i++) { - if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], - sp[i + 1])) { + if ( + PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], sp[i + 1]) + ) { Scan boundedScan = new Scan(scan); if (scan.getStartRow().length == 0) { @@ -487,8 +482,10 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, } } } else { - if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), - hri.getStartKey(), hri.getEndKey())) { + if ( + PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), + hri.getEndKey()) + ) { splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir)); } @@ -503,8 +500,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, * only when localityEnabled is true. */ private static List calculateLocationsForInputSplit(Configuration conf, - TableDescriptor htd, RegionInfo hri, Path tableDir) - throws IOException { + TableDescriptor htd, RegionInfo hri, Path tableDir) throws IOException { return getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir)); } @@ -514,15 +510,14 @@ private static List calculateLocationsForInputSplit(Configuration conf, * do not want to blindly pass all the locations, since we are creating one split per region, and * the region's blocks are all distributed throughout the cluster unless favorite node assignment * is used. On the expected stable case, only one location will contain most of the blocks as - * local. 
- * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. Here - * we are doing a simple heuristic, where we will pass all hosts which have at least 80% + * local. On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. + * Here we are doing a simple heuristic, where we will pass all hosts which have at least 80% * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top - * host with the best locality. - * Return at most numTopsAtMost locations if there are more than that. + * host with the best locality. Return at most numTopsAtMost locations if there are more than + * that. */ private static List getBestLocations(Configuration conf, - HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { + HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights(); if (hostAndWeights.length == 0) { // no matter what numTopsAtMost is @@ -543,8 +538,8 @@ private static List getBestLocations(Configuration conf, // When top >= 2, // do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality - double cutoffMultiplier - = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); + double cutoffMultiplier = + conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); double filterWeight = topHost.getWeight() * cutoffMultiplier; @@ -562,7 +557,7 @@ private static List getBestLocations(Configuration conf, } public static List getBestLocations(Configuration conf, - HDFSBlocksDistribution blockDistribution) { + HDFSBlocksDistribution blockDistribution) { // 3 nodes will contain highly local blocks. So default to 3. return getBestLocations(conf, blockDistribution, 3); } @@ -577,36 +572,37 @@ private static String getSnapshotName(Configuration conf) { /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param conf the job to configuration + * @param conf the job to configuration * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should have - * write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootdir. After the job is finished, restoreDir can be + * deleted. * @throws IOException if an error occurs */ public static void setInput(Configuration conf, String snapshotName, Path restoreDir) - throws IOException { + throws IOException { setInput(conf, snapshotName, restoreDir, null, 1); } /** * Configures the job to use TableSnapshotInputFormat to read from a snapshot. - * @param conf the job to configure - * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should have - * write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param conf the job to configure + * @param snapshotName the name of the snapshot to read from + * @param restoreDir a temporary directory to restore the snapshot into. 
Current user + * should have write permissions to this directory, and this should not + * be a subdirectory of rootdir. After the job is finished, restoreDir + * can be deleted. * @param numSplitsPerRegion how many input splits to generate per one region - * @param splitAlgo SplitAlgorithm to be used when generating InputSplits + * @param splitAlgo SplitAlgorithm to be used when generating InputSplits * @throws IOException if an error occurs */ public static void setInput(Configuration conf, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) - throws IOException { + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { conf.set(SNAPSHOT_NAME_KEY, snapshotName); if (numSplitsPerRegion < 1) { - throw new IllegalArgumentException("numSplits must be >= 1, " + - "illegal numSplits : " + numSplitsPerRegion); + throw new IllegalArgumentException( + "numSplits must be >= 1, " + "illegal numSplits : " + numSplitsPerRegion); } if (splitAlgo == null && numSplitsPerRegion > 1) { throw new IllegalArgumentException("Split algo can't be null when numSplits > 1"); @@ -625,8 +621,8 @@ public static void setInput(Configuration conf, String snapshotName, Path restor } /** - * clean restore directory after snapshot scan job - * @param job the snapshot scan job + * clean restore directory after snapshot scan job + * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs */ @@ -641,6 +637,6 @@ public static void cleanRestoreDir(Job job, String snapshotName) throws IOExcept if (!fs.delete(restoreDir, true)) { LOG.warn("Failed clean restore dir {} for snapshot {}", restoreDir, snapshotName); } - LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName); + LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 93300ebb0f39..f1a71faf9bab 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +33,11 @@ import org.slf4j.LoggerFactory; /** - * A table split corresponds to a key range (low, high) and an optional scanner. - * All references to row below refer to the key of the row. + * A table split corresponds to a key range (low, high) and an optional scanner. All references to + * row below refer to the key of the row. */ @InterfaceAudience.Public -public class TableSplit extends InputSplit - implements Writable, Comparable { +public class TableSplit extends InputSplit implements Writable, Comparable { /** @deprecated LOG variable would be made private. 
fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -79,76 +77,68 @@ static Version fromCode(int code) { private static final Version VERSION = Version.WITH_ENCODED_REGION_NAME; private TableName tableName; - private byte [] startRow; - private byte [] endRow; + private byte[] startRow; + private byte[] endRow; private String regionLocation; private String encodedRegionName = ""; /** - * The scan object may be null but the serialized form of scan is never null - * or empty since we serialize the scan object with default values then. - * Having no scanner in TableSplit doesn't necessarily mean there is no scanner - * for mapreduce job, it just means that we do not need to set it for each split. - * For example, it is not required to have a scan object for - * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the - * job conf and scanner is supposed to be same for all the splits of table. + * The scan object may be null but the serialized form of scan is never null or empty since we + * serialize the scan object with default values then. Having no scanner in TableSplit doesn't + * necessarily mean there is no scanner for mapreduce job, it just means that we do not need to + * set it for each split. For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the job + * conf and scanner is supposed to be same for all the splits of table. */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes /** Default constructor. */ public TableSplit() { - this((TableName)null, null, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, ""); + this((TableName) null, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** - * Creates a new instance while assigning all variables. - * Length of region is set to 0 - * Encoded name of the region is set to blank - * - * @param tableName The name of the current table. - * @param scan The scan associated with this split. + * Creates a new instance while assigning all variables. Length of region is set to 0 Encoded name + * of the region is set to blank + * @param tableName The name of the current table. + * @param scan The scan associated with this split. * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location) { this(tableName, scan, startRow, endRow, location, 0L); } /** - * Creates a new instance while assigning all variables. - * Encoded name of region is set to blank - * - * @param tableName The name of the current table. - * @param scan The scan associated with this split. + * Creates a new instance while assigning all variables. Encoded name of region is set to blank + * @param tableName The name of the current table. + * @param scan The scan associated with this split. * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param endRow The end row of the split. * @param location The location of the region. 
*/ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location, long length) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location, long length) { this(tableName, scan, startRow, endRow, location, "", length); } /** * Creates a new instance while assigning all variables. - * - * @param tableName The name of the current table. - * @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param tableName The name of the current table. + * @param scan The scan associated with this split. + * @param startRow The start row of the split. + * @param endRow The end row of the split. * @param encodedRegionName The region ID. - * @param location The location of the region. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location, final String encodedRegionName, long length) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location, final String encodedRegionName, long length) { this.tableName = tableName; try { - this.scan = - (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); + this.scan = (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); } catch (IOException e) { LOG.warn("Failed to convert Scan to String", e); } @@ -160,36 +150,31 @@ public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endR } /** - * Creates a new instance without a scanner. - * Length of region is set to 0 - * + * Creates a new instance without a scanner. Length of region is set to 0 * @param tableName The name of the current table. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this(tableName, null, startRow, endRow, location); } /** * Creates a new instance without a scanner. - * * @param tableName The name of the current table. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. - * @param length Size of region in bytes + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. + * @param length Size of region in bytes */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location, long length) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location, + long length) { this(tableName, null, startRow, endRow, location, length); } /** * Returns a Scan object from the stored string representation. - * * @return Returns a Scan object based on the stored scanner. * @throws IOException throws IOException if deserialization fails */ @@ -199,9 +184,9 @@ public Scan getScan() throws IOException { /** * Returns a scan string - * @return scan as string. 
Should be noted that this is not same as getScan().toString() - * because Scan object will have the default values when empty scan string is - * deserialized. Thus, getScan().toString() can never be empty + * @return scan as string. Should be noted that this is not same as getScan().toString() because + * Scan object will have the default values when empty scan string is deserialized. Thus, + * getScan().toString() can never be empty */ @InterfaceAudience.Private public String getScanAsString() { @@ -213,17 +198,16 @@ public String getScanAsString() { * @see #getTable() * @return The table name. */ - public byte [] getTableName() { + public byte[] getTableName() { return tableName.getName(); } /** * Returns the table name. - * * @return The table name. */ public TableName getTable() { - // It is ugly that usually to get a TableName, the method is called getTableName. We can't do + // It is ugly that usually to get a TableName, the method is called getTableName. We can't do // that in here though because there was an existing getTableName in place already since // deprecated. return tableName; @@ -231,25 +215,22 @@ public TableName getTable() { /** * Returns the start row. - * * @return The start row. */ - public byte [] getStartRow() { + public byte[] getStartRow() { return startRow; } /** * Returns the end row. - * * @return The end row. */ - public byte [] getEndRow() { + public byte[] getEndRow() { return endRow; } /** * Returns the region location. - * * @return The region's location. */ public String getRegionLocation() { @@ -258,18 +239,16 @@ public String getRegionLocation() { /** * Returns the region's location as an array. - * * @return The array containing the region location. * @see org.apache.hadoop.mapreduce.InputSplit#getLocations() */ @Override public String[] getLocations() { - return new String[] {regionLocation}; + return new String[] { regionLocation }; } /** * Returns the region's encoded name. - * * @return The region's encoded name. */ public String getEncodedRegionName() { @@ -278,7 +257,6 @@ public String getEncodedRegionName() { /** * Returns the length of the split. - * * @return The length of the split. * @see org.apache.hadoop.mapreduce.InputSplit#getLength() */ @@ -289,8 +267,7 @@ public long getLength() { /** * Reads the values of each field. - * - * @param in The input to read from. + * @param in The input to read from. * @throws IOException When reading the input fails. */ @Override @@ -327,8 +304,7 @@ public void readFields(DataInput in) throws IOException { /** * Writes the field values to the output. - * - * @param out The output to write to. + * @param out The output to write to. * @throws IOException When writing the values to the output fails. */ @Override @@ -345,7 +321,6 @@ public void write(DataOutput out) throws IOException { /** * Returns the details about this instance as a string. - * * @return The values of this instance as a string. * @see java.lang.Object#toString() */ @@ -360,8 +335,7 @@ public String toString() { try { // get the real scan here in toString, not the Base64 string printScan = TableMapReduceUtil.convertStringToScan(scan).toString(); - } - catch (IOException e) { + } catch (IOException e) { printScan = ""; } sb.append(", scan=").append(printScan); @@ -376,8 +350,7 @@ public String toString() { /** * Compares this split against the given one. - * - * @param split The split to compare to. + * @param split The split to compare to. * @return The result of the comparison. 
* @see java.lang.Comparable#compareTo(java.lang.Object) */ @@ -385,10 +358,10 @@ public String toString() { public int compareTo(TableSplit split) { // If The table name of the two splits is the same then compare start row // otherwise compare based on table names - int tableNameComparison = - getTable().compareTo(split.getTable()); - return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo( - getStartRow(), split.getStartRow()); + int tableNameComparison = getTable().compareTo(split.getTable()); + return tableNameComparison != 0 + ? tableNameComparison + : Bytes.compareTo(getStartRow(), split.getStartRow()); } @Override @@ -396,10 +369,10 @@ public boolean equals(Object o) { if (o == null || !(o instanceof TableSplit)) { return false; } - return tableName.equals(((TableSplit)o).tableName) && - Bytes.equals(startRow, ((TableSplit)o).startRow) && - Bytes.equals(endRow, ((TableSplit)o).endRow) && - regionLocation.equals(((TableSplit)o).regionLocation); + return tableName.equals(((TableSplit) o).tableName) + && Bytes.equals(startRow, ((TableSplit) o).startRow) + && Bytes.equals(endRow, ((TableSplit) o).endRow) + && regionLocation.equals(((TableSplit) o).regionLocation); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index 667ca97e3f1b..79dfe752be0c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Set; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.security.visibility.InvalidLabelException; import org.apache.hadoop.hbase.util.Bytes; @@ -41,6 +39,7 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Emits Sorted KeyValues. Parse the passed text and creates KeyValues. Sorts them before emit. @@ -49,8 +48,8 @@ * @see PutSortReducer */ @InterfaceAudience.Public -public class TextSortReducer extends - Reducer { +public class TextSortReducer + extends Reducer { /** Timestamp for all inserted rows */ private long ts; @@ -90,12 +89,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * - * @param context + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. 
Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. n */ @Override protected void setup(Context context) { @@ -110,9 +107,7 @@ protected void setup(Context context) { } /** - * Handles common parameter initialization that a subclass might want to leverage. - * @param context - * @param conf + * Handles common parameter initialization that a subclass might want to leverage. nn */ protected void doSetup(Context context, Configuration conf) { // If a custom separator has been used, @@ -132,16 +127,11 @@ protected void doSetup(Context context, Configuration conf) { } @Override - protected void reduce( - ImmutableBytesWritable rowKey, - java.lang.Iterable lines, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable rowKey, java.lang.Iterable lines, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "reducer.row.threshold", 1L * (1<<30)); + long threshold = context.getConfiguration().getLong("reducer.row.threshold", 1L * (1 << 30)); Iterator iter = lines.iterator(); while (iter.hasNext()) { Set kvs = new TreeSet<>(CellComparator.getInstance()); @@ -160,8 +150,8 @@ protected void reduce( // create tags for the parsed line List tags = new ArrayList<>(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -169,23 +159,25 @@ protected void reduce( tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl))); } for (int i = 0; i < parsed.getColumnCount(); i++) { - if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() - || i == parser.getCellTTLColumnIndex()) { + if ( + i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex() + ) { continue; } // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. Cell cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), - parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, - parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); + parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, + parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, + parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); kvs.add(kv); curSize += kv.heapSize(); } } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException - | InvalidLabelException badLine) { + | InvalidLabelException badLine) { if (skipBadLines) { System.err.println("Bad line." 
+ badLine.getMessage()); incrementBadLineCount(1); @@ -194,13 +186,12 @@ protected void reduce( throw new IOException(badLine); } } - context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : kvs) { context.write(rowKey, kv); - if (++index > 0 && index % 100 == 0) - context.setStatus("Wrote " + index + " key values."); + if (++index > 0 && index % 100 == 0) context.setStatus("Wrote " + index + " key values."); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java index 5d406195d40b..b6c4e814113f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,14 +21,12 @@ import java.util.ArrayList; import java.util.Base64; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -39,6 +37,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +45,7 @@ * Write table content out to files in hdfs. */ @InterfaceAudience.Public -public class TsvImporterMapper - extends Mapper { +public class TsvImporterMapper extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(TsvImporterMapper.class); /** Timestamp for all inserted rows */ @@ -58,7 +56,7 @@ public class TsvImporterMapper /** Should skip bad lines */ private boolean skipBadLines; - /** Should skip empty columns*/ + /** Should skip empty columns */ private boolean skipEmptyColumns; private Counter badLineCount; private boolean logBadLines; @@ -95,20 +93,17 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * - * @param context + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. 
n */ @Override protected void setup(Context context) { doSetup(context); conf = context.getConfiguration(); - parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), - separator); + parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator); if (parser.getRowKeyColumnIndex() == -1) { throw new RuntimeException("No row key column specified"); } @@ -117,8 +112,7 @@ protected void setup(Context context) { } /** - * Handles common parameter initialization that a subclass might want to leverage. - * @param context + * Handles common parameter initialization that a subclass might want to leverage. n */ protected void doSetup(Context context) { Configuration conf = context.getConfiguration(); @@ -135,10 +129,8 @@ protected void doSetup(Context context) { // configuration. ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0); - skipEmptyColumns = context.getConfiguration().getBoolean( - ImportTsv.SKIP_EMPTY_COLUMNS, false); - skipBadLines = context.getConfiguration().getBoolean( - ImportTsv.SKIP_LINES_CONF_KEY, true); + skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false); + skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false); hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY); @@ -148,18 +140,13 @@ protected void doSetup(Context context) { * Convert a line of TSV text into an HBase table row. */ @Override - public void map(LongWritable offset, Text value, - Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] lineBytes = value.getBytes(); try { - ImportTsv.TsvParser.ParsedLine parsed = parser.parse( - lineBytes, value.getLength()); + ImportTsv.TsvParser.ParsedLine parsed = parser.parse(lineBytes, value.getLength()); ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(lineBytes, - parsed.getRowKeyOffset(), - parsed.getRowKeyLength()); + new ImmutableBytesWritable(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength()); // Retrieve timestamp if exists ts = parsed.getTimestamp(ts); cellVisibilityExpr = parsed.getCellVisibility(); @@ -169,8 +156,8 @@ public void map(LongWritable offset, Text value, if (hfileOutPath != null) { tags.clear(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -180,17 +167,19 @@ public void map(LongWritable offset, Text value, } Put put = new Put(rowKey.copyBytes()); for (int i = 0; i < parsed.getColumnCount(); i++) { - if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() - || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns - && parsed.getColumnLength(i) == 0)) { + if ( + i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex() + || (skipEmptyColumns && parsed.getColumnLength(i) == 0) + ) { continue; } populatePut(lineBytes, parsed, 
put, i); } context.write(rowKey, put); } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException - | InvalidLabelException badLine) { + | InvalidLabelException badLine) { if (logBadLines) { System.err.println(value); } @@ -207,13 +196,13 @@ public void map(LongWritable offset, Text value, } protected void populatePut(byte[] lineBytes, ImportTsv.TsvParser.ParsedLine parsed, Put put, - int i) throws BadTsvLineException, IOException { + int i) throws BadTsvLineException, IOException { Cell cell = null; if (hfileOutPath == null) { cell = new KeyValue(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i)); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i)); if (cellVisibilityExpr != null) { // We won't be validating the expression here. The Visibility CP will do // the validation @@ -226,9 +215,9 @@ protected void populatePut(byte[] lineBytes, ImportTsv.TsvParser.ParsedLine pars // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), - parsed.getColumnLength(i), tags); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i), tags); } put.add(cell); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java index 0127f26955c3..c9f7012edde3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,15 +19,14 @@ import java.io.IOException; import java.util.Base64; - -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,7 +35,7 @@ */ @InterfaceAudience.Public public class TsvImporterTextMapper - extends Mapper { + extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(TsvImporterTextMapper.class); /** Column seperator */ @@ -62,12 +61,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * - * @param context + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. n */ @Override protected void setup(Context context) { @@ -82,8 +79,7 @@ protected void setup(Context context) { } /** - * Handles common parameter initialization that a subclass might want to leverage. - * @param context + * Handles common parameter initialization that a subclass might want to leverage. n */ protected void doSetup(Context context) { Configuration conf = context.getConfiguration(); @@ -108,11 +104,12 @@ protected void doSetup(Context context) { @Override public void map(LongWritable offset, Text value, Context context) throws IOException { try { - Pair rowKeyOffests = parser.parseRowKey(value.getBytes(), value.getLength()); - ImmutableBytesWritable rowKey = new ImmutableBytesWritable( - value.getBytes(), rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); + Pair rowKeyOffests = + parser.parseRowKey(value.getBytes(), value.getLength()); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(value.getBytes(), + rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); context.write(rowKey, value); - } catch (ImportTsv.TsvParser.BadTsvLineException|IllegalArgumentException badLine) { + } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException badLine) { if (logBadLines) { System.err.println(value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java index f0f4c82a5ad8..b42c0d9116d2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,9 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface to convert visibility expressions into Tags for storing along with Cells in HFiles. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 30d112fd1c0c..1acc8e330217 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -62,8 +62,7 @@ public class WALInputFormat extends InputFormat { public static final String END_TIME_KEY = "wal.end.time"; /** - * {@link InputSplit} for {@link WAL} files. Each split represent - * exactly one log file. + * {@link InputSplit} for {@link WAL} files. Each split represent exactly one log file. */ static class WALSplit extends InputSplit implements Writable { private String logFileName; @@ -72,12 +71,12 @@ static class WALSplit extends InputSplit implements Writable { private long endTime; /** for serialization */ - public WALSplit() {} + public WALSplit() { + } /** - * Represent an WALSplit, i.e. a single WAL file. - * Start- and EndTime are managed by the split, so that WAL files can be - * filtered before WALEdits are passed to the mapper(s). + * Represent an WALSplit, i.e. a single WAL file. Start- and EndTime are managed by the split, + * so that WAL files can be filtered before WALEdits are passed to the mapper(s). */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -132,8 +131,8 @@ public String toString() { } /** - * {@link RecordReader} for an {@link WAL} file. - * Implementation shared with deprecated HLogInputFormat. + * {@link RecordReader} for an {@link WAL} file. Implementation shared with deprecated + * HLogInputFormat. 
*/ static abstract class WALRecordReader extends RecordReader { private Reader reader = null; @@ -147,8 +146,8 @@ static abstract class WALRecordReader extends RecordReader { @Override @@ -262,8 +261,7 @@ public WALKey getCurrentKey() throws IOException, InterruptedException { } @Override - public List getSplits(JobContext context) throws IOException, - InterruptedException { + public List getSplits(JobContext context) throws IOException, InterruptedException { return getSplits(context, START_TIME_KEY, END_TIME_KEY); } @@ -271,7 +269,7 @@ public List getSplits(JobContext context) throws IOException, * implementation shared with deprecated HLogInputFormat */ List getSplits(final JobContext context, final String startKey, final String endKey) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); boolean ignoreMissing = conf.getBoolean(WALPlayer.IGNORE_MISSING_FILES, false); Path[] inputPaths = getInputPaths(conf); @@ -281,14 +279,14 @@ List getSplits(final JobContext context, final String startKey, fina long endTime = conf.getLong(endKey, Long.MAX_VALUE); List allFiles = new ArrayList(); - for(Path inputPath: inputPaths){ + for (Path inputPath : inputPaths) { FileSystem fs = inputPath.getFileSystem(conf); try { List files = getFiles(fs, inputPath, startTime, endTime); allFiles.addAll(files); } catch (FileNotFoundException e) { if (ignoreMissing) { - LOG.warn("File "+ inputPath +" is missing. Skipping it."); + LOG.warn("File " + inputPath + " is missing. Skipping it."); continue; } throw e; @@ -303,20 +301,20 @@ List getSplits(final JobContext context, final String startKey, fina private Path[] getInputPaths(Configuration conf) { String inpDirs = conf.get(FileInputFormat.INPUT_DIR); - return StringUtils.stringToPath( - inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); + return StringUtils + .stringToPath(inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } /** - * @param startTime If file looks like it has a timestamp in its name, we'll check if newer - * or equal to this value else we will filter out the file. If name does not - * seem to have a timestamp, we will just return it w/o filtering. - * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal - * to this value else we will filter out the file. If name does not seem to - * have a timestamp, we will just return it w/o filtering. + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer or + * equal to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. + * @param endTime If file looks like it has a timestamp in its name, we'll check if older or + * equal to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. 
*/ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) - throws IOException { + throws IOException { List result = new ArrayList<>(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); RemoteIterator iter = fs.listLocatedStatus(dir); @@ -339,7 +337,7 @@ private List getFiles(FileSystem fs, Path dir, long startTime, long } static void addFile(List result, LocatedFileStatus lfs, long startTime, - long endTime) { + long endTime) { long timestamp = AbstractFSWALProvider.getTimestamp(lfs.getPath().getName()); if (timestamp > 0) { // Looks like a valid timestamp. @@ -347,8 +345,8 @@ static void addFile(List result, LocatedFileStatus lfs, long startTi LOG.info("Found {}", lfs.getPath()); result.add(lfs); } else { - LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), - startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), startTime, + Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); } } else { // If no timestamp, add it regardless. @@ -359,7 +357,7 @@ static void addFile(List result, LocatedFileStatus lfs, long startTi @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) throws IOException, InterruptedException { + TaskAttemptContext context) throws IOException, InterruptedException { return new WALKeyRecordReader(); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index e3c4d7a328f6..56c6bebdf261 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -58,17 +58,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - /** - * A tool to replay WAL files as a M/R job. - * The WAL can be replayed for a set of tables or all tables, - * and a time range can be provided (in milliseconds). - * The WAL is filtered to the passed set of tables and the output - * can optionally be mapped to another set of tables. - * - * WAL replay can also generate HFiles for later bulk importing, - * in that case the WAL is replayed for a single table only. + * A tool to replay WAL files as a M/R job. The WAL can be replayed for a set of tables or all + * tables, and a time range can be provided (in milliseconds). The WAL is filtered to the passed set + * of tables and the output can optionally be mapped to another set of tables. WAL replay can also + * generate HFiles for later bulk importing, in that case the WAL is replayed for a single table + * only. */ @InterfaceAudience.Public public class WALPlayer extends Configured implements Tool { @@ -111,8 +106,8 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { continue; } byte[] outKey = multiTableSupport - ? Bytes.add(table.getName(), Bytes.toBytes(tableSeparator), CellUtil.cloneRow(cell)) - : CellUtil.cloneRow(cell); + ? Bytes.add(table.getName(), Bytes.toBytes(tableSeparator), CellUtil.cloneRow(cell)) + : CellUtil.cloneRow(cell); context.write(new ImmutableBytesWritable(outKey), new MapReduceExtendedCell(cell)); } } @@ -134,8 +129,8 @@ public void setup(Context context) throws IOException { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. 
Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected static enum Counter { /** Number of aggregated writes */ @@ -148,11 +143,10 @@ protected static enum Counter { } /** - * A mapper that writes out {@link Mutation} to be directly applied to - * a running HBase instance. + * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. */ protected static class WALMapper - extends Mapper { + extends Mapper { private Map tables = new TreeMap<>(); @Override @@ -161,7 +155,7 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { try { if (tables.isEmpty() || tables.containsKey(key.getTableName())) { TableName targetTable = - tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName()); + tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName()); ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName()); Put put = null; Delete del = null; @@ -178,8 +172,10 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { // multiple rows (HBASE-5229). // Aggregate as much as possible into a single Put/Delete // operation before writing to the context. - if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte() - || !CellUtil.matchingRows(lastCell, cell)) { + if ( + lastCell == null || lastCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(lastCell, cell) + ) { // row or type changed, write out aggregate KVs. if (put != null) { context.write(tableOut, put); @@ -226,8 +222,8 @@ protected boolean filter(Context context, final Cell cell) { @Override protected void - cleanup(Mapper.Context context) - throws IOException, InterruptedException { + cleanup(Mapper.Context context) + throws IOException, InterruptedException { super.cleanup(context); } @@ -269,8 +265,8 @@ void setupTime(Configuration conf, String option) throws IOException { ms = Long.parseLong(val); } catch (NumberFormatException nfe) { throw new IOException( - option + " must be specified either in the form 2001-02-20T16:35:06.99 " - + "or as number of milliseconds"); + option + " must be specified either in the form 2001-02-20T16:35:06.99 " + + "or as number of milliseconds"); } } conf.setLong(option, ms); @@ -287,7 +283,7 @@ public Job createSubmittableJob(String[] args) throws IOException { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args.length == 1? new String [] {}: args[1].split(","); + String[] tables = args.length == 1 ? new String[] {} : args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); @@ -301,8 +297,8 @@ public Job createSubmittableJob(String[] args) throws IOException { conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + - EnvironmentEdgeManager.currentTime())); + Job job = Job.getInstance(conf, + conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); @@ -370,12 +366,12 @@ private void usage(final String errorMsg) { System.err.println(" directory of WALs to replay."); System.err.println(" comma separated list of tables. 
If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); - System.err.println(" WAL entries can be mapped to a new set of tables by " + - "passing"); - System.err.println(" , a comma separated list of target " + - "tables."); - System.err.println(" If specified, each table in must have a " + - "mapping."); + System.err.println( + " WAL entries can be mapped to a new set of tables by " + "passing"); + System.err + .println(" , a comma separated list of target " + "tables."); + System.err + .println(" If specified, each table in must have a " + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); @@ -383,8 +379,8 @@ private void usage(final String errorMsg) { System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); - System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + - "format."); + System.err + .println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + "format."); System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); @@ -392,8 +388,7 @@ private void usage(final String errorMsg) { System.err.println(" -Dwal.input.separator=' '"); System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 747b56474128..6a854b97239b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,20 +69,18 @@ import org.slf4j.LoggerFactory; /** - * This map-only job compares the data from a local table with a remote one. - * Every cell is compared and must have exactly the same keys (even timestamp) - * as well as same value. It is possible to restrict the job by time range and - * families. The peer id that's provided must match the one given when the - * replication stream was setup. + * This map-only job compares the data from a local table with a remote one. Every cell is compared + * and must have exactly the same keys (even timestamp) as well as same value. It is possible to + * restrict the job by time range and families. The peer id that's provided must match the one given + * when the replication stream was setup. *
    - * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason - * for a why a row is different is shown in the map's log. + * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason for a why a row is + * different is shown in the map's log. */ @InterfaceAudience.Private public class VerifyReplication extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(VerifyReplication.class); + private static final Logger LOG = LoggerFactory.getLogger(VerifyReplication.class); public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; @@ -100,32 +97,34 @@ public class VerifyReplication extends Configured implements Tool { int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; - //Source table snapshot name + // Source table snapshot name String sourceSnapshotName = null; - //Temp location in source cluster to restore source snapshot + // Temp location in source cluster to restore source snapshot String sourceSnapshotTmpDir = null; - //Peer table snapshot name + // Peer table snapshot name String peerSnapshotName = null; - //Temp location in peer cluster to restore peer snapshot + // Temp location in peer cluster to restore peer snapshot String peerSnapshotTmpDir = null; - //Peer cluster Hadoop FS address + // Peer cluster Hadoop FS address String peerFSAddress = null; - //Peer cluster HBase root dir location + // Peer cluster HBase root dir location String peerHBaseRootAddress = null; - //Peer Table Name + // Peer Table Name String peerTableName = null; - private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; /** * Map-only comparator for 2 tables */ - public static class Verifier - extends TableMapper { + public static class Verifier extends TableMapper { public enum Counters { - GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS + GOODROWS, + BADROWS, + ONLY_IN_SOURCE_TABLE_ROWS, + ONLY_IN_PEER_TABLE_ROWS, + CONTENT_DIFFERENT_ROWS } private Connection sourceConnection; @@ -140,22 +139,20 @@ public enum Counters { private int batch = -1; /** - * Map method that compares every scanned row with the equivalent from - * a distant cluster. - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * Map method that compares every scanned row with the equivalent from a distant cluster. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. 
*/ @Override - public void map(ImmutableBytesWritable row, final Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, final Result value, Context context) + throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); - sleepMsBeforeReCompare = conf.getInt(NAME +".sleepMsBeforeReCompare", 0); + sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); delimiter = conf.get(NAME + ".delimiter", ""); - verbose = conf.getBoolean(NAME +".verbose", false); + verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); final Scan scan = new Scan(); if (batch > 0) { @@ -166,9 +163,9 @@ public void map(ImmutableBytesWritable row, final Result value, long startTime = conf.getLong(NAME + ".startTime", 0); long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE); String families = conf.get(NAME + ".families", null); - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -177,7 +174,7 @@ public void map(ImmutableBytesWritable row, final Result value, String rowPrefixes = conf.get(NAME + ".rowPrefixes", null); setRowPrefixFilter(scan, rowPrefixes); scan.setTimeRange(startTime, endTime); - int versions = conf.getInt(NAME+".versions", -1); + int versions = conf.getInt(NAME + ".versions", -1); LOG.info("Setting number of version inside map as: " + versions); if (versions >= 0) { scan.readVersions(versions); @@ -189,8 +186,8 @@ public void map(ImmutableBytesWritable row, final Result value, final InputSplit tableSplit = context.getInputSplit(); String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = HBaseConfiguration.createClusterConf(conf, - zkClusterKey, PEER_CONFIG_PREFIX); + Configuration peerConf = + HBaseConfiguration.createClusterConf(conf, zkClusterKey, PEER_CONFIG_PREFIX); String peerName = peerConf.get(NAME + ".peerTableName", tableName.getNameAsString()); TableName peerTableName = TableName.valueOf(peerName); @@ -215,9 +212,9 @@ public void map(ImmutableBytesWritable row, final Result value, String peerHBaseRootAddress = conf.get(NAME + ".peerHBaseRootAddress", null); FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerHBaseRootAddress)); - LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + - peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + - " peerFSAddress:" + peerFSAddress); + LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + + peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + + " peerFSAddress:" + peerFSAddress); replicatedScanner = new TableSnapshotScanner(peerConf, CommonFSUtils.getRootDir(peerConf), new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true); @@ -239,8 +236,8 @@ public void map(ImmutableBytesWritable row, final Result value, Result.compareResults(value, currentCompareRowInPeerTable, false); context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key: " + delimiter - + Bytes.toStringBinary(value.getRow()) + delimiter); + LOG.info( + "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); @@ -270,21 +267,20 @@ private void 
logFailRowAndIncreaseCounter(Context context, Counters counter, Res if (!sourceResult.isEmpty()) { context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key (with recompare): " + delimiter + - Bytes.toStringBinary(row.getRow()) - + delimiter); + LOG.info("Good row key (with recompare): " + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } return; } catch (Exception e) { - LOG.error("recompare fail after sleep, rowkey=" + delimiter + - Bytes.toStringBinary(row.getRow()) + delimiter); + LOG.error("recompare fail after sleep, rowkey=" + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } context.getCounter(counter).increment(1); context.getCounter(Counters.BADROWS).increment(1); - LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + - delimiter); + LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + + delimiter); } @Override @@ -311,7 +307,7 @@ protected void cleanup(Context context) { LOG.error("fail to close source table in cleanup", e); } } - if(sourceConnection != null){ + if (sourceConnection != null) { try { sourceConnection.close(); } catch (Exception e) { @@ -319,14 +315,14 @@ protected void cleanup(Context context) { } } - if(replicatedTable != null){ - try{ + if (replicatedTable != null) { + try { replicatedTable.close(); } catch (Exception e) { LOG.error("fail to close replicated table in cleanup", e); } } - if(replicatedConnection != null){ + if (replicatedConnection != null) { try { replicatedConnection.close(); } catch (Exception e) { @@ -336,8 +332,8 @@ protected void cleanup(Context context) { } } - private static Pair getPeerQuorumConfig( - final Configuration conf, String peerId) throws IOException { + private static Pair + getPeerQuorumConfig(final Configuration conf, String peerId) throws IOException { ZKWatcher localZKW = null; try { localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() { @@ -357,7 +353,7 @@ public boolean isAborted() { ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); } catch (ReplicationException e) { throw new IOException("An error occurred while trying to connect to the remote peer cluster", - e); + e); } finally { if (localZKW != null) { localZKW.close(); @@ -378,30 +374,28 @@ private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorum /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws java.io.IOException When setting up the job fails. 
*/ - public Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public Job createSubmittableJob(Configuration conf, String[] args) throws IOException { if (!doCommandLine(args)) { return null; } - conf.set(NAME+".tableName", tableName); - conf.setLong(NAME+".startTime", startTime); - conf.setLong(NAME+".endTime", endTime); - conf.setInt(NAME +".sleepMsBeforeReCompare", sleepMsBeforeReCompare); + conf.set(NAME + ".tableName", tableName); + conf.setLong(NAME + ".startTime", startTime); + conf.setLong(NAME + ".endTime", endTime); + conf.setInt(NAME + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare); conf.set(NAME + ".delimiter", delimiter); conf.setInt(NAME + ".batch", batch); - conf.setBoolean(NAME +".verbose", verbose); - conf.setBoolean(NAME +".includeDeletedCells", includeDeletedCells); + conf.setBoolean(NAME + ".verbose", verbose); + conf.setBoolean(NAME + ".includeDeletedCells", includeDeletedCells); if (families != null) { - conf.set(NAME+".families", families); + conf.set(NAME + ".families", families); } - if (rowPrefixes != null){ - conf.set(NAME+".rowPrefixes", rowPrefixes); + if (rowPrefixes != null) { + conf.set(NAME + ".rowPrefixes", rowPrefixes); } String peerQuorumAddress; @@ -410,8 +404,8 @@ public Job createSubmittableJob(Configuration conf, String[] args) peerConfigPair = getPeerQuorumConfig(conf, peerId); ReplicationPeerConfig peerConfig = peerConfigPair.getFirst(); peerQuorumAddress = peerConfig.getClusterKey(); - LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + - peerConfig.getConfiguration()); + LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + + peerConfig.getConfiguration()); conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress); HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, peerConfig.getConfiguration().entrySet()); @@ -430,7 +424,7 @@ public Job createSubmittableJob(Configuration conf, String[] args) conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); - //Set Snapshot specific parameters + // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -461,9 +455,9 @@ public Job createSubmittableJob(Configuration conf, String[] args) scan.readVersions(versions); LOG.info("Number of versions set to " + versions); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -486,8 +480,8 @@ public Job createSubmittableJob(Configuration conf, String[] args) assert peerConfigPair != null; peerClusterConf = peerConfigPair.getSecond(); } else { - peerClusterConf = HBaseConfiguration.createClusterConf(conf, - peerQuorumAddress, PEER_CONFIG_PREFIX); + peerClusterConf = + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); } // Obtain the auth token from peer cluster TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf); @@ -508,7 +502,7 @@ private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { } scan.setFilter(filterList); byte[] startPrefixRow = Bytes.toBytes(rowPrefixArray[0]); - byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length -1]); + byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length - 1]); setStartAndStopRows(scan, startPrefixRow, lastPrefixRow); } } @@ -516,7 +510,7 @@ private static void setRowPrefixFilter(Scan scan, String 
rowPrefixes) { private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] lastPrefixRow) { scan.withStartRow(startPrefixRow); byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1), - new byte[]{(byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1)}); + new byte[] { (byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1) }); scan.withStopRow(stopRow); } @@ -570,7 +564,7 @@ public boolean doCommandLine(final String[] args) { } final String rowPrefixesKey = "--row-prefixes="; - if (cmd.startsWith(rowPrefixesKey)){ + if (cmd.startsWith(rowPrefixesKey)) { rowPrefixes = cmd.substring(rowPrefixesKey.length()); continue; } @@ -639,7 +633,7 @@ public boolean doCommandLine(final String[] args) { return false; } - if (i == args.length-2) { + if (i == args.length - 2) { if (isPeerQuorumAddress(cmd)) { peerQuorumAddress = cmd; } else { @@ -647,25 +641,31 @@ public boolean doCommandLine(final String[] args) { } } - if (i == args.length-1) { + if (i == args.length - 1) { tableName = cmd; } } - if ((sourceSnapshotName != null && sourceSnapshotTmpDir == null) - || (sourceSnapshotName == null && sourceSnapshotTmpDir != null)) { + if ( + (sourceSnapshotName != null && sourceSnapshotTmpDir == null) + || (sourceSnapshotName == null && sourceSnapshotTmpDir != null) + ) { printUsage("Source snapshot name and snapshot temp location should be provided" - + " to use snapshots in source cluster"); + + " to use snapshots in source cluster"); return false; } - if (peerSnapshotName != null || peerSnapshotTmpDir != null || peerFSAddress != null - || peerHBaseRootAddress != null) { - if (peerSnapshotName == null || peerSnapshotTmpDir == null || peerFSAddress == null - || peerHBaseRootAddress == null) { + if ( + peerSnapshotName != null || peerSnapshotTmpDir != null || peerFSAddress != null + || peerHBaseRootAddress != null + ) { + if ( + peerSnapshotName == null || peerSnapshotTmpDir == null || peerFSAddress == null + || peerHBaseRootAddress == null + ) { printUsage( "Peer snapshot name, peer snapshot temp location, Peer HBase root address and " - + "peer FSAddress should be provided to use snapshots in peer cluster"); + + "peer FSAddress should be provided to use snapshots in peer cluster"); return false; } } @@ -697,17 +697,17 @@ private boolean isPeerQuorumAddress(String cmd) { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. 
*/ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: verifyrep [--starttime=X]" - + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " - + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " - + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " - + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); + + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " + + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " + + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " + + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); System.err.println(); System.err.println("Options:"); System.err.println(" starttime beginning of the time range"); @@ -720,8 +720,8 @@ private static void printUsage(final String errorMsg) { System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + - "default value is 0 which disables the recompare."); + System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + + "default value is 0 which disables the recompare."); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -739,57 +739,53 @@ private static void printUsage(final String errorMsg) { System.err.println(" tablename Name of the table to verify"); System.err.println(); System.err.println("Examples:"); - System.err.println( - " To verify the data replicated from TestTable for a 1 hour window with peer #5 "); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + - " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); + System.err + .println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 "); + System.err + .println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + + " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); System.err.println(); System.err.println( " To verify the data in TestTable between the cluster runs VerifyReplication and cluster-b"); System.err.println(" Assume quorum address for cluster-b is" + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); - System.err.println( - " To verify the data in TestTable between the secured cluster runs VerifyReplication" + System.err + .println(" To verify the data in TestTable between the secured cluster runs VerifyReplication" + " and insecure cluster-b"); - System.err.println( - " $ hbase 
org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); - System.err.println(" To verify the data in TestTable between" + - " the secured cluster runs VerifyReplication and secured cluster-b"); - System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + - ", for master and regionserver kerberos principal from another cluster"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err.println(" To verify the data in TestTable between" + + " the secured cluster runs VerifyReplication and secured cluster-b"); + System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + + ", for master and regionserver kerberos principal from another cluster"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); System.err.println( " To verify the data in TestTable between the insecure cluster runs VerifyReplication" + " and secured cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); } @Override @@ -804,8 +800,7 @@ public int run(String[] args) throws Exception { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. 
*/ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java index 663b5564a1fb..923777e36cf9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,7 +62,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Scans a given table + CF for all mob reference cells to get the list of backing mob files. For * each referenced file we attempt to verify that said file is on the FileSystem in a place that the @@ -168,8 +166,8 @@ public class MobRefReporter extends Configured implements Tool { public static class MobRefMapper extends TableMapper { @Override - public void map(ImmutableBytesWritable r, Result columns, Context context) throws IOException, - InterruptedException { + public void map(ImmutableBytesWritable r, Result columns, Context context) + throws IOException, InterruptedException { if (columns == null) { return; } @@ -189,24 +187,27 @@ public void map(ImmutableBytesWritable r, Result columns, Context context) throw files.add(fileName); } final int cellsize = MobUtils.getMobValueLength(c); - context.getCounter("SIZES OF CELLS", "Number of cells with size in the " + - log10GroupedString(cellsize) + "s of bytes").increment(1L); + context + .getCounter("SIZES OF CELLS", + "Number of cells with size in the " + log10GroupedString(cellsize) + "s of bytes") + .increment(1L); size += cellsize; count++; } else { LOG.debug("cell is not a mob ref, even though we asked for only refs. 
cell={}", c); } } - context.getCounter("CELLS PER ROW", "Number of rows with " + log10GroupedString(count) + - "s of cells per row").increment(1L); - context.getCounter("SIZES OF ROWS", "Number of rows with total size in the " + - log10GroupedString(size) + "s of bytes").increment(1L); - context.getCounter("MOB","NUM_CELLS").increment(count); + context.getCounter("CELLS PER ROW", + "Number of rows with " + log10GroupedString(count) + "s of cells per row").increment(1L); + context + .getCounter("SIZES OF ROWS", + "Number of rows with total size in the " + log10GroupedString(size) + "s of bytes") + .increment(1L); + context.getCounter("MOB", "NUM_CELLS").increment(count); } } - public static class MobRefReducer extends - Reducer { + public static class MobRefReducer extends Reducer { TableName table; String mobRegion; @@ -220,7 +221,7 @@ public static class MobRefReducer extends final Text OK_HLINK_CLONE = new Text("HLINK TO ARCHIVE FOR OTHER TABLE"); /* Results that mean something is incorrect */ final Text INCONSISTENT_ARCHIVE_BAD_LINK = - new Text("ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE"); + new Text("ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE"); final Text INCONSISTENT_ARCHIVE_STALE = new Text("ARCHIVE BUT NO HLINKS"); final Text INCONSISTENT_ARCHIVE_IOE = new Text("ARCHIVE BUT FAILURE WHILE CHECKING HLINKS"); /* Results that mean data is probably already gone */ @@ -245,21 +246,21 @@ public void setup(Context context) throws IOException, InterruptedException { mob = MobUtils.getMobFamilyPath(conf, table, family); LOG.info("Using active mob area '{}'", mob); archive = HFileArchiveUtil.getStoreArchivePath(conf, table, - MobUtils.getMobRegionInfo(table).getEncodedName(), family); + MobUtils.getMobRegionInfo(table).getEncodedName(), family); LOG.info("Using archive mob area '{}'", archive); seperator = conf.get(TextOutputFormat.SEPERATOR, "\t"); } @Override public void reduce(Text key, Iterable rows, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final Configuration conf = context.getConfiguration(); final String file = key.toString(); // active mob area if (mob.getFileSystem(conf).exists(new Path(mob, file))) { LOG.debug("Found file '{}' in mob area", file); context.write(OK_MOB_DIR, key); - // archive area - is there an hlink back reference (from a snapshot from same table) + // archive area - is there an hlink back reference (from a snapshot from same table) } else if (archive.getFileSystem(conf).exists(new Path(archive, file))) { Path backRefDir = HFileLink.getBackReferencesDir(archive, file); @@ -268,37 +269,38 @@ public void reduce(Text key, Iterable rows, Context cont if (backRefs != null) { boolean found = false; for (FileStatus backRef : backRefs) { - Pair refParts = HFileLink.parseBackReferenceName( - backRef.getPath().getName()); + Pair refParts = + HFileLink.parseBackReferenceName(backRef.getPath().getName()); if (table.equals(refParts.getFirst()) && mobRegion.equals(refParts.getSecond())) { - Path hlinkPath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), - backRef.getPath()); + Path hlinkPath = + HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), backRef.getPath()); if (hlinkPath.getFileSystem(conf).exists(hlinkPath)) { found = true; } else { - LOG.warn("Found file '{}' in archive area with a back reference to the mob area " + LOG.warn( + "Found file '{}' in archive area with a back reference to the mob area " + "for our table, but the mob area does not have a corresponding hfilelink.", - 
file); + file); } } } if (found) { LOG.debug("Found file '{}' in archive area. has proper hlink back references to " - + "suggest it is from a restored snapshot for this table.", file); + + "suggest it is from a restored snapshot for this table.", file); context.write(OK_HLINK_RESTORE, key); } else { LOG.warn("Found file '{}' in archive area, but the hlink back references do not " - + "properly point to the mob area for our table.", file); + + "properly point to the mob area for our table.", file); context.write(INCONSISTENT_ARCHIVE_BAD_LINK, encodeRows(context, key, rows)); } } else { LOG.warn("Found file '{}' in archive area, but there are no hlinks pointing to it. Not " - + "yet used snapshot or an error.", file); + + "yet used snapshot or an error.", file); context.write(INCONSISTENT_ARCHIVE_STALE, encodeRows(context, key, rows)); } } catch (IOException e) { LOG.warn("Found file '{}' in archive area, but got an error while checking " - + "on back references.", file, e); + + "on back references.", file, e); context.write(INCONSISTENT_ARCHIVE_IOE, encodeRows(context, key, rows)); } @@ -306,19 +308,18 @@ public void reduce(Text key, Iterable rows, Context cont // check for an hlink in the active mob area (from a snapshot of a different table) try { /** - * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because - * we know the mob region never splits, so we can only have HFileLink references - * and looking for just them is cheaper then listing everything. - * - * This glob should match the naming convention for HFileLinks to our referenced hfile. - * As simplified explanation those file names look like "table=region-hfile". For details - * see the {@link HFileLink#createHFileLinkName HFileLink implementation}. + * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because we + * know the mob region never splits, so we can only have HFileLink references and looking + * for just them is cheaper then listing everything. This glob should match the naming + * convention for HFileLinks to our referenced hfile. As simplified explanation those file + * names look like "table=region-hfile". For details see the + * {@link HFileLink#createHFileLinkName HFileLink implementation}. */ FileStatus[] hlinks = mob.getFileSystem(conf).globStatus(new Path(mob + "/*=*-" + file)); if (hlinks != null && hlinks.length != 0) { if (hlinks.length != 1) { - LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + - "one: {}", file, Arrays.deepToString(hlinks)); + LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + + "one: {}", file, Arrays.deepToString(hlinks)); } HFileLink found = null; for (FileStatus hlink : hlinks) { @@ -335,24 +336,24 @@ public void reduce(Text key, Iterable rows, Context cont context.write(OK_HLINK_CLONE, key); } else { LOG.warn("Found file '{}' as ref(s) in the mob area but they do not point to an hfile" - + " that exists.", file); + + " that exists.", file); context.write(DATALOSS_HLINK_DANGLING, encodeRows(context, key, rows)); } } else { LOG.error("Could not find referenced file '{}'. See the docs on this tool.", file); LOG.debug("Note that we don't have the server-side tag from the mob cells that says " - + "what table the reference is originally from. So if the HFileLink in this table " - + "is missing but the referenced file is still in the table from that tag, then " - + "lookups of these impacted rows will work. 
Do a scan of the reference details " - + "of the cell for the hfile name and then check the entire hbase install if this " - + "table was made from a snapshot of another table. see the ref guide section on " - + "mob for details."); + + "what table the reference is originally from. So if the HFileLink in this table " + + "is missing but the referenced file is still in the table from that tag, then " + + "lookups of these impacted rows will work. Do a scan of the reference details " + + "of the cell for the hfile name and then check the entire hbase install if this " + + "table was made from a snapshot of another table. see the ref guide section on " + + "mob for details."); context.write(DATALOSS_MISSING, encodeRows(context, key, rows)); } } catch (IOException e) { LOG.error( - "Exception while checking mob area of our table for HFileLinks that point to {}", - file, e); + "Exception while checking mob area of our table for HFileLinks that point to {}", file, + e); context.write(DATALOSS_MISSING_IOE, encodeRows(context, key, rows)); } } @@ -363,7 +364,7 @@ public void reduce(Text key, Iterable rows, Context cont * of base64 encoded row keys */ private Text encodeRows(Context context, Text key, Iterable rows) - throws IOException { + throws IOException { StringBuilder sb = new StringBuilder(key.toString()); sb.append(seperator); boolean moreThanOne = false; @@ -378,25 +379,27 @@ private Text encodeRows(Context context, Text key, Iterable 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. + * Returns the string representation of the given number after grouping it into log10 buckets. + * e.g. 0-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. */ static String log10GroupedString(long number) { - return String.format("%,d", (long)(Math.pow(10d, Math.floor(Math.log10(number))))); + return String.format("%,d", (long) (Math.pow(10d, Math.floor(Math.log10(number))))); } /** * Main method for the tool. - * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, - * 3 if mr job was unsuccessful + * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, 3 if mr job was + * unsuccessful */ public int run(String[] args) throws IOException, InterruptedException { // TODO make family and table optional @@ -417,8 +420,8 @@ public int run(String[] args) throws IOException, InterruptedException { if (hbaseRootFileStat.length > 0) { String owner = hbaseRootFileStat[0].getOwner(); if (!owner.equals(currentUserName)) { - String errorMsg = "The current user[" + currentUserName - + "] does not have hbase root credentials." + String errorMsg = + "The current user[" + currentUserName + "] does not have hbase root credentials." + " If this job fails due to an inability to read HBase's internal directories, " + "you will need to rerun as a user with sufficient permissions. 
The HBase superuser " + "is a safe choice."; @@ -426,7 +429,7 @@ public int run(String[] args) throws IOException, InterruptedException { } } else { LOG.error("The passed configs point to an HBase dir does not exist: {}", - conf.get(HConstants.HBASE_DIR)); + conf.get(HConstants.HBASE_DIR)); throw new IOException("The target HBase does not exist"); } @@ -434,7 +437,7 @@ public int run(String[] args) throws IOException, InterruptedException { int maxVersions; TableName tn = TableName.valueOf(tableName); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableDescriptor htd = admin.getDescriptor(tn); ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName)); if (hcd == null || !hcd.isMobEnabled()) { @@ -444,7 +447,6 @@ public int run(String[] args) throws IOException, InterruptedException { maxVersions = hcd.getMaxVersions(); } - String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", ""); Job job = null; Scan scan = new Scan(); @@ -461,8 +463,8 @@ public int run(String[] args) throws IOException, InterruptedException { job = Job.getInstance(conf); job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob(tn, scan, - MobRefMapper.class, Text.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(tn, scan, MobRefMapper.class, Text.class, + ImmutableBytesWritable.class, job); job.setReducerClass(MobRefReducer.class); job.setOutputFormatClass(TextOutputFormat.class); @@ -497,7 +499,7 @@ public static void main(String[] args) throws Exception { private void printUsage() { System.err.println("Usage:\n" + "--------------------------\n" + MobRefReporter.class.getName() - + " output-dir tableName familyName"); + + " output-dir tableName familyName"); System.err.println(" output-dir Where to write output report."); System.err.println(" tableName The table name"); System.err.println(" familyName The column family name"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index fd09e34fde16..9f2db27466cb 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,8 @@ public class CompactionTool extends Configured implements Tool { private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete"; /** - * Class responsible to execute the Compaction on the specified path. - * The path can be a table, region or family directory. + * Class responsible to execute the Compaction on the specified path. The path can be a table, + * region or family directory. */ private static class CompactionWorker { private final boolean deleteCompacted; @@ -98,20 +98,18 @@ public CompactionWorker(final FileSystem fs, final Configuration conf) { /** * Execute the compaction on the specified path. - * - * @param path Directory path on which to run compaction. + * @param path Directory path on which to run compaction. * @param compactOnce Execute just a single step of compaction. - * @param major Request major compaction. 
+ * @param major Request major compaction. */ public void compact(final Path path, final boolean compactOnce, final boolean major) - throws IOException { + throws IOException { if (isFamilyDir(fs, path)) { Path regionDir = path.getParent(); Path tableDir = regionDir.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - compactStoreFiles(tableDir, htd, hri, - path.getName(), compactOnce, major); + compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); @@ -125,60 +123,57 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma } private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major) - throws IOException { + throws IOException { TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); - for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) { compactRegion(tableDir, htd, regionDir, compactOnce, major); } } - private void compactRegion(final Path tableDir, final TableDescriptor htd, - final Path regionDir, final boolean compactOnce, final boolean major) - throws IOException { + private void compactRegion(final Path tableDir, final TableDescriptor htd, final Path regionDir, + final boolean compactOnce, final boolean major) throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major); } } /** - * Execute the actual compaction job. - * If the compact once flag is not specified, execute the compaction until - * no more compactions are needed. Uses the Configuration settings provided. + * Execute the actual compaction job. If the compact once flag is not specified, execute the + * compaction until no more compactions are needed. Uses the Configuration settings provided. 
*/ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, - final RegionInfo hri, final String familyName, final boolean compactOnce, - final boolean major) throws IOException { + final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major) + throws IOException { HStore store = getStore(conf, fs, tableDir, htd, hri, familyName); - LOG.info("Compact table=" + htd.getTableName() + - " region=" + hri.getRegionNameAsString() + - " family=" + familyName); + LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString() + + " family=" + familyName); if (major) { store.triggerMajorCompaction(); } do { Optional compaction = - store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); + store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); if (!compaction.isPresent()) { break; } List storeFiles = - store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); + store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); if (storeFiles != null && !storeFiles.isEmpty()) { if (deleteCompacted) { - for (HStoreFile storeFile: storeFiles) { + for (HStoreFile storeFile : storeFiles) { fs.delete(storeFile.getPath(), false); } } } } while (store.needsCompaction() && !compactOnce); - //We need to close the store properly, to make sure it will archive compacted files + // We need to close the store properly, to make sure it will archive compacted files store.close(); } private static HStore getStore(final Configuration conf, final FileSystem fs, - final Path tableDir, final TableDescriptor htd, final RegionInfo hri, - final String familyName) throws IOException { + final Path tableDir, final TableDescriptor htd, final RegionInfo hri, final String familyName) + throws IOException { HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri); HRegion region = new HRegion(regionFs, null, conf, htd, null); return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf, false); @@ -199,7 +194,7 @@ private static boolean isFamilyDir(final FileSystem fs, final Path path) throws } private static class CompactionMapper - extends Mapper { + extends Mapper { private CompactionWorker compactor = null; private boolean compactOnce = false; private boolean major = false; @@ -220,7 +215,7 @@ public void setup(Context context) { @Override public void map(LongWritable key, Text value, Context context) - throws InterruptedException, IOException { + throws InterruptedException, IOException { Path path = new Path(value.toString()); this.compactor.compact(path, compactOnce, major); } @@ -236,8 +231,8 @@ protected boolean isSplitable(JobContext context, Path file) { } /** - * Returns a split for each store files directory using the block location - * of each file as locality reference. + * Returns a split for each store files directory using the block location of each file as + * locality reference. 
*/ @Override public List getSplits(JobContext job) throws IOException { @@ -245,7 +240,7 @@ public List getSplits(JobContext job) throws IOException { List files = listStatus(job); Text key = new Text(); - for (FileStatus file: files) { + for (FileStatus file : files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); LineReader reader = new LineReader(fs.open(path)); @@ -269,14 +264,14 @@ public List getSplits(JobContext job) throws IOException { * return the top hosts of the store files, used by the Split */ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) - throws IOException { + throws IOException { FileStatus[] files = CommonFSUtils.listStatus(fs, path); if (files == null) { return new String[] {}; } HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - for (FileStatus hfileStatus: files) { + for (FileStatus hfileStatus : files) { HDFSBlocksDistribution storeFileBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen()); hdfsBlocksDistribution.add(storeFileBlocksDistribution); @@ -287,22 +282,21 @@ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) } /** - * Create the input file for the given directories to compact. - * The file is a TextFile with each line corrisponding to a - * store files directory to compact. + * Create the input file for the given directories to compact. The file is a TextFile with each + * line corrisponding to a store files directory to compact. */ public static List createInputFile(final FileSystem fs, final FileSystem stagingFs, - final Path path, final Set toCompactDirs) throws IOException { + final Path path, final Set toCompactDirs) throws IOException { // Extract the list of store dirs List storeDirs = new LinkedList<>(); - for (Path compactDir: toCompactDirs) { + for (Path compactDir : toCompactDirs) { if (isFamilyDir(fs, compactDir)) { storeDirs.add(compactDir); } else if (isRegionDir(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, compactDir)); } else if (isTableDir(fs, compactDir)) { // Lookup regions - for (Path regionDir: FSUtils.getRegionDirs(fs, compactDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, regionDir)); } } else { @@ -316,7 +310,7 @@ public static List createInputFile(final FileSystem fs, final FileSystem s LOG.info("Create input file=" + path + " with " + storeDirs.size() + " dirs to compact."); try { final byte[] newLine = Bytes.toBytes("\n"); - for (Path storeDir: storeDirs) { + for (Path storeDir : storeDirs) { stream.write(Bytes.toBytes(storeDir.toString())); stream.write(newLine); } @@ -331,7 +325,7 @@ public static List createInputFile(final FileSystem fs, final FileSystem s * Execute compaction, using a Map-Reduce job. 
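Before the MapReduce variant below, it may help to see the driver's shape in one place: the compact-once/major flags travel through the job Configuration, the staged input file lists one store directory per line, and delegation tokens are obtained for every store directory before submission. A condensed sketch using the names defined in this tool; the mapper/input-format registration is abbreviated and partly assumed:

  // Condensed driver sketch (see doMapReduce below for the full version).
  Configuration conf = getConf();
  conf.setBoolean(CONF_COMPACT_ONCE, compactOnce);
  conf.setBoolean(CONF_COMPACT_MAJOR, major);
  Job job = Job.getInstance(conf);
  job.setInputFormatClass(CompactionInputFormat.class);   // assumed wiring
  job.setMapperClass(CompactionMapper.class);             // assumed wiring
  job.setNumReduceTasks(0);                                // map-only job

  Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime());
  List<Path> storeDirs =
    CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs);
  CompactionInputFormat.addInputPath(job, inputPath);

  TableMapReduceUtil.initCredentials(job);                 // secure-cluster credentials
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
    storeDirs.toArray(new Path[0]), conf);                 // filesystem delegation tokens

  return job.waitForCompletion(true) ? 0 : 1;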
*/ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, - final boolean compactOnce, final boolean major) throws Exception { + final boolean compactOnce, final boolean major) throws Exception { Configuration conf = getConf(); conf.setBoolean(CONF_COMPACT_ONCE, compactOnce); conf.setBoolean(CONF_COMPACT_MAJOR, major); @@ -352,16 +346,16 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, FileSystem stagingFs = stagingDir.getFileSystem(conf); try { // Create input file with the store dirs - Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime()); - List storeDirs = CompactionInputFormat.createInputFile(fs, stagingFs, - inputPath, toCompactDirs); + Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime()); + List storeDirs = + CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); // Initialize credential for secure cluster TableMapReduceUtil.initCredentials(job); // Despite the method name this will get delegation token for the filesystem - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - storeDirs.toArray(new Path[0]), conf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), storeDirs.toArray(new Path[0]), + conf); // Start the MR Job and wait return job.waitForCompletion(true) ? 0 : 1; @@ -374,9 +368,9 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, * Execute compaction, from this client, one path at the time. */ private int doClient(final FileSystem fs, final Set toCompactDirs, - final boolean compactOnce, final boolean major) throws IOException { + final boolean compactOnce, final boolean major) throws IOException { CompactionWorker worker = new CompactionWorker(fs, getConf()); - for (Path path: toCompactDirs) { + for (Path path : toCompactDirs) { worker.compact(path, compactOnce, major); } return 0; @@ -449,16 +443,17 @@ private void printUsage(final String message) { System.err.println(); System.err.println("Note: -D properties will be applied to the conf used. 
"); System.err.println("For example: "); - System.err.println(" To stop delete of compacted file, pass -D"+CONF_DELETE_COMPACTED+"=false"); + System.err + .println(" To stop delete of compacted file, pass -D" + CONF_DELETE_COMPACTED + "=false"); System.err.println(); System.err.println("Examples:"); System.err.println(" To compact the full 'TestTable' using MapReduce:"); - System.err.println(" $ hbase " + this.getClass().getName() + - " -mapred hdfs://hbase/data/default/TestTable"); + System.err.println( + " $ hbase " + this.getClass().getName() + " -mapred hdfs://hbase/data/default/TestTable"); System.err.println(); System.err.println(" To compact column family 'x' of the table 'TestTable' region 'abc':"); - System.err.println(" $ hbase " + this.getClass().getName() + - " hdfs://hbase/data/default/TestTable/abc/x"); + System.err.println( + " $ hbase " + this.getClass().getName() + " hdfs://hbase/data/default/TestTable/abc/x"); } public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 54c92c5ab6a0..80c5242a1060 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.BufferedInputStream; @@ -86,11 +85,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Export the specified snapshot to a given FileSystem. - * - * The .snapshot/name folder is copied to the destination cluster - * and then all the hfiles/wals are copied using a Map-Reduce Job in the .archive/ location. - * When everything is done, the second cluster can restore the snapshot. + * Export the specified snapshot to a given FileSystem. The .snapshot/name folder is copied to the + * destination cluster and then all the hfiles/wals are copied using a Map-Reduce Job in the + * .archive/ location. When everything is done, the second cluster can restore the snapshot. */ @InterfaceAudience.Public public class ExportSnapshot extends AbstractHBaseTool implements Tool { @@ -119,9 +116,9 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { private static final String CONF_MR_JOB_NAME = "mapreduce.job.name"; protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp"; private static final String CONF_COPY_MANIFEST_THREADS = - "snapshot.export.copy.references.threads"; + "snapshot.export.copy.references.threads"; private static final int DEFAULT_COPY_MANIFEST_THREADS = - Runtime.getRuntime().availableProcessors(); + Runtime.getRuntime().availableProcessors(); static class Testing { static final String CONF_TEST_FAILURE = "test.snapshot.export.failure"; @@ -133,40 +130,45 @@ static class Testing { // Command line options and defaults. 
static final class Options { static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); - static final Option TARGET_NAME = new Option(null, "target", true, - "Target name for the snapshot."); - static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " - + "destination hdfs://"); - static final Option COPY_FROM = new Option(null, "copy-from", true, - "Input folder hdfs:// (default hbase.rootdir)"); + static final Option TARGET_NAME = + new Option(null, "target", true, "Target name for the snapshot."); + static final Option COPY_TO = + new Option(null, "copy-to", true, "Remote " + "destination hdfs://"); + static final Option COPY_FROM = + new Option(null, "copy-from", true, "Input folder hdfs:// (default hbase.rootdir)"); static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, - "Do not verify checksum, use name+length only."); + "Do not verify checksum, use name+length only."); static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, - "Do not verify the integrity of the exported snapshot."); - static final Option NO_SOURCE_VERIFY = new Option(null, "no-source-verify", false, - "Do not verify the source of the snapshot."); - static final Option OVERWRITE = new Option(null, "overwrite", false, - "Rewrite the snapshot manifest if already exists."); - static final Option CHUSER = new Option(null, "chuser", true, - "Change the owner of the files to the specified one."); - static final Option CHGROUP = new Option(null, "chgroup", true, - "Change the group of the files to the specified one."); - static final Option CHMOD = new Option(null, "chmod", true, - "Change the permission of the files to the specified one."); + "Do not verify the integrity of the exported snapshot."); + static final Option NO_SOURCE_VERIFY = + new Option(null, "no-source-verify", false, "Do not verify the source of the snapshot."); + static final Option OVERWRITE = + new Option(null, "overwrite", false, "Rewrite the snapshot manifest if already exists."); + static final Option CHUSER = + new Option(null, "chuser", true, "Change the owner of the files to the specified one."); + static final Option CHGROUP = + new Option(null, "chgroup", true, "Change the group of the files to the specified one."); + static final Option CHMOD = + new Option(null, "chmod", true, "Change the permission of the files to the specified one."); static final Option MAPPERS = new Option(null, "mappers", true, - "Number of mappers to use during the copy (mapreduce.job.maps)."); - static final Option BANDWIDTH = new Option(null, "bandwidth", true, - "Limit bandwidth to this value in MB/second."); + "Number of mappers to use during the copy (mapreduce.job.maps)."); + static final Option BANDWIDTH = + new Option(null, "bandwidth", true, "Limit bandwidth to this value in MB/second."); } // Export Map-Reduce Counters, to keep track of the progress public enum Counter { - MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, - BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED + MISSING_FILES, + FILES_COPIED, + FILES_SKIPPED, + COPY_FAILED, + BYTES_EXPECTED, + BYTES_SKIPPED, + BYTES_COPIED } - private static class ExportMapper extends Mapper { + private static class ExportMapper + extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(ExportMapper.class); final static int REPORT_SIZE = 1 * 1024 * 1024; final static int BUFFER_SIZE = 64 * 1024; @@ -199,7 +201,7 @@ public void setup(Context context) throws IOException { filesGroup = 
conf.get(CONF_FILES_GROUP); filesUser = conf.get(CONF_FILES_USER); - filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); + filesMode = (short) conf.getInt(CONF_FILES_MODE, 0); outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT)); inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); @@ -217,7 +219,7 @@ public void setup(Context context) throws IOException { destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); outputFs = FileSystem.get(outputRoot.toUri(), destConf); } catch (IOException e) { - throw new IOException("Could not get the output FileSystem with root="+ outputRoot, e); + throw new IOException("Could not get the output FileSystem with root=" + outputRoot, e); } // Use the default block size of the outputFs if bigger @@ -245,7 +247,7 @@ protected void cleanup(Context context) { @Override public void map(BytesWritable key, NullWritable value, Context context) - throws InterruptedException, IOException { + throws InterruptedException, IOException { SnapshotFileInfo inputInfo = SnapshotFileInfo.parseFrom(key.copyBytes()); Path outputPath = getOutputPath(inputInfo); @@ -261,11 +263,11 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException case HFILE: Path inputPath = new Path(inputInfo.getHfile()); String family = inputPath.getParent().getName(); - TableName table =HFileLink.getReferencedTableName(inputPath.getName()); + TableName table = HFileLink.getReferencedTableName(inputPath.getName()); String region = HFileLink.getReferencedRegionName(inputPath.getName()); String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); path = new Path(CommonFSUtils.getTableDir(new Path("./"), table), - new Path(region, new Path(family, hfile))); + new Path(region, new Path(family, hfile))); break; case WAL: LOG.warn("snapshot does not keeps WALs: " + inputInfo); @@ -278,22 +280,22 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException @SuppressWarnings("checkstyle:linelength") /** - * Used by TestExportSnapshot to test for retries when failures happen. - * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. + * Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in + * {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. */ private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) - throws IOException { + throws IOException { if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return; if (testing.injectedFailureCount >= testing.failuresCountToInject) return; testing.injectedFailureCount++; context.getCounter(Counter.COPY_FAILED).increment(1); LOG.debug("Injecting failure. 
Count: " + testing.injectedFailureCount); throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s", - testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); + testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); } private void copyFile(final Context context, final SnapshotFileInfo inputInfo, - final Path outputPath) throws IOException { + final Path outputPath) throws IOException { // Get the file information FileStatus inputStat = getSourceFileStatus(context, inputInfo); @@ -361,10 +363,8 @@ private void createOutputPath(final Path path) throws IOException { /** * Try to Preserve the files attribute selected by the user copying them from the source file * This is only required when you are exporting as a different user than "hbase" or on a system - * that doesn't have the "hbase" user. - * - * This is not considered a blocking failure since the user can force a chmod with the user - * that knows is available on the system. + * that doesn't have the "hbase" user. This is not considered a blocking failure since the user + * can force a chmod with the user that knows is available on the system. */ private boolean preserveAttributes(final Path path, final FileStatus refStat) { FileStatus stat; @@ -382,7 +382,7 @@ private boolean preserveAttributes(final Path path, final FileStatus refStat) { outputFs.setPermission(path, refStat.getPermission()); } } catch (IOException e) { - LOG.warn("Unable to set the permission for file="+ stat.getPath() +": "+ e.getMessage()); + LOG.warn("Unable to set the permission for file=" + stat.getPath() + ": " + e.getMessage()); return false; } @@ -395,9 +395,10 @@ private boolean preserveAttributes(final Path path, final FileStatus refStat) { outputFs.setOwner(path, user, group); } } catch (IOException e) { - LOG.warn("Unable to set the owner/group for file="+ stat.getPath() +": "+ e.getMessage()); - LOG.warn("The user/group may not exist on the destination cluster: user=" + - user + " group=" + group); + LOG.warn( + "Unable to set the owner/group for file=" + stat.getPath() + ": " + e.getMessage()); + LOG.warn("The user/group may not exist on the destination cluster: user=" + user + + " group=" + group); return false; } } @@ -409,13 +410,11 @@ private boolean stringIsNotEmpty(final String str) { return str != null && str.length() > 0; } - private void copyData(final Context context, - final Path inputPath, final InputStream in, - final Path outputPath, final FSDataOutputStream out, - final long inputFileSize) - throws IOException { - final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + - " (%.1f%%)"; + private void copyData(final Context context, final Path inputPath, final InputStream in, + final Path outputPath, final FSDataOutputStream out, final long inputFileSize) + throws IOException { + final String statusMessage = + "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + " (%.1f%%)"; try { byte[] buffer = new byte[bufferSize]; @@ -431,33 +430,33 @@ private void copyData(final Context context, if (reportBytes >= reportSize) { context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context.setStatus( + String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) 
+ " from " + inputPath + + " to " + outputPath); reportBytes = 0; } } long etime = EnvironmentEdgeManager.currentTime(); context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context + .setStatus(String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + " to " + + outputPath); // Verify that the written size match if (totalBytesWritten != inputFileSize) { - String msg = "number of bytes copied not matching copied=" + totalBytesWritten + - " expected=" + inputFileSize + " for file=" + inputPath; + String msg = "number of bytes copied not matching copied=" + totalBytesWritten + + " expected=" + inputFileSize + " for file=" + inputPath; throw new IOException(msg); } LOG.info("copy completed for input=" + inputPath + " output=" + outputPath); - LOG.info("size=" + totalBytesWritten + - " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" + - " time=" + StringUtils.formatTimeDiff(etime, stime) + - String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime)/1000.0))/1048576.0)); + LOG + .info("size=" + totalBytesWritten + " (" + StringUtils.humanReadableInt(totalBytesWritten) + + ")" + " time=" + StringUtils.formatTimeDiff(etime, stime) + String + .format(" %.3fM/sec", (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0)); context.getCounter(Counter.FILES_COPIED).increment(1); } catch (IOException e) { LOG.error("Error copying " + inputPath + " to " + outputPath, e); @@ -467,12 +466,11 @@ private void copyData(final Context context, } /** - * Try to open the "source" file. - * Throws an IOException if the communication with the inputFs fail or - * if the file is not found. + * Try to open the "source" file. Throws an IOException if the communication with the inputFs + * fail or if the file is not found. 
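Picking up the attribute-preservation note a little further above: chmod/chown on the destination is best-effort, so failures are logged and reported through a boolean rather than aborting the copy. A minimal sketch of that contract, assuming the mapper's outputFs/filesMode/filesUser/filesGroup fields; the helper name is hypothetical:

  // Best-effort attribute copy: warn and return false on failure instead of failing
  // the task, since the operator can chmod/chown afterwards (hypothetical helper name).
  private boolean tryPreserveAttributes(Path path, FileStatus refStat) {
    try {
      if (filesMode > 0) {
        outputFs.setPermission(path, new FsPermission((short) filesMode));
      } else if (refStat != null) {
        outputFs.setPermission(path, refStat.getPermission());
      }
      if (filesUser != null || filesGroup != null) {
        outputFs.setOwner(path, filesUser, filesGroup);
      }
      return true;
    } catch (IOException e) {
      LOG.warn("Unable to preserve attributes for file=" + path + ": " + e.getMessage());
      return false;
    }
  }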
*/ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) - throws IOException { + throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; @@ -498,7 +496,7 @@ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo } private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo) - throws IOException { + throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; @@ -524,12 +522,12 @@ private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo f } } - private FileLink getFileLink(Path path, Configuration conf) throws IOException{ + private FileLink getFileLink(Path path, Configuration conf) throws IOException { String regionName = HFileLink.getReferencedRegionName(path.getName()); TableName tableName = HFileLink.getReferencedTableName(path.getName()); - if(MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { + if (MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { return HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), path); + HFileArchiveUtil.getArchivePath(conf), path); } return HFileLink.buildFromHFileLinkPattern(inputRoot, inputArchive, path); } @@ -544,8 +542,8 @@ private FileChecksum getFileChecksum(final FileSystem fs, final Path path) { } /** - * Check if the two files are equal by looking at the file length, - * and at the checksum (if user has specified the verifyChecksum flag). + * Check if the two files are equal by looking at the file length, and at the checksum (if user + * has specified the verifyChecksum flag). */ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) { // Not matching length @@ -566,7 +564,7 @@ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat } // ========================================================================== - // Input Format + // Input Format // ========================================================================== /** @@ -574,7 +572,7 @@ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat * @return list of files referenced by the snapshot (pair of path and size) */ private static List> getSnapshotFiles(final Configuration conf, - final FileSystem fs, final Path snapshotDir) throws IOException { + final FileSystem fs, final Path snapshotDir) throws IOException { SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); final List> files = new ArrayList<>(); @@ -586,7 +584,7 @@ private static List> getSnapshotFiles(final Configu new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { Pair snapshotFileAndSize = null; if (!storeFile.hasReference()) { String region = regionInfo.getEncodedName(); @@ -595,7 +593,7 @@ public void storeFile(final RegionInfo regionInfo, final String family, storeFile.hasFileSize() ? 
storeFile.getFileSize() : -1); } else { Pair referredToRegionAndFile = - StoreFileInfo.getReferredToRegionAndFile(storeFile.getName()); + StoreFileInfo.getReferredToRegionAndFile(storeFile.getName()); String referencedRegion = referredToRegionAndFile.getFirst(); String referencedHFile = referredToRegionAndFile.getSecond(); snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family, @@ -609,11 +607,11 @@ public void storeFile(final RegionInfo regionInfo, final String family, } private static Pair getSnapshotFileAndSize(FileSystem fs, - Configuration conf, TableName table, String region, String family, String hfile, long size) - throws IOException { + Configuration conf, TableName table, String region, String family, String hfile, long size) + throws IOException { Path path = HFileLink.createPath(table, region, family, hfile); SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder().setType(SnapshotFileInfo.Type.HFILE) - .setHfile(path.toString()).build(); + .setHfile(path.toString()).build(); if (size == -1) { size = HFileLink.buildFromHFileLinkPattern(conf, path).getFileStatus(fs).getLen(); } @@ -624,12 +622,11 @@ private static Pair getSnapshotFileAndSize(FileSystem fs * Given a list of file paths and sizes, create around ngroups in as balanced a way as possible. * The groups created will have similar amounts of bytes. *
    - * The algorithm used is pretty straightforward; the file list is sorted by size, - * and then each group fetch the bigger file available, iterating through groups - * alternating the direction. + * The algorithm used is pretty straightforward; the file list is sorted by size, and then each + * group fetch the bigger file available, iterating through groups alternating the direction. */ - static List>> getBalancedSplits( - final List> files, final int ngroups) { + static List>> + getBalancedSplits(final List> files, final int ngroups) { // Sort files by size, from small to big Collections.sort(files, new Comparator>() { public int compare(Pair a, Pair b) { @@ -685,8 +682,8 @@ public int compare(Pair a, Pair private static class ExportSnapshotInputFormat extends InputFormat { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext tac) throws IOException, InterruptedException { - return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit)split).getSplitKeys()); + TaskAttemptContext tac) throws IOException, InterruptedException { + return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit) split).getSplitKeys()); } @Override @@ -706,7 +703,7 @@ public List getSplits(JobContext context) throws IOException, Interr List>> groups = getBalancedSplits(snapshotFiles, mappers); List splits = new ArrayList(groups.size()); - for (List> files: groups) { + for (List> files : groups) { splits.add(new ExportSnapshotInputSplit(files)); } return splits; @@ -722,9 +719,9 @@ public ExportSnapshotInputSplit() { public ExportSnapshotInputSplit(final List> snapshotFiles) { this.files = new ArrayList(snapshotFiles.size()); - for (Pair fileInfo: snapshotFiles) { - this.files.add(new Pair<>( - new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); + for (Pair fileInfo : snapshotFiles) { + this.files.add( + new Pair<>(new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); this.length += fileInfo.getSecond(); } } @@ -760,7 +757,7 @@ public void readFields(DataInput in) throws IOException { @Override public void write(DataOutput out) throws IOException { out.writeInt(files.size()); - for (final Pair fileInfo: files) { + for (final Pair fileInfo : files) { fileInfo.getFirst().write(out); out.writeLong(fileInfo.getSecond()); } @@ -768,7 +765,7 @@ public void write(DataOutput out) throws IOException { } private static class ExportSnapshotRecordReader - extends RecordReader { + extends RecordReader { private final List> files; private long totalSize = 0; private long procSize = 0; @@ -776,48 +773,55 @@ private static class ExportSnapshotRecordReader ExportSnapshotRecordReader(final List> files) { this.files = files; - for (Pair fileInfo: files) { + for (Pair fileInfo : files) { totalSize += fileInfo.getSecond(); } } @Override - public void close() { } + public void close() { + } @Override - public BytesWritable getCurrentKey() { return files.get(index).getFirst(); } + public BytesWritable getCurrentKey() { + return files.get(index).getFirst(); + } @Override - public NullWritable getCurrentValue() { return NullWritable.get(); } + public NullWritable getCurrentValue() { + return NullWritable.get(); + } @Override - public float getProgress() { return (float)procSize / totalSize; } + public float getProgress() { + return (float) procSize / totalSize; + } @Override - public void initialize(InputSplit split, TaskAttemptContext tac) { } + public void initialize(InputSplit split, TaskAttemptContext tac) { + } @Override public boolean 
nextKeyValue() { if (index >= 0) { procSize += files.get(index).getSecond(); } - return(++index < files.size()); + return (++index < files.size()); } } } // ========================================================================== - // Tool + // Tool // ========================================================================== /** * Run Map-Reduce Job to perform the files copy. */ - private void runCopyJob(final Path inputRoot, final Path outputRoot, - final String snapshotName, final Path snapshotDir, final boolean verifyChecksum, - final String filesUser, final String filesGroup, final int filesMode, - final int mappers, final int bandwidthMB) - throws IOException, InterruptedException, ClassNotFoundException { + private void runCopyJob(final Path inputRoot, final Path outputRoot, final String snapshotName, + final Path snapshotDir, final boolean verifyChecksum, final String filesUser, + final String filesGroup, final int filesMode, final int mappers, final int bandwidthMB) + throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup); if (filesUser != null) conf.set(CONF_FILES_USER, filesUser); @@ -846,11 +850,9 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, // Acquire the delegation Tokens Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { inputRoot }, srcConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { inputRoot }, srcConf); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { outputRoot }, destConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outputRoot }, destConf); // Run the MR Job if (!job.waitForCompletion(true)) { @@ -858,8 +860,8 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, } } - private void verifySnapshot(final Configuration baseConf, - final FileSystem fs, final Path rootDir, final Path snapshotDir) throws IOException { + private void verifySnapshot(final Configuration baseConf, final FileSystem fs, final Path rootDir, + final Path snapshotDir) throws IOException { // Update the conf with the current root dir, since may be a different cluster Configuration conf = new Configuration(baseConf); CommonFSUtils.setRootDir(conf, rootDir); @@ -869,9 +871,9 @@ private void verifySnapshot(final Configuration baseConf, } private void setConfigParallel(FileSystem outputFs, List traversedPath, - BiConsumer task, Configuration conf) throws IOException { + BiConsumer task, Configuration conf) throws IOException { ExecutorService pool = Executors - .newFixedThreadPool(conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); + .newFixedThreadPool(conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); List> futures = new ArrayList<>(); for (Path dstPath : traversedPath) { Future future = (Future) pool.submit(() -> task.accept(outputFs, dstPath)); @@ -889,19 +891,19 @@ private void setConfigParallel(FileSystem outputFs, List traversedPath, } private void setOwnerParallel(FileSystem outputFs, String filesUser, String filesGroup, - Configuration conf, List traversedPath) throws IOException { + Configuration conf, List traversedPath) throws IOException { setConfigParallel(outputFs, traversedPath, (fs, path) -> { try { 
fs.setOwner(path, filesUser, filesGroup); } catch (IOException e) { throw new RuntimeException( - "set owner for file " + path + " to " + filesUser + ":" + filesGroup + " failed", e); + "set owner for file " + path + " to " + filesUser + ":" + filesGroup + " failed", e); } }, conf); } private void setPermissionParallel(final FileSystem outputFs, final short filesMode, - final List traversedPath, final Configuration conf) throws IOException { + final List traversedPath, final Configuration conf) throws IOException { if (filesMode <= 0) { return; } @@ -911,7 +913,7 @@ private void setPermissionParallel(final FileSystem outputFs, final short filesM fs.setPermission(path, perm); } catch (IOException e) { throw new RuntimeException( - "set permission for file " + path + " to " + filesMode + " failed", e); + "set permission for file " + path + " to " + filesMode + " failed", e); } }, conf); } @@ -968,8 +970,8 @@ public int doWork() throws IOException { } if (outputRoot == null) { - System.err.println("Destination file-system (--" + Options.COPY_TO.getLongOpt() - + ") not provided."); + System.err + .println("Destination file-system (--" + Options.COPY_TO.getLongOpt() + ") not provided."); LOG.error("Use -h or --help for usage instructions."); return 0; } @@ -989,16 +991,17 @@ public int doWork() throws IOException { Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); - boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || - conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; + boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) + || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot); - Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, - destConf); - Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); + Path snapshotTmpDir = + SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf); + Path outputSnapshotDir = + SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); Path initialOutputSnapshotDir = skipTmp ? 
outputSnapshotDir : snapshotTmpDir; LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot); - LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", - outputFs, outputRoot.toString(), skipTmp, initialOutputSnapshotDir); + LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs, + outputRoot.toString(), skipTmp, initialOutputSnapshotDir); // Verify snapshot source before copying files if (verifySource) { @@ -1028,8 +1031,8 @@ public int doWork() throws IOException { return 1; } } else { - System.err.println("The snapshot '" + targetName + - "' already exists in the destination: " + outputSnapshotDir); + System.err.println("The snapshot '" + targetName + "' already exists in the destination: " + + outputSnapshotDir); return 1; } } @@ -1039,19 +1042,23 @@ public int doWork() throws IOException { if (outputFs.exists(snapshotTmpDir)) { if (overwrite) { if (!outputFs.delete(snapshotTmpDir, true)) { - System.err.println("Unable to remove existing snapshot tmp directory: "+snapshotTmpDir); + System.err + .println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir); return 1; } } else { - System.err.println("A snapshot with the same name '"+ targetName +"' may be in-progress"); - System.err.println("Please check "+snapshotTmpDir+". If the snapshot has completed, "); - System.err.println("consider removing "+snapshotTmpDir+" by using the -overwrite option"); + System.err + .println("A snapshot with the same name '" + targetName + "' may be in-progress"); + System.err + .println("Please check " + snapshotTmpDir + ". If the snapshot has completed, "); + System.err + .println("consider removing " + snapshotTmpDir + " by using the -overwrite option"); return 1; } } } - // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ + // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ // The snapshot references must be copied before the hfiles otherwise the cleaner // will remove them because they are unreferenced. List travesedPaths = new ArrayList<>(); @@ -1059,43 +1066,43 @@ public int doWork() throws IOException { try { LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + initialOutputSnapshotDir); travesedPaths = - FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, - conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); + FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, + conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); copySucceeded = true; } catch (IOException e) { - throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + - snapshotDir + " to=" + initialOutputSnapshotDir, e); + throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + snapshotDir + + " to=" + initialOutputSnapshotDir, e); } finally { if (copySucceeded) { if (filesUser != null || filesGroup != null) { - LOG.warn((filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " - + filesUser) - + (filesGroup == null ? "" : ", Change the group of " + needSetOwnerDir + " to " - + filesGroup)); + LOG.warn( + (filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " + filesUser) + + (filesGroup == null + ? 
"" + : ", Change the group of " + needSetOwnerDir + " to " + filesGroup)); setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths); } if (filesMode > 0) { LOG.warn("Change the permission of " + needSetOwnerDir + " to " + filesMode); - setPermissionParallel(outputFs, (short)filesMode, travesedPaths, conf); + setPermissionParallel(outputFs, (short) filesMode, travesedPaths, conf); } } } // Write a new .snapshotinfo if the target name is different from the source name if (!targetName.equals(snapshotName)) { - SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir) - .toBuilder() - .setName(targetName) - .build(); + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils + .readSnapshotInfo(inputFs, snapshotDir).toBuilder().setName(targetName).build(); SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDesc, initialOutputSnapshotDir, outputFs); if (filesUser != null || filesGroup != null) { - outputFs.setOwner(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, filesGroup); + outputFs.setOwner( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, + filesGroup); } if (filesMode > 0) { - outputFs.setPermission(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), new FsPermission((short)filesMode)); + outputFs.setPermission( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), + new FsPermission((short) filesMode)); } } @@ -1103,15 +1110,15 @@ public int doWork() throws IOException { // The snapshot references must be copied before the files otherwise the files gets removed // by the HFileArchiver, since they have no references. try { - runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, - filesUser, filesGroup, filesMode, mappers, bandwidthMB); + runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, filesUser, + filesGroup, filesMode, mappers, bandwidthMB); LOG.info("Finalize the Snapshot Export"); if (!skipTmp) { // Step 3 - Rename fs2:/.snapshot/.tmp/ fs2:/.snapshot/ if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) { - throw new ExportSnapshotException("Unable to rename snapshot directory from=" + - snapshotTmpDir + " to=" + outputSnapshotDir); + throw new ExportSnapshotException("Unable to rename snapshot directory from=" + + snapshotTmpDir + " to=" + outputSnapshotDir); } } @@ -1139,18 +1146,16 @@ public int doWork() throws IOException { @Override protected void printUsage() { super.printUsage(); - System.out.println("\n" - + "Examples:\n" - + " hbase snapshot export \\\n" - + " --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n" - + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" - + "\n" - + " hbase snapshot export \\\n" - + " --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n" - + " --copy-to hdfs://srv1:50070/hbase"); + System.out.println("\n" + "Examples:\n" + " hbase snapshot export \\\n" + + " --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n" + + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" + "\n" + + " hbase snapshot export \\\n" + + " --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n" + + " --copy-to hdfs://srv1:50070/hbase"); } - @Override protected void addOptions() { + @Override + protected void addOptions() { addRequiredOption(Options.SNAPSHOT); addOption(Options.COPY_TO); addOption(Options.COPY_FROM); diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java index 9432f309adb6..e7489fc5b8f3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Generate a classpath string containing any jars required by mapreduce jobs. Specify - * additional values by providing a comma-separated list of paths via -Dtmpjars. + * Generate a classpath string containing any jars required by mapreduce jobs. Specify additional + * values by providing a comma-separated list of paths via -Dtmpjars. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MapreduceDependencyClasspathTool implements Tool { @@ -49,8 +49,10 @@ public Configuration getConf() { public int run(String[] args) throws Exception { if (args.length > 0) { System.err.println("Usage: hbase mapredcp [-Dtmpjars=...]"); - System.err.println(" Construct a CLASSPATH containing dependency jars required to run a mapreduce"); - System.err.println(" job. By default, includes any jars detected by TableMapReduceUtils. Provide"); + System.err + .println(" Construct a CLASSPATH containing dependency jars required to run a mapreduce"); + System.err + .println(" job. By default, includes any jars detected by TableMapReduceUtils. Provide"); System.err.println(" additional entries by specifying a comma-separated list in tmpjars."); return 0; } @@ -63,7 +65,7 @@ public int run(String[] args) throws Exception { public static void main(String[] argv) throws Exception { // Silence the usual noise. This is probably fragile... Log4jUtils.setLogLevel("org.apache.hadoop.hbase", "WARN"); - System.exit(ToolRunner.run( - HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); + System.exit( + ToolRunner.run(HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 0a24cbe1edc9..63f812128845 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -117,21 +116,18 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Script used evaluating HBase performance and scalability. Runs a HBase - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage. - * - *
    This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *
    By default, runs as a mapreduce job where each mapper runs a single test - * client. Can also run as a non-mapreduce, multithreaded application by - * specifying {@code --nomapred}. Each client does about 1GB of data, unless - * specified otherwise. + * Script used evaluating HBase performance and scalability. Runs a HBase client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run {@code PerformanceEvaluation --help} to obtain usage. + *
    + * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *
    + * By default, runs as a mapreduce job where each mapper runs a single test client. Can also run as + * a non-mapreduce, multithreaded application by specifying {@code --nomapred}. Each client does + * about 1GB of data, unless specified otherwise. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PerformanceEvaluation extends Configured implements Tool { @@ -170,11 +166,9 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run async sequential read test"); addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite", "Run async sequential write test"); - addCommandDescriptor(AsyncScanTest.class, "asyncScan", - "Run async scan test (read every row)"); + addCommandDescriptor(AsyncScanTest.class, "asyncScan", "Run async scan test (read every row)"); addCommandDescriptor(RandomReadTest.class, RANDOM_READ, "Run random read test"); - addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", - "Run getRegionLocation test"); + addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", "Run getRegionLocation test"); addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN, "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", @@ -185,18 +179,15 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); addCommandDescriptor(MetaWriteTest.class, "metaWrite", "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based on it's value " + - "(make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based on it's value " + + "(make sure to use --rows=20)"); addCommandDescriptor(IncrementTest.class, "increment", "Increment on each row; clients overlap on keyspace so some concurrent operations"); addCommandDescriptor(AppendTest.class, "append", @@ -212,8 +203,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. 
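As the class javadoc above explains, the same test commands run either as a MapReduce job (one mapper per client) or locally with --nomapred. Illustrative invocations built from the command names and options registered above; treat the exact spelling as indicative and check --help on your version:

  $ hbase org.apache.hadoop.hbase.PerformanceEvaluation --nomapred --rows=100000 randomWrite 4
  $ hbase org.apache.hadoop.hbase.PerformanceEvaluation sequentialRead 16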
*/ protected static enum Counter { /** elapsed time */ @@ -248,7 +239,8 @@ public String toString() { return Long.toString(duration); } - @Override public int compareTo(RunResult o) { + @Override + public int compareTo(RunResult o) { return Long.compare(this.duration, o.duration); } } @@ -261,8 +253,8 @@ public PerformanceEvaluation(final Configuration conf) { super(conf); } - protected static void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected static void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); COMMANDS.put(name, cmdDescriptor); } @@ -273,8 +265,7 @@ protected static void addCommandDescriptor(Class cmdClass, interface Status { /** * Sets status - * @param msg status message - * @throws IOException + * @param msg status message n */ void setStatus(final String msg) throws IOException; } @@ -283,7 +274,7 @@ interface Status { * MapReduce job that runs a performance evaluation client in each map task. */ public static class EvaluationMapTask - extends Mapper { + extends Mapper { /** configuration parameter name that contains the command */ public final static String CMD_KEY = "EvaluationMapTask.command"; @@ -299,7 +290,7 @@ protected void setup(Context context) throws IOException, InterruptedException { // this is required so that extensions of PE are instantiated within the // map reduce task... Class peClass = - forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); + forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); try { peClass.getConstructor(Configuration.class).newInstance(context.getConfiguration()); } catch (Exception e) { @@ -317,12 +308,12 @@ private Class forName(String className, Class type) @Override protected void map(LongWritable key, Text value, final Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Status status = new Status() { @Override public void setStatus(String msg) { - context.setStatus(msg); + context.setStatus(msg); } }; @@ -337,7 +328,8 @@ public void setStatus(String msg) { } // Evaluation task - RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); + RunResult result = + PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. context.getCounter(Counter.ELAPSED_TIME).increment(result.duration); @@ -348,9 +340,9 @@ public void setStatus(String msg) { } /* - * If table does not already exist, create. Also create a table when - * {@code opts.presplitRegions} is specified or when the existing table's - * region replica count doesn't match {@code opts.replicas}. + * If table does not already exist, create. Also create a table when {@code opts.presplitRegions} + * is specified or when the existing table's region replica count doesn't match {@code + * opts.replicas}. */ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { TableName tableName = TableName.valueOf(opts.tableName); @@ -361,30 +353,25 @@ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { throw new IllegalStateException( "Must specify an existing table for read commands. Run a write command first."); } - TableDescriptor desc = - exists ? admin.getDescriptor(TableName.valueOf(opts.tableName)) : null; + TableDescriptor desc = exists ? 
admin.getDescriptor(TableName.valueOf(opts.tableName)) : null; byte[][] splits = getSplits(opts); // recreate the table when user has requested presplit or when existing // {RegionSplitPolicy,replica count} does not match requested, or when the // number of column families does not match requested. - if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) - || (!isReadCmd && desc != null && - !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) - || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) - || (desc != null && desc.getColumnFamilyCount() != opts.families)) { + if ( + (exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) + || (!isReadCmd && desc != null + && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) + || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) + || (desc != null && desc.getColumnFamilyCount() != opts.families) + ) { needsDelete = true; // wait, why did it delete my table?!? - LOG.debug(MoreObjects.toStringHelper("needsDelete") - .add("needsDelete", needsDelete) - .add("isReadCmd", isReadCmd) - .add("exists", exists) - .add("desc", desc) - .add("presplit", opts.presplitRegions) - .add("splitPolicy", opts.splitPolicy) - .add("replicas", opts.replicas) - .add("families", opts.families) - .toString()); + LOG.debug(MoreObjects.toStringHelper("needsDelete").add("needsDelete", needsDelete) + .add("isReadCmd", isReadCmd).add("exists", exists).add("desc", desc) + .add("presplit", opts.presplitRegions).add("splitPolicy", opts.splitPolicy) + .add("replicas", opts.replicas).add("families", opts.families).toString()); } // remove an existing table @@ -449,8 +436,7 @@ protected static TableDescriptor getTableDescriptor(TestOptions opts) { * generates splits based on total number of rows and specified split regions */ protected static byte[][] getSplits(TestOptions opts) { - if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) - return null; + if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) return null; int numSplitPoints = opts.presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; @@ -477,7 +463,7 @@ static void setupConnectionCount(final TestOptions opts) { * Run all clients in this vm each to its own thread. 
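Per the javadoc line above, the non-MapReduce path simply fans the clients out over a local thread pool, each producing a RunResult. A trimmed sketch assuming the cmd class, conf, the pre-built cons/asyncCons connection arrays and runOneClient from this class; connection setup and the timing summary are left out:

  // Trimmed fan-out sketch (see doLocalClients below for the complete version).
  ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads);
  List<Future<RunResult>> futures = new ArrayList<>();
  for (int i = 0; i < opts.numClientThreads; i++) {
    final int index = i;
    futures.add(pool.submit(() -> {
      TestOptions threadOpts = new TestOptions(opts);          // per-client copy (clone constructor)
      Connection con = cons[index % cons.length];              // reuse the shared connections
      AsyncConnection asyncCon = asyncCons[index % asyncCons.length];
      return runOneClient(cmd, conf, con, asyncCon, threadOpts, msg -> LOG.info(msg));
    }));
  }
  for (Future<RunResult> f : futures) {
    LOG.info("client finished in " + f.get().duration + "ms");
  }
  pool.shutdown();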
*/ static RunResult[] doLocalClients(final TestOptions opts, final Configuration conf) - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { final Class cmd = determineCommandClass(opts.cmdName); assert cmd != null; @SuppressWarnings("unchecked") @@ -492,8 +478,8 @@ static RunResult[] doLocalClients(final TestOptions opts, final Configuration co cons[i] = ConnectionFactory.createConnection(conf); asyncCons[i] = ConnectionFactory.createAsyncConnection(conf).get(); } - LOG.info("Created " + opts.connCount + " connections for " + - opts.numClientThreads + " threads"); + LOG + .info("Created " + opts.connCount + " connections for " + opts.numClientThreads + " threads"); for (int i = 0; i < threads.length; i++) { final int index = i; threads[i] = pool.submit(new Callable() { @@ -509,11 +495,11 @@ public void setStatus(final String msg) throws IOException { LOG.info(msg); } }); - LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + - "ms over " + threadOpts.perClientRunRows + " rows"); + LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + + "ms over " + threadOpts.perClientRunRows + " rows"); if (opts.latencyThreshold > 0) { - LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + - "(ms) is " + run.numbOfReplyOverThreshold); + LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + + "(ms) is " + run.numbOfReplyOverThreshold); } return run; } @@ -529,11 +515,10 @@ public void setStatus(final String msg) throws IOException { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(results)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(results)); Arrays.sort(results); long total = 0; - float avgLatency = 0 ; + float avgLatency = 0; float avgTPS = 0; long replicaWins = 0; for (RunResult result : results) { @@ -544,10 +529,8 @@ public void setStatus(final String msg) throws IOException { } avgTPS *= 1000; // ms to second avgLatency = avgLatency / results.length; - LOG.info("[" + test + " duration ]" - + "\tMin: " + results[0] + "ms" - + "\tMax: " + results[results.length - 1] + "ms" - + "\tAvg: " + (total / results.length) + "ms"); + LOG.info("[" + test + " duration ]" + "\tMin: " + results[0] + "ms" + "\tMax: " + + results[results.length - 1] + "ms" + "\tAvg: " + (total / results.length) + "ms"); LOG.info("[ Avg latency (us)]\t" + Math.round(avgLatency)); LOG.info("[ Avg TPS/QPS]\t" + Math.round(avgTPS) + "\t row per second"); if (opts.replicas > 1) { @@ -563,14 +546,12 @@ public void setStatus(final String msg) throws IOException { } /* - * Run a mapreduce job. Run as many maps as asked-for clients. - * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. - * @param cmd Command to run. - * @throws IOException + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. + * @param cmd Command to run. 
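The comment opening doMapReduce above carries the key idea: one map task per requested client, driven by an input file that tells each client where to start in the keyspace. Purely illustrative arithmetic for those offsets; the real writeInputFile (below) emits one options line per client and leaves the parsing to the mapper:

  // Illustrative partitioning only: client i starts at i * perClientRunRows.
  for (int i = 0; i < opts.numClientThreads; i++) {
    long startRow = (long) i * opts.perClientRunRows;
    out.println("client=" + i + " startRow=" + startRow + " rows=" + opts.perClientRunRows);
  }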
n */ static Job doMapReduce(TestOptions opts, final Configuration conf) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final Class cmd = determineCommandClass(opts.cmdName); assert cmd != null; Path inputDir = writeInputFile(conf, opts); @@ -597,11 +578,11 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs")); TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - Histogram.class, // yammer metrics - Gson.class, // gson + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Histogram.class, // yammer + // metrics + Gson.class, // gson FilterAllFilter.class // hbase-server tests jar - ); + ); TableMapReduceUtil.initCredentials(job); @@ -610,7 +591,7 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) } /** - * Each client has one mapper to do the work, and client do the resulting count in a map task. + * Each client has one mapper to do the work, and client do the resulting count in a map task. */ static String JOB_INPUT_FILENAME = "input.txt"; @@ -618,15 +599,14 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) /* * Write input file of offsets-per-client for the mapreduce job. * @param c Configuration - * @return Directory that contains file written whose name is JOB_INPUT_FILENAME - * @throws IOException + * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n */ static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException { return writeInputFile(c, opts, new Path(".")); } static Path writeInputFile(final Configuration c, final TestOptions opts, final Path basedir) - throws IOException { + throws IOException { SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss"); Path jobdir = new Path(new Path(basedir, PERF_EVAL_DIR), formatter.format(new Date())); Path inputDir = new Path(jobdir, "inputs"); @@ -651,7 +631,7 @@ static Path writeInputFile(final Configuration c, final TestOptions opts, final int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1); m.put(hash, s); } - for (Map.Entry e: m.entrySet()) { + for (Map.Entry e : m.entrySet()) { out.println(e.getValue()); } } finally { @@ -688,11 +668,11 @@ public String getDescription() { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. - * This makes tracking all these arguments a little easier. - * NOTE: ADDING AN OPTION, you need to add a data member, a getter/setter (to make JSON - * serialization of this TestOptions class behave), and you need to add to the clone constructor - * below copying your new option from the 'that' to the 'this'. Look for 'clone' below. + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. This makes + * tracking all these arguments a little easier. NOTE: ADDING AN OPTION, you need to add a data + * member, a getter/setter (to make JSON serialization of this TestOptions class behave), and you + * need to add to the clone constructor below copying your new option from the 'that' to the + * 'this'. Look for 'clone' below. 
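+ * As an illustrative sketch only (hypothetical option name, not part of this patch): a new
+ * "--foo=" flag would mean adding an "int foo = 0;" member here, a getFoo()/setFoo() pair, a
+ * "this.foo = that.foo;" line in the clone constructor, and a matching "--foo=" branch in
+ * parseOpts().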
*/ static class TestOptions { String cmdName = null; @@ -715,7 +695,7 @@ static class TestOptions { boolean writeToWAL = true; boolean autoFlush = false; boolean oneCon = false; - int connCount = -1; //wil decide the actual num later + int connCount = -1; // wil decide the actual num later boolean useTags = false; int noOfTags = 1; boolean reportLatency = false; @@ -733,7 +713,7 @@ static class TestOptions { boolean valueRandom = false; boolean valueZipf = false; int valueSize = DEFAULT_VALUE_LENGTH; - int period = (this.perClientRunRows / 10) == 0? perClientRunRows: perClientRunRows / 10; + int period = (this.perClientRunRows / 10) == 0 ? perClientRunRows : perClientRunRows / 10; int cycles = 1; int columns = 1; int families = 1; @@ -741,14 +721,14 @@ static class TestOptions { int latencyThreshold = 0; // in millsecond boolean addColumns = true; MemoryCompactionPolicy inMemoryCompaction = - MemoryCompactionPolicy.valueOf( - CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); + MemoryCompactionPolicy.valueOf(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); boolean asyncPrefetch = false; boolean cacheBlocks = true; Scan.ReadType scanReadType = Scan.ReadType.DEFAULT; long bufferSize = 2l * 1024l * 1024l; - public TestOptions() {} + public TestOptions() { + } /** * Clone constructor. @@ -1141,8 +1121,7 @@ public long getBufferSize() { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. */ static abstract class TestBase { // Below is make it so when Tests are all running in the one @@ -1152,6 +1131,7 @@ static abstract class TestBase { private static long nextRandomSeed() { return randomSeed.nextLong(); } + private final int everyN; protected final Random rand = new Random(nextRandomSeed()); @@ -1175,8 +1155,8 @@ private static long nextRandomSeed() { private long numOfReplyFromReplica = 0; /** - * Note that all subclasses of this class must provide a public constructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public constructor that has the exact + * same list of arguments. 
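+ * Concrete tests are instantiated reflectively in runOneClient below via
+ * getDeclaredConstructor(Connection.class, TestOptions.class, Status.class), or the
+ * AsyncConnection variant for async tests, so a hypothetical subclass would expose e.g.
+ * "MyReadTest(Connection con, TestOptions options, Status status)".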
*/ TestBase(final Configuration conf, final TestOptions options, final Status status) { this.conf = conf; @@ -1200,13 +1180,14 @@ int getValueLength(final Random r) { } } - void updateValueSize(final Result [] rs) throws IOException { + void updateValueSize(final Result[] rs) throws IOException { updateValueSize(rs, 0); } - void updateValueSize(final Result [] rs, final long latency) throws IOException { + void updateValueSize(final Result[] rs, final long latency) throws IOException { if (rs == null || (latency == 0)) return; - for (Result r: rs) updateValueSize(r, latency); + for (Result r : rs) + updateValueSize(r, latency); } void updateValueSize(final Result r) throws IOException { @@ -1219,7 +1200,7 @@ void updateValueSize(final Result r, final long latency) throws IOException { // update replicaHistogram if (r.isStale()) { replicaLatencyHistogram.update(latency / 1000); - numOfReplyFromReplica ++; + numOfReplyFromReplica++; } if (!isRandomValueSize()) return; @@ -1236,7 +1217,7 @@ void updateValueSize(final int valueSize) { void updateScanMetrics(final ScanMetrics metrics) { if (metrics == null) return; - Map metricsMap = metrics.getMetricsMap(); + Map metricsMap = metrics.getMetricsMap(); Long rpcCalls = metricsMap.get(ScanMetrics.RPC_CALLS_METRIC_NAME); if (rpcCalls != null) { this.rpcCallsHistogram.update(rpcCalls.longValue()); @@ -1264,8 +1245,8 @@ void updateScanMetrics(final ScanMetrics metrics) { } String generateStatus(final int sr, final int i, final int lr) { - return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + - (!isRandomValueSize()? "": ", value size " + getShortValueSizeReport()); + return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + + (!isRandomValueSize() ? "" : ", value size " + getShortValueSizeReport()); } boolean isRandomValueSize() { @@ -1288,16 +1269,19 @@ void testSetup() throws IOException { latencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // If it is a replica test, set up histogram for replica. if (opts.replicas > 1) { - replicaLatencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + replicaLatencyHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); } valueSizeHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // scan metrics rpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); remoteRpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - millisBetweenNextHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + millisBetweenNextHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); regionsScannedHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); bytesInResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - bytesInRemoteResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + bytesInRemoteResultsHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); onStartup(); } @@ -1311,56 +1295,54 @@ void testTakedown() throws IOException { // output. We can't use 'this' here because each thread has its own instance of Test class. 
synchronized (Test.class) { status.setStatus("Test : " + testName + ", Thread : " + Thread.currentThread().getName()); - status.setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport( - latencyHistogram)); + status + .setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport(latencyHistogram)); if (opts.replicas > 1) { - status.setStatus("Latency (us) from Replica Regions: " + - YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); + status.setStatus("Latency (us) from Replica Regions: " + + YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); } status.setStatus("Num measures (latency) : " + latencyHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(latencyHistogram)); if (valueSizeHistogram.getCount() > 0) { - status.setStatus("ValueSize (bytes) : " - + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); + status.setStatus( + "ValueSize (bytes) : " + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); status.setStatus("Num measures (ValueSize): " + valueSizeHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(valueSizeHistogram)); } else { status.setStatus("No valueSize statistics available"); } if (rpcCallsHistogram.getCount() > 0) { - status.setStatus("rpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); + status.setStatus( + "rpcCalls (count): " + YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); } if (remoteRpcCallsHistogram.getCount() > 0) { - status.setStatus("remoteRpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); + status.setStatus("remoteRpcCalls (count): " + + YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); } if (millisBetweenNextHistogram.getCount() > 0) { - status.setStatus("millisBetweenNext (latency): " + - YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); + status.setStatus("millisBetweenNext (latency): " + + YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); } if (regionsScannedHistogram.getCount() > 0) { - status.setStatus("regionsScanned (count): " + - YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); + status.setStatus("regionsScanned (count): " + + YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); } if (bytesInResultsHistogram.getCount() > 0) { - status.setStatus("bytesInResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); + status.setStatus("bytesInResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); } if (bytesInRemoteResultsHistogram.getCount() > 0) { - status.setStatus("bytesInRemoteResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); + status.setStatus("bytesInRemoteResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); } } } abstract void onTakedown() throws IOException; - /* * Run test - * @return Elapsed time. - * @throws IOException + * @return Elapsed time. 
n */ long test() throws IOException, InterruptedException { testSetup(); @@ -1396,12 +1378,12 @@ void testTimed() throws IOException, InterruptedException { long startTime = System.nanoTime(); boolean requestSent = false; Span span = TraceUtil.getGlobalTracer().spanBuilder("test row").startSpan(); - try (Scope scope = span.makeCurrent()){ + try (Scope scope = span.makeCurrent()) { requestSent = testRow(i, startTime); } finally { span.end(); } - if ( (i - startRow) > opts.measureAfter) { + if ((i - startRow) > opts.measureAfter) { // If multiget or multiput is enabled, say set to 10, testRow() returns immediately // first 9 times and sends the actual get request in the 10th iteration. // We should only set latency when actual request is sent because otherwise @@ -1410,7 +1392,7 @@ void testTimed() throws IOException, InterruptedException { long latency = (System.nanoTime() - startTime) / 1000; latencyHistogram.update(latency); if ((opts.latencyThreshold > 0) && (latency / 1000 >= opts.latencyThreshold)) { - numOfReplyOverLatencyThreshold ++; + numOfReplyOverLatencyThreshold++; } } if (status != null && i > 0 && (i % getReportingPeriod()) == 0) { @@ -1435,15 +1417,14 @@ public String getShortValueSizeReport() { return YammerHistogramUtils.getShortHistogramReport(this.valueSizeHistogram); } - /** * Test for individual row. * @param i Row index. - * @return true if the row was sent to server and need to record metrics. - * False if not, multiGet and multiPut e.g., the rows are sent - * to server only if enough gets/puts are gathered. + * @return true if the row was sent to server and need to record metrics. False if not, multiGet + * and multiPut e.g., the rows are sent to server only if enough gets/puts are gathered. */ - abstract boolean testRow(final int i, final long startTime) throws IOException, InterruptedException; + abstract boolean testRow(final int i, final long startTime) + throws IOException, InterruptedException; } static abstract class Test extends TestBase { @@ -1483,8 +1464,8 @@ void onTakedown() throws IOException { } /* - * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest - */ + * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest + */ static abstract class MetaTest extends TableTest { protected int keyLength; @@ -1499,7 +1480,7 @@ void onTakedown() throws IOException { } /* - Generates Lexicographically ascending strings + * Generates Lexicographically ascending strings */ protected byte[] getSplitKey(final int i) { return Bytes.toBytes(String.format("%0" + keyLength + "d", i)); @@ -1547,7 +1528,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1564,7 +1545,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt this.gets.add(get); if (this.gets.size() == opts.multiGet) { Result[] rs = - this.table.get(this.gets).stream().map(f -> propagate(f::get)).toArray(Result[]::new); + this.table.get(this.gets).stream().map(f -> propagate(f::get)).toArray(Result[]::new); updateValueSize(rs); this.gets.clear(); } else { @@ -1632,9 +1613,8 @@ static class AsyncScanTest extends AsyncTableTest { @Override void onStartup() throws IOException { - this.asyncTable = - connection.getTable(TableName.valueOf(opts.tableName), - Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); + this.asyncTable = connection.getTable(TableName.valueOf(opts.tableName), + Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); } @Override @@ -1649,15 +1629,14 @@ void testTakedown() throws IOException { @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { - Scan scan = - new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1687,7 +1666,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1729,7 +1708,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -1738,8 +1717,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -1799,16 +1778,16 @@ static class RandomSeekScanTest extends TableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)) - .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) - .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) - .setScanMetricsEnabled(true); + Scan scan = + new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); FilterList list = new FilterList(); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1849,14 +1828,14 @@ static abstract class RandomScanWithRangeTest extends TableTest { boolean testRow(final int i, final long startTime) throws IOException { Pair startAndStopRow = getStartAndStopRow(); Scan scan = new Scan().withStartRow(startAndStopRow.getFirst()) - .withStopRow(startAndStopRow.getSecond()).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + .withStopRow(startAndStopRow.getSecond()).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1876,8 +1855,8 @@ boolean testRow(final int i, final long startTime) throws IOException { } if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } } finally { updateScanMetrics(s.getScanMetrics()); @@ -1886,7 +1865,7 @@ boolean testRow(final int i, final long startTime) throws IOException { return true; } - protected abstract Pair getStartAndStopRow(); + protected abstract Pair getStartAndStopRow(); protected Pair generateStartAndStopRows(int maxRange) { int start = this.rand.nextInt(Integer.MAX_VALUE) % opts.totalRows; @@ -1897,7 +1876,7 @@ protected Pair generateStartAndStopRows(int maxRange) { @Override protected int getReportingPeriod() { int period = opts.perClientRunRows / 100; - return period == 0? opts.perClientRunRows: period; + return period == 0 ? opts.perClientRunRows : period; } } @@ -1968,7 +1947,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1983,7 +1962,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.multiGet > 0) { this.gets.add(get); if (this.gets.size() == opts.multiGet) { - Result [] rs = this.table.get(this.gets); + Result[] rs = this.table.get(this.gets); if (opts.replicas > 1) { long latency = System.nanoTime() - startTime; updateValueSize(rs, latency); @@ -2023,8 +2002,8 @@ protected void testTakedown() throws IOException { } /* - * Send random reads against fake regions inserted by MetaWriteTest - */ + * Send random reads against fake regions inserted by MetaWriteTest + */ static class MetaRandomReadTest extends MetaTest { private RegionLocator regionLocator; @@ -2044,8 +2023,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.randomSleep > 0) { Thread.sleep(rand.nextInt(opts.randomSleep)); } - HRegionLocation hRegionLocation = regionLocator.getRegionLocation( - getSplitKey(rand.nextInt(opts.perClientRunRows)), true); + HRegionLocation hRegionLocation = + regionLocator.getRegionLocation(getSplitKey(rand.nextInt(opts.perClientRunRows)), true); LOG.debug("get location for region: " + hRegionLocation); return true; } @@ -2072,7 +2051,6 @@ protected byte[] generateRow(final int i) { return getRandomRow(this.rand, opts.totalRows); } - } static class ScanTest extends TableTest { @@ -2090,18 +2068,17 @@ void testTakedown() throws IOException { super.testTakedown(); } - @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; 
family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -2122,19 +2099,20 @@ boolean testRow(final int i, final long startTime) throws IOException { /** * Base class for operations that are CAS-like; that read a value and then set it based off what * they read. In this category is increment, append, checkAndPut, etc. - * - *

<p>These operations also want some concurrency going on. Usually when these tests run, they + * <p>

    + * These operations also want some concurrency going on. Usually when these tests run, they * operate in their own part of the key range. In CASTest, we will have them all overlap on the * same key space. We do this with our getStartRow and getLastRow overrides. */ static abstract class CASTableTest extends TableTest { - private final byte [] qualifier; + private final byte[] qualifier; + CASTableTest(Connection con, TestOptions options, Status status) { super(con, options, status); qualifier = Bytes.toBytes(this.getClass().getSimpleName()); } - byte [] getQualifier() { + byte[] getQualifier() { return this.qualifier; } @@ -2176,7 +2154,7 @@ static class AppendTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - byte [] bytes = format(i); + byte[] bytes = format(i); Append append = new Append(bytes); // unlike checkAndXXX tests, which make most sense to do on a single value, // if multiple families are specified for an append test we assume it is @@ -2197,7 +2175,7 @@ static class CheckAndMutateTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2205,8 +2183,8 @@ boolean testRow(final int i, final long startTime) throws IOException { this.table.put(put); RowMutations mutations = new RowMutations(bytes); mutations.add(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenMutate(mutations); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenMutate(mutations); return true; } } @@ -2218,14 +2196,14 @@ static class CheckAndPutTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); put.addColumn(FAMILY_ZERO, getQualifier(), bytes); this.table.put(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenPut(put); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenPut(put); return true; } } @@ -2237,7 +2215,7 @@ static class CheckAndDeleteTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2245,15 +2223,15 @@ boolean testRow(final int i, final long startTime) throws IOException { this.table.put(put); Delete delete = new Delete(put.getRow()); delete.addColumn(FAMILY_ZERO, getQualifier()); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenDelete(delete); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenDelete(delete); return true; } } /* - * Delete all fake regions inserted to meta table by MetaWriteTest. - */ + * Delete all fake regions inserted to meta table by MetaWriteTest. 
+ */ static class CleanMetaTest extends MetaTest { CleanMetaTest(Connection con, TestOptions options, Status status) { super(con, options, status); @@ -2266,8 +2244,8 @@ boolean testRow(final int i, final long startTime) throws IOException { .getRegionLocation(getSplitKey(i), false).getRegion(); LOG.debug("deleting region from meta: " + regionInfo); - Delete delete = MetaTableAccessor - .makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); + Delete delete = + MetaTableAccessor.makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); try (Table t = MetaTableAccessor.getMetaHTable(connection)) { t.delete(delete); } @@ -2291,7 +2269,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -2309,7 +2287,6 @@ boolean testRow(final int i, final long startTime) throws IOException { static class SequentialWriteTest extends BufferedMutatorTest { private ArrayList puts; - SequentialWriteTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.multiPut > 0) { @@ -2329,7 +2306,7 @@ boolean testRow(final int i, final long startTime) throws IOException { for (int family = 0; family < opts.families; family++) { byte familyName[] = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -2338,8 +2315,8 @@ boolean testRow(final int i, final long startTime) throws IOException { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -2369,8 +2346,8 @@ boolean testRow(final int i, final long startTime) throws IOException { } /* - * Insert fake regions into meta table with contiguous split keys. - */ + * Insert fake regions into meta table with contiguous split keys. 
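+ * For illustration (hypothetical values, assuming a key width of 7 digits): row 5 would insert a
+ * fake region spanning getSplitKey(5) = "0000005" to getSplitKey(6) = "0000006", so consecutive
+ * rows produce contiguous, non-overlapping regions.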
+ */ static class MetaWriteTest extends MetaTest { MetaWriteTest(Connection con, TestOptions options, Status status) { @@ -2381,27 +2358,26 @@ static class MetaWriteTest extends MetaTest { boolean testRow(final int i, final long startTime) throws IOException { List regionInfos = new ArrayList(); RegionInfo regionInfo = (RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE_NAME)) - .setStartKey(getSplitKey(i)) - .setEndKey(getSplitKey(i + 1)) - .build()); + .setStartKey(getSplitKey(i)).setEndKey(getSplitKey(i + 1)).build()); regionInfos.add(regionInfo); MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1); // write the serverName columns - MetaTableAccessor.updateRegionLocation(connection, - regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i, + MetaTableAccessor.updateRegionLocation(connection, regionInfo, + ServerName.valueOf("localhost", 60010, rand.nextLong()), i, EnvironmentEdgeManager.currentTime()); return true; } } + static class FilteredScanTest extends TableTest { protected static final Logger LOG = LoggerFactory.getLogger(FilteredScanTest.class.getName()); FilteredScanTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) { - LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + - ". This could take a very long time."); + LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + + ". This could take a very long time."); } } @@ -2426,18 +2402,18 @@ boolean testRow(int i, final long startTime) throws IOException { protected Scan constructScan(byte[] valuePrefix) throws IOException { FilterList list = new FilterList(); - Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, - CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); + Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, CompareOperator.EQUAL, + new BinaryComparator(valuePrefix)); list.addFilter(filter); if (opts.filterAll) { list.addFilter(new FilterAllFilter()); } Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) - .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) - .setScanMetricsEnabled(true); + .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) + .setScanMetricsEnabled(true); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(FAMILY_ZERO, qualifier); } } else { @@ -2450,64 +2426,61 @@ protected Scan constructScan(byte[] valuePrefix) throws IOException { /** * Compute a throughput rate in MB/s. - * @param rows Number of records consumed. + * @param rows Number of records consumed. * @param timeMs Time taken in milliseconds. 
* @return String value with label, ie '123.76 MB/s' */ - private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) { - BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + - ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families); + private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, + int columns) { + BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + + ((valueSize + (FAMILY_NAME_BASE.length() + 1) + COLUMN_ZERO.length) * columns) * families); BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT) - .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT) - .divide(BYTES_PER_MB, CXT); + .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT).divide(BYTES_PER_MB, CXT); return FMT.format(mbps) + " MB/s"; } /* - * Format passed integer. - * @param number - * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed - * number (Does absolute in case number is negative). + * Format passed integer. n * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version + * of passed number (Does absolute in case number is negative). */ - public static byte [] format(final int number) { - byte [] b = new byte[ROW_LENGTH]; + public static byte[] format(final int number) { + byte[] b = new byte[ROW_LENGTH]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } /* - * This method takes some time and is done inline uploading data. For - * example, doing the mapfile test, generation of the key and value - * consumes about 30% of CPU time. + * This method takes some time and is done inline uploading data. For example, doing the mapfile + * test, generation of the key and value consumes about 30% of CPU time. * @return Generated random value to insert into a table cell. 
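 * For illustration (hypothetical letters): a call such as generateData(rand, 20) could yield the
 * bytes of "AAAAAAAABBBBBBBBCCCC", i.e. 8-byte runs of one random letter with the remaining tail
 * filled by a single letter.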
*/ public static byte[] generateData(final Random r, int length) { - byte [] b = new byte [length]; + byte[] b = new byte[length]; int i; - for(i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); - for(; i < length; i++) { + for (; i < length; i++) { b[i] = a; } return b; } - static byte [] getRandomRow(final Random random, final int totalRows) { + static byte[] getRandomRow(final Random random, final int totalRows) { return format(generateRandomRow(random, totalRows)); } @@ -2516,10 +2489,10 @@ static int generateRandomRow(final Random random, final int totalRows) { } static RunResult runOneClient(final Class cmd, Configuration conf, - Connection con, AsyncConnection asyncCon, TestOptions opts, final Status status) - throws IOException, InterruptedException { - status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " - + opts.perClientRunRows + " rows"); + Connection con, AsyncConnection asyncCon, TestOptions opts, final Status status) + throws IOException, InterruptedException { + status.setStatus( + "Start " + cmd + " at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows"); long totalElapsedTime; final TestBase t; @@ -2527,39 +2500,40 @@ static RunResult runOneClient(final Class cmd, Configuration if (AsyncTest.class.isAssignableFrom(cmd)) { Class newCmd = (Class) cmd; Constructor constructor = - newCmd.getDeclaredConstructor(AsyncConnection.class, TestOptions.class, Status.class); + newCmd.getDeclaredConstructor(AsyncConnection.class, TestOptions.class, Status.class); t = constructor.newInstance(asyncCon, opts, status); } else { Class newCmd = (Class) cmd; Constructor constructor = - newCmd.getDeclaredConstructor(Connection.class, TestOptions.class, Status.class); + newCmd.getDeclaredConstructor(Connection.class, TestOptions.class, Status.class); t = constructor.newInstance(con, opts, status); } } catch (NoSuchMethodException e) { throw new IllegalArgumentException("Invalid command class: " + cmd.getName() - + ". It does not provide a constructor as described by " - + "the javadoc comment. Available constructors are: " - + Arrays.toString(cmd.getConstructors())); + + ". It does not provide a constructor as described by " + + "the javadoc comment. 
Available constructors are: " + + Arrays.toString(cmd.getConstructors())); } catch (Exception e) { throw new IllegalStateException("Failed to construct command class", e); } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows" + - " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime, - getAverageValueLength(opts), opts.families, opts.columns) + ")"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + opts.startRow + + " for " + opts.perClientRunRows + " rows" + " (" + + calculateMbps((int) (opts.perClientRunRows * opts.sampleRate), totalElapsedTime, + getAverageValueLength(opts), opts.families, opts.columns) + + ")"); return new RunResult(totalElapsedTime, t.numOfReplyOverLatencyThreshold, t.numOfReplyFromReplica, t.getLatencyHistogram()); } private static int getAverageValueLength(final TestOptions opts) { - return opts.valueRandom? opts.valueSize/2: opts.valueSize; + return opts.valueRandom ? opts.valueSize / 2 : opts.valueSize; } - private void runTest(final Class cmd, TestOptions opts) throws IOException, - InterruptedException, ClassNotFoundException, ExecutionException { + private void runTest(final Class cmd, TestOptions opts) + throws IOException, InterruptedException, ClassNotFoundException, ExecutionException { // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do // the TestOptions introspection for us and dump the output in a readable format. LOG.info(cmd.getSimpleName() + " test run options=" + GSON.toJson(opts)); @@ -2601,86 +2575,91 @@ protected static void printUsage(final String shortName, final String message) { System.err.println(" [-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); - System.err.println(" oneCon all the threads share the same connection. Default: False"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); + System.err + .println(" oneCon all the threads share the same connection. Default: False"); System.err.println(" connCount connections all threads share. " - + "For example, if set to 2, then all thread share 2 connection. " - + "Default: depend on oneCon parameter. if oneCon set to true, then connCount=1, " - + "if not, connCount=thread number"); - - System.err.println(" sampleRate Execute test on a sample of total " + - "rows. Only supported by randomRead. Default: 1.0"); - System.err.println(" period Report every 'period' rows: " + - "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows()/10); + + "For example, if set to 2, then all thread share 2 connection. " + + "Default: depend on oneCon parameter. if oneCon set to true, then connCount=1, " + + "if not, connCount=thread number"); + + System.err.println(" sampleRate Execute test on a sample of total " + + "rows. Only supported by randomRead. Default: 1.0"); + System.err.println(" period Report every 'period' rows: " + + "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows() / 10); System.err.println(" cycles How many times to cycle the test. Defaults: 1."); - System.err.println(" traceRate Enable HTrace spans. Initiate tracing every N rows. " + - "Default: 0"); + System.err.println( + " traceRate Enable HTrace spans. Initiate tracing every N rows. 
" + "Default: 0"); System.err.println(" latency Set to report operation latencies. Default: False"); - System.err.println(" latencyThreshold Set to report number of operations with latency " + - "over lantencyThreshold, unit in millisecond, default 0"); - System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + - " rows have been treated. Default: 0"); - System.err.println(" valueSize Pass value size to use: Default: " - + DEFAULT_OPTS.getValueSize()); - System.err.println(" valueRandom Set if we should vary value size between 0 and " + - "'valueSize'; set on read for stats on size: Default: Not set."); + System.err.println(" latencyThreshold Set to report number of operations with latency " + + "over lantencyThreshold, unit in millisecond, default 0"); + System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + + " rows have been treated. Default: 0"); + System.err + .println(" valueSize Pass value size to use: Default: " + DEFAULT_OPTS.getValueSize()); + System.err.println(" valueRandom Set if we should vary value size between 0 and " + + "'valueSize'; set on read for stats on size: Default: Not set."); System.err.println(" blockEncoding Block encoding to use. Value should be one of " - + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); + + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); - System.err.println(" rows Rows each client runs. Default: " - + DEFAULT_OPTS.getPerClientRunRows() + System.err.println( + " rows Rows each client runs. Default: " + DEFAULT_OPTS.getPerClientRunRows() + ". In case of randomReads and randomSeekScans this could" + " be specified along with --size to specify the number of rows to be scanned within" + " the total range specified by the size."); System.err.println( " size Total size in GiB. Mutually exclusive with --rows for writes and scans" - + ". But for randomReads and randomSeekScans when you use size with --rows you could" - + " use size to specify the end range and --rows" - + " specifies the number of rows within that range. " + "Default: 1.0."); + + ". But for randomReads and randomSeekScans when you use size with --rows you could" + + " use size to specify the end range and --rows" + + " specifies the number of rows within that range. " + "Default: 1.0."); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); - System.err.println(" valueZipf Set if we should vary value size between 0 and " + - "'valueSize' in zipf form: Default: Not set."); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); + System.err.println(" valueZipf Set if we should vary value size between 0 and " + + "'valueSize' in zipf form: Default: Not set."); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); System.err.println(" autoFlush Set autoFlush on htable. Default: False"); - System.err.println(" multiPut Batch puts together into groups of N. Only supported " + - "by write. If multiPut is bigger than 0, autoFlush need to set to true. Default: 0"); + System.err.println(" multiPut Batch puts together into groups of N. Only supported " + + "by write. If multiPut is bigger than 0, autoFlush need to set to true. 
Default: 0"); System.err.println(" presplit Create presplit table. If a table with same name exists," - + " it'll be deleted and recreated (instead of verifying count of its existing regions). " - + "Recommended for accurate perf analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default: false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); + + " it'll be deleted and recreated (instead of verifying count of its existing regions). " + + "Recommended for accurate perf analysis (see guide). Default: disabled"); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default: false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table."); System.err.println(" columns Columns to write per row. Default: 1"); - System.err.println(" families Specify number of column families for the table. Default: 1"); + System.err + .println(" families Specify number of column families for the table. Default: 1"); System.err.println(); System.err.println("Read Tests:"); System.err.println(" filterAll Helps to filter out all the rows on the server side" - + " there by not returning any thing back to the client. Helps to check the server side" - + " performance. Uses FilterAllFilter internally. "); - System.err.println(" multiGet Batch gets together into groups of N. Only supported " + - "by randomRead. Default: disabled"); - System.err.println(" inmemory Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served " + - "from memory. Default: false"); - System.err.println(" bloomFilter Bloom filter type, one of " - + Arrays.toString(BloomType.values())); + + " there by not returning any thing back to the client. Helps to check the server side" + + " performance. Uses FilterAllFilter internally. "); + System.err.println(" multiGet Batch gets together into groups of N. Only supported " + + "by randomRead. Default: disabled"); + System.err.println(" inmemory Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served " + + "from memory. Default: false"); + System.err + .println(" bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values())); System.err.println(" blockSize Blocksize to use when writing out hfiles. "); - System.err.println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " + System.err + .println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " + "Uses the CompactingMemstore"); System.err.println(" addColumns Adds columns to scans/gets explicitly. Default: true"); System.err.println(" replicas Enable region replica testing. Defaults: 1."); - System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); + System.err.println( + " randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); System.err.println(" caching Scan caching to use. Default: 30"); System.err.println(" asyncPrefetch Enable asyncPrefetch for scan"); System.err.println(" cacheBlocks Set the cacheBlocks option for scan. 
Default: true"); - System.err.println(" scanReadType Set the readType option for scan, stream/pread/default. Default: default"); + System.err.println( + " scanReadType Set the readType option for scan, stream/pread/default. Default: default"); System.err.println(" bufferSize Set the value of client side buffering. Default: 2MB"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); @@ -2695,7 +2674,7 @@ protected static void printUsage(final String shortName, final String message) { System.err.println(); System.err.println("Args:"); System.err.println(" nclients Integer. Required. Total number of clients " - + "(and HRegionServers) running. 1 <= value <= 500"); + + "(and HRegionServers) running. 1 <= value <= 500"); System.err.println("Examples:"); System.err.println(" To run a single client doing the default 1M sequentialWrites:"); System.err.println(" $ hbase " + shortName + " sequentialWrite 1"); @@ -2704,10 +2683,10 @@ protected static void printUsage(final String shortName, final String message) { } /** - * Parse options passed in via an arguments array. Assumes that array has been split - * on white-space and placed into a {@code Queue}. Any unknown arguments will remain - * in the queue at the conclusion of this method call. It's up to the caller to deal - * with these unrecognized arguments. + * Parse options passed in via an arguments array. Assumes that array has been split on + * white-space and placed into a {@code Queue}. Any unknown arguments will remain in the queue at + * the conclusion of this method call. It's up to the caller to deal with these unrecognized + * arguments. */ static TestOptions parseOpts(Queue args) { TestOptions opts = new TestOptions(); @@ -2896,7 +2875,7 @@ static TestOptions parseOpts(Queue args) { } final String blockSize = "--blockSize="; - if(cmd.startsWith(blockSize) ) { + if (cmd.startsWith(blockSize)) { opts.blockSize = Integer.parseInt(cmd.substring(blockSize.length())); continue; } @@ -2934,7 +2913,7 @@ static TestOptions parseOpts(Queue args) { final String inMemoryCompaction = "--inmemoryCompaction="; if (cmd.startsWith(inMemoryCompaction)) { opts.inMemoryCompaction = - MemoryCompactionPolicy.valueOf(cmd.substring(inMemoryCompaction.length())); + MemoryCompactionPolicy.valueOf(cmd.substring(inMemoryCompaction.length())); continue; } @@ -2971,7 +2950,7 @@ static TestOptions parseOpts(Queue args) { final String scanReadType = "--scanReadType="; if (cmd.startsWith(scanReadType)) { opts.scanReadType = - Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase()); + Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase()); continue; } @@ -3005,17 +2984,17 @@ static TestOptions parseOpts(Queue args) { } /** - * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts - */ - private static void validateParsedOpts(TestOptions opts) { + * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts + */ + private static void validateParsedOpts(TestOptions opts) { if (!opts.autoFlush && opts.multiPut > 0) { throw new IllegalArgumentException("autoFlush must be true when multiPut is more than 0"); } if (opts.oneCon && opts.connCount > 1) { - throw new IllegalArgumentException("oneCon is set to true, " - + "connCount should not bigger than 1"); + throw new IllegalArgumentException( + "oneCon is set to true, " + "connCount should not bigger than 1"); } if (opts.valueZipf && opts.valueRandom) { @@ 
-3025,10 +3004,11 @@ private static void validateParsedOpts(TestOptions opts) { static TestOptions calculateRowsAndSize(final TestOptions opts) { int rowsPerGB = getRowsPerGB(opts); - if ((opts.getCmdName() != null + if ( + (opts.getCmdName() != null && (opts.getCmdName().equals(RANDOM_READ) || opts.getCmdName().equals(RANDOM_SEEK_SCAN))) - && opts.size != DEFAULT_OPTS.size - && opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) { + && opts.size != DEFAULT_OPTS.size && opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows + ) { opts.totalRows = (int) opts.size * rowsPerGB; } else if (opts.size != DEFAULT_OPTS.size) { // total size in GB specified @@ -3042,8 +3022,8 @@ static TestOptions calculateRowsAndSize(final TestOptions opts) { } static int getRowsPerGB(final TestOptions opts) { - return ONE_GB / ((opts.valueRandom? opts.valueSize/2: opts.valueSize) * opts.getFamilies() * - opts.getColumns()); + return ONE_GB / ((opts.valueRandom ? opts.valueSize / 2 : opts.valueSize) * opts.getFamilies() + * opts.getColumns()); } @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index a9ce959c6f9a..983af877b98c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -49,8 +48,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * A simple performance evaluation tool for single client and MR scans - * and snapshot scans. + * A simple performance evaluation tool for single client and MR scans and snapshot scans. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ScanPerformanceEvaluation extends AbstractHBaseTool { @@ -78,7 +76,8 @@ public void setConf(Configuration conf) { @Override protected void addOptions() { - this.addRequiredOptWithArg("t", "type", "the type of the test. One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); + this.addRequiredOptWithArg("t", "type", + "the type of the test. 
One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); this.addOptWithArg("f", "file", "the filename to read from"); this.addOptWithArg("tn", "table", "the tablename to read from"); this.addOptWithArg("sn", "snapshot", "the snapshot name to read from"); @@ -119,15 +118,15 @@ protected void testHdfsStreaming(Path filename) throws IOException { } streamTimer.stop(); - double throughput = (double)totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); System.out.println("HDFS streaming: "); - System.out.println("total time to open: " + - fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out + .println("total time to open: " + fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to read: " + streamTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throghput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throghput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); } private Scan getScan() { @@ -176,30 +175,30 @@ public void testScan() throws IOException { ScanMetrics metrics = scanner.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan: "); - System.out.println("total time to open table: " + - tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open table: " + tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + 
StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScan() throws IOException { Stopwatch snapshotRestoreTimer = Stopwatch.createUnstarted(); Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); @@ -233,27 +232,28 @@ public void testSnapshotScan() throws IOException { ScanMetrics metrics = scanner.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan snapshot: "); - System.out.println("total time to restore snapshot: " + - snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to restore snapshot: " + + snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } @@ -264,9 +264,8 @@ public static enum ScanCounter { public static class MyMapper extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, - InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { context.getCounter(ScanCounter.NUM_ROWS).increment(1); context.getCounter(ScanCounter.NUM_CELLS).increment(value.rawCells().length); } @@ -285,14 +284,8 @@ public void testScanMapReduce() throws IOException, InterruptedException, ClassN job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob( - this.tablename, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job - ); + 
TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class, + NullWritable.class, job); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -308,25 +301,28 @@ public void testScanMapReduce() throws IOException, InterruptedException, ClassN long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException { + public void testSnapshotScanMapReduce() + throws IOException, InterruptedException, ClassNotFoundException { Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); Stopwatch scanTimer = Stopwatch.createUnstarted(); @@ -339,16 +335,8 @@ public void testSnapshotScanMapReduce() throws IOException, InterruptedException job.setJarByClass(getClass()); - TableMapReduceUtil.initTableSnapshotMapperJob( - this.snapshotName, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job, - true, - new Path(restoreDir) - ); + TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, + NullWritable.class, NullWritable.class, job, true, new Path(restoreDir)); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -364,29 +352,31 @@ public void testSnapshotScanMapReduce() throws IOException, InterruptedException long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / 
scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out + .println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } @Override protected int doWork() throws Exception { if (type.equals("streaming")) { testHdfsStreaming(new Path(file)); - } else if (type.equals("scan")){ + } else if (type.equals("scan")) { testScan(); } else if (type.equals("snapshotscan")) { testSnapshotScan(); @@ -398,7 +388,7 @@ protected int doWork() throws Exception { return 0; } - public static void main (String[] args) throws Exception { + public static void main(String[] args) throws Exception { int ret = ToolRunner.run(HBaseConfiguration.create(), new ScanPerformanceEvaluation(), args); System.exit(ret); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java index d1f8cc08b269..f02d30c3887b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,6 @@ import java.util.Queue; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -56,20 +55,19 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPerformanceEvaluation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPerformanceEvaluation.class); + HBaseClassTestRule.forClass(TestPerformanceEvaluation.class); private static final HBaseTestingUtil HTU = new HBaseTestingUtil(); @Test public void testDefaultInMemoryCompaction() { - PerformanceEvaluation.TestOptions defaultOpts = - new PerformanceEvaluation.TestOptions(); + PerformanceEvaluation.TestOptions defaultOpts = new PerformanceEvaluation.TestOptions(); assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, - defaultOpts.getInMemoryCompaction().toString()); + defaultOpts.getInMemoryCompaction().toString()); TableDescriptor tableDescriptor = PerformanceEvaluation.getTableDescriptor(defaultOpts); for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) { assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, @@ -172,7 +170,7 @@ public void testRandomReadCalculation() { @Test public void testZipfian() throws NoSuchMethodException, SecurityException, InstantiationException, - IllegalAccessException, IllegalArgumentException, InvocationTargetException { + IllegalAccessException, IllegalArgumentException, InvocationTargetException { TestOptions opts = new PerformanceEvaluation.TestOptions(); opts.setValueZipf(true); final int valueSize = 1024; @@ -181,7 +179,7 @@ public void testZipfian() throws NoSuchMethodException, SecurityException, Insta Constructor ctor = Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class); ctor.setAccessible(true); - Histogram histogram = (Histogram)ctor.newInstance(new UniformReservoir(1024 * 500)); + Histogram histogram = (Histogram) ctor.newInstance(new UniformReservoir(1024 * 500)); for (int i = 0; i < 100; i++) { histogram.update(rrt.getValueLength(null)); } @@ -256,11 +254,11 @@ public void testParseOptsMultiPuts() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException e) { System.out.println(e.getMessage()); } - //Re-create options + // Re-create options opts = new LinkedList<>(); opts.offer("--autoFlush=true"); opts.offer("--multiPut=10"); @@ -316,7 +314,7 @@ public void testParseOptsConnCount() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException e) { System.out.println(e.getMessage()); } @@ -344,7 +342,7 @@ public void testParseOptsValueRandom() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalStateException e) { + } catch (IllegalStateException e) { System.out.println(e.getMessage()); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java index 327b7afec2fb..6c49a43bf463 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,18 +29,17 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestDriver { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDriver.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDriver.class); @Test public void testDriverMainMethod() throws Throwable { ProgramDriver programDriverMock = mock(ProgramDriver.class); Driver.setProgramDriver(programDriverMock); - Driver.main(new String[]{}); + Driver.main(new String[] {}); verify(programDriverMock).driver(Mockito.any()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java index 76e3c73e2d50..4ccaa43a665a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,17 +49,16 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMap { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGroupingTableMap.class); + HBaseClassTestRule.forClass(TestGroupingTableMap.class); @Test @SuppressWarnings({ "deprecation", "unchecked" }) - public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() - throws Exception { + public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception { GroupingTableMap gTableMap = null; try { Result result = mock(Result.class); @@ -71,22 +70,21 @@ public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - Bytes.toBytes("1111")), - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - Bytes.toBytes("2222")), - new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), - Bytes.toBytes("3333"))); + List keyValues = ImmutableList. 
of( + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), + Bytes.toBytes("1111")), + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), + Bytes.toBytes("2222")), + new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), + Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = - mock(OutputCollector.class); + mock(OutputCollector.class); gTableMap.map(null, result, outputCollectorMock, reporter); verify(result).listCells(); verifyZeroInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -104,24 +102,22 @@ public void shouldCreateNewKeyAlthoughExtraKey() throws Exception { gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - Bytes.toBytes("1111")), - new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), - Bytes.toBytes("2222")), - new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"), - Bytes.toBytes("3333"))); + List keyValues = ImmutableList. of( + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), + Bytes.toBytes("1111")), + new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), + Bytes.toBytes("2222")), + new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"), + Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = - mock(OutputCollector.class); + mock(OutputCollector.class); gTableMap.map(null, result, outputCollectorMock, reporter); verify(result).listCells(); - verify(outputCollectorMock, times(1)) - .collect(any(), any()); + verify(outputCollectorMock, times(1)).collect(any(), any()); verifyNoMoreInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -142,24 +138,22 @@ public void shouldCreateNewKey() throws Exception { final byte[] firstPartKeyValue = Bytes.toBytes("34879512738945"); final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437"); byte[] row = {}; - List cells = ImmutableList.of( - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - firstPartKeyValue), - new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), - secondPartKeyValue)); + List cells = ImmutableList. of( + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), firstPartKeyValue), + new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), + secondPartKeyValue)); when(result.listCells()).thenReturn(cells); final AtomicBoolean outputCollected = new AtomicBoolean(); OutputCollector outputCollector = - new OutputCollector() { - @Override - public void collect(ImmutableBytesWritable arg, Result result) throws IOException { - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. 
- Bytes.concat(firstPartKeyValue, bSeparator, - secondPartKeyValue), arg.copyBytes()); - outputCollected.set(true); - } - }; + new OutputCollector() { + @Override + public void collect(ImmutableBytesWritable arg, Result result) throws IOException { + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartKeyValue, bSeparator, secondPartKeyValue), arg.copyBytes()); + outputCollected.set(true); + } + }; gTableMap.map(null, result, outputCollector, reporter); verify(result).listCells(); @@ -169,12 +163,10 @@ public void collect(ImmutableBytesWritable arg, Result result) throws IOExceptio final byte[] secondPartValue = Bytes.toBytes("4678456942345"); byte[][] data = { firstPartValue, secondPartValue }; ImmutableBytesWritable byteWritable = gTableMap.createGroupKey(data); - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. - Bytes.concat(firstPartValue, - bSeparator, secondPartValue), byteWritable.get()); + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartValue, bSeparator, secondPartValue), byteWritable.get()); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -186,8 +178,7 @@ public void shouldReturnNullFromCreateGroupKey() throws Exception { gTableMap = new GroupingTableMap(); assertNull(gTableMap.createGroupKey(null)); } finally { - if(gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java index 25576c1ef420..96e25b51f659 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestIdentityTableMap { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIdentityTableMap.class); + HBaseClassTestRule.forClass(TestIdentityTableMap.class); @Test @SuppressWarnings({ "deprecation", "unchecked" }) @@ -52,17 +52,14 @@ public void shouldCollectPredefinedTimes() throws IOException { identityTableMap = new IdentityTableMap(); ImmutableBytesWritable bytesWritableMock = mock(ImmutableBytesWritable.class); OutputCollector outputCollectorMock = - mock(OutputCollector.class); + mock(OutputCollector.class); for (int i = 0; i < recordNumber; i++) - identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, - reporterMock); + identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, reporterMock); - verify(outputCollectorMock, times(recordNumber)).collect( - Mockito.any(), Mockito.any()); + verify(outputCollectorMock, times(recordNumber)).collect(Mockito.any(), Mockito.any()); } finally { - if (identityTableMap != null) - identityTableMap.close(); + if (identityTableMap != null) identityTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java index 1dd3e69f9775..c042bd35a56d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,18 +46,18 @@ @Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestMultiTableSnapshotInputFormat - extends org.apache.hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormat { + extends org.apache.hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); private static final Logger LOG = - LoggerFactory.getLogger(TestMultiTableSnapshotInputFormat.class); + LoggerFactory.getLogger(TestMultiTableSnapshotInputFormat.class); @Override protected void runJob(String jobName, Configuration c, List scans) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { JobConf job = new JobConf(TEST_UTIL.getConfiguration()); job.setJobName(jobName); @@ -65,7 +65,7 @@ protected void runJob(String jobName, Configuration c, List scans) job.setReducerClass(Reducer.class); TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); TableMapReduceUtil.addDependencyJars(job); @@ -81,21 +81,19 @@ protected void runJob(String jobName, Configuration c, List scans) } public static class Mapper extends TestMultiTableSnapshotInputFormat.ScanMapper - implements TableMap { + implements TableMap { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector outputCollector, - Reporter reporter) throws IOException { + OutputCollector outputCollector, + Reporter reporter) throws IOException { makeAssertions(key, value); outputCollector.collect(key, key); } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. * @throws IOException if an I/O error occurs */ @Override @@ -108,24 +106,22 @@ public void configure(JobConf jobConf) { } } - public static class Reducer extends TestMultiTableSnapshotInputFormat.ScanReducer implements - org.apache.hadoop.mapred.Reducer { + public static class Reducer extends TestMultiTableSnapshotInputFormat.ScanReducer + implements org.apache.hadoop.mapred.Reducer { private JobConf jobConf; @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector outputCollector, Reporter reporter) - throws IOException { + OutputCollector outputCollector, Reporter reporter) + throws IOException { makeAssertions(key, Lists.newArrayList(values)); } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. 
* @throws IOException if an I/O error occurs */ @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java index 13913e5fc24a..dc55ff977dcc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,12 +45,12 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestRowCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowCounter.class); + HBaseClassTestRule.forClass(TestRowCounter.class); @Test @SuppressWarnings("deprecation") @@ -68,8 +68,7 @@ void doRead() { @Test @SuppressWarnings("deprecation") - public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() - throws Exception { + public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() throws Exception { final String[] args = new String[] { "one", "two" }; String line = "ERROR: Wrong number of parameters: " + args.length; String result = new OutputReader(System.err) { @@ -90,10 +89,9 @@ public void shouldRegInReportEveryIncomingRow() throws IOException { Reporter reporter = mock(Reporter.class); for (int i = 0; i < iterationNumber; i++) mapper.map(mock(ImmutableBytesWritable.class), mock(Result.class), - mock(OutputCollector.class), reporter); + mock(OutputCollector.class), reporter); - Mockito.verify(reporter, times(iterationNumber)).incrCounter( - any(), anyLong()); + Mockito.verify(reporter, times(iterationNumber)).incrCounter(any(), anyLong()); } @Test @@ -101,8 +99,7 @@ public void shouldRegInReportEveryIncomingRow() throws IOException { public void shouldCreateAndRunSubmittableJob() throws Exception { RowCounter rCounter = new RowCounter(); rCounter.setConf(HBaseConfiguration.create()); - String[] args = new String[] { "\temp", "tableA", "column1", "column2", - "column3" }; + String[] args = new String[] { "\temp", "tableA", "column1", "column2", "column3" }; JobConf jobConfig = rCounter.createSubmittableJob(args); assertNotNull(jobConfig); @@ -110,13 +107,14 @@ public void shouldCreateAndRunSubmittableJob() throws Exception { assertEquals("rowcounter", jobConfig.getJobName()); assertEquals(jobConfig.getMapOutputValueClass(), Result.class); assertEquals(jobConfig.getMapperClass(), RowCounterMapper.class); - assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), Joiner.on(' ') - .join("column1", "column2", "column3")); + assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), + Joiner.on(' ').join("column1", "column2", "column3")); assertEquals(jobConfig.getMapOutputKeyClass(), ImmutableBytesWritable.class); } enum Outs { - OUT, ERR + OUT, + ERR } private static abstract class OutputReader { @@ -147,17 +145,16 @@ protected String read() throws Exception { return new String(outBytes.toByteArray()); } finally { switch (outs) { - case OUT: { - System.setOut(oldPrintStream); - break; - } - case ERR: { - System.setErr(oldPrintStream); - break; - } - default: - throw new IllegalStateException( - "OutputReader: unsupported PrintStream"); + case OUT: { + 
System.setOut(oldPrintStream); + break; + } + case ERR: { + System.setErr(oldPrintStream); + break; + } + default: + throw new IllegalStateException("OutputReader: unsupported PrintStream"); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java index bf46a7ac6d88..3e093430a92e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,27 +33,27 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSplitTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitTable.class); + HBaseClassTestRule.forClass(TestSplitTable.class); @Rule public TestName name = new TestName(); @Test - @SuppressWarnings({"deprecation", "SelfComparison"}) + @SuppressWarnings({ "deprecation", "SelfComparison" }) public void testSplitTableCompareTo() { - TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("aaa"), Bytes.toBytes("ddd"), "locationA"); + TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("aaa"), + Bytes.toBytes("ddd"), "locationA"); - TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("iii"), Bytes.toBytes("kkk"), "locationA"); + TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("iii"), + Bytes.toBytes("kkk"), "locationA"); - TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("lll"), Bytes.toBytes("zzz"), "locationA"); + TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("lll"), + Bytes.toBytes("zzz"), "locationA"); assertEquals(0, aTableSplit.compareTo(aTableSplit)); assertEquals(0, bTableSplit.compareTo(bTableSplit)); @@ -105,18 +105,15 @@ public void testSplitTableEquals() { @Test @SuppressWarnings("deprecation") public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); - String str = - "HBase table split(table name: " + name.getMethodName() + ", start row: row-start, " - + "end row: row-end, region location: location)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); + String str = "HBase table split(table name: " + name.getMethodName() + + ", start row: row-start, " + "end row: row-end, region location: location)"; Assert.assertEquals(str, split.toString()); split = new TableSplit((TableName) null, null, null, null); - str = - "HBase table split(table name: null, start row: null, " - + "end row: null, region location: null)"; + str = "HBase table split(table name: null, start row: null, " + + "end row: null, region location: null)"; Assert.assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index 2c5abec8ddec..7d6dc6e46b71 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,12 +75,12 @@ /** * This tests the TableInputFormat and its recovery semantics */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormat.class); + HBaseClassTestRule.forClass(TestTableInputFormat.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableInputFormat.class); @@ -109,21 +109,16 @@ public void before() throws IOException { /** * Setup a table with two rows and values. - * * @param tableName the name of the table to create - * @return A Table instance for the created table. - * @throws IOException + * @return A Table instance for the created table. n */ public static Table createTable(byte[] tableName) throws IOException { return createTable(tableName, new byte[][] { FAMILY }); } /** - * Setup a table with two rows and values per column family. - * - * @param tableName - * @return A Table instance for the created table. - * @throws IOException + * Setup a table with two rows and values per column family. n * @return A Table instance for the + * created table. n */ public static Table createTable(byte[] tableName, byte[][] families) throws IOException { Table table = UTIL.createTable(TableName.valueOf(tableName), families); @@ -142,15 +137,14 @@ public static Table createTable(byte[] tableName, byte[][] families) throws IOEx /** * Verify that the result and key have expected values. - * - * @param r single row result - * @param key the row key - * @param expectedKey the expected key + * @param r single row result + * @param key the row key + * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -159,15 +153,11 @@ static boolean checkResult(Result r, ImmutableBytesWritable key, } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapred API. - * - * @param table - * @throws IOException + * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. 
nn */ static void runTestMapred(Table table) throws IOException { org.apache.hadoop.hbase.mapred.TableRecordReader trr = - new org.apache.hadoop.hbase.mapred.TableRecordReader(); + new org.apache.hadoop.hbase.mapred.TableRecordReader(); trr.setStartRow(Bytes.toBytes("aaa")); trr.setEndRow(Bytes.toBytes("zzz")); trr.setHTable(table); @@ -191,12 +181,9 @@ static void runTestMapred(Table table) throws IOException { } /** - * Create a table that IOE's on first scanner next call - * - * @throws IOException + * Create a table that IOE's on first scanner next call n */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -225,13 +212,9 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Create a table that throws a DoNoRetryIOException on first scanner next - * call - * - * @throws IOException + * Create a table that throws a DoNoRetryIOException on first scanner next call n */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -246,9 +229,8 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) - .when(scanner).next(); + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) + .when(scanner).next(); return scanner; } @@ -263,9 +245,7 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Run test assuming no errors using mapred api. - * - * @throws IOException + * Run test assuming no errors using mapred api. n */ @Test public void testTableRecordReader() throws IOException { @@ -274,9 +254,7 @@ public void testTableRecordReader() throws IOException { } /** - * Run test assuming Scanner IOException failure using mapred api, - * - * @throws IOException + * Run test assuming Scanner IOException failure using mapred api, n */ @Test public void testTableRecordReaderScannerFail() throws IOException { @@ -285,9 +263,7 @@ public void testTableRecordReaderScannerFail() throws IOException { } /** - * Run test assuming Scanner IOException failure using mapred api, - * - * @throws IOException + * Run test assuming Scanner IOException failure using mapred api, n */ @Test(expected = IOException.class) public void testTableRecordReaderScannerFailTwice() throws IOException { @@ -297,7 +273,6 @@ public void testTableRecordReaderScannerFailTwice() throws IOException { /** * Run test assuming NotServingRegionException using mapred api. - * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test @@ -308,7 +283,6 @@ public void testTableRecordReaderScannerTimeout() throws IOException { /** * Run test assuming NotServingRegionException using mapred api. 
- * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) @@ -330,8 +304,8 @@ public void testExtensionOfTableInputFormatBase() throws IOException { @Test public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "as it was given in 0.98."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "as it was given in 0.98."); final Table table = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); @@ -339,8 +313,8 @@ public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { @Test public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "using JobConfigurable."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "using JobConfigurable."); final Table table = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -357,17 +331,17 @@ void testInputFormat(Class clazz) throws IOException { final RunningJob run = JobClient.runJob(job); assertTrue("job failed!", run.isSuccessful()); assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter()); assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter()); assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter()); assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter()); assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter()); assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter()); } public static class ExampleVerifier implements TableMap { @@ -378,18 +352,20 @@ public void configure(JobConf conf) { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { for (Cell cell : value.listCells()) { - reporter.getCounter(TestTableInputFormat.class.getName() + ":row", + reporter + 
.getCounter(TestTableInputFormat.class.getName() + ":row", Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) - .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":family", + .increment(1l); + reporter + .getCounter(TestTableInputFormat.class.getName() + ":family", Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) - .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":value", + .increment(1l); + reporter + .getCounter(TestTableInputFormat.class.getName() + ":value", Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) - .increment(1l); + .increment(1l); } } @@ -408,8 +384,7 @@ public void configure(JobConf job) { Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable")); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = @@ -440,7 +415,6 @@ protected void initialize(JobConf job) throws IOException { } } - public static class ExampleTIF extends TableInputFormatBase { @Override @@ -453,8 +427,7 @@ protected void initialize(JobConf job, String table) throws IOException { TableName tableName = TableName.valueOf(table); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = @@ -466,4 +439,3 @@ protected void initialize(JobConf job, String table) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java index e36847613062..2820d9111277 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,36 +43,35 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. 
*/ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) @SuppressWarnings("deprecation") public class TestTableMapReduce extends TestTableMapReduceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduce.class); + HBaseClassTestRule.forClass(TestTableMapReduce.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestTableMapReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class.getName()); - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce */ - static class ProcessContentsMapper extends MapReduceBase implements - TableMap { + static class ProcessContentsMapper extends MapReduceBase + implements TableMap { /** * Pass the key, and reversed value to reduce */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { output.collect(key, TestTableMapReduceBase.map(key, value)); } } @@ -86,8 +85,8 @@ protected void runTestOnTable(Table table) throws IOException { jobConf.setJobName("process column contents"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(table.getName().getNameAsString(), - Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, - ImmutableBytesWritable.class, Put.class, jobConf); + Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, ImmutableBytesWritable.class, + Put.class, jobConf); TableMapReduceUtil.initTableReduceJob(table.getName().getNameAsString(), IdentityTableReduce.class, jobConf); @@ -105,4 +104,3 @@ protected void runTestOnTable(Table table) throws IOException { } } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index e28cbb63d104..77ac55a1b6d0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,15 +57,14 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableMapReduceUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); + HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestTableMapReduceUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduceUtil.class); private static Table presidentsTable; private static final String TABLE_NAME = "People"; @@ -73,20 +72,19 @@ public class TestTableMapReduceUtil { private static final byte[] COLUMN_FAMILY = Bytes.toBytes("info"); private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("name"); - private static ImmutableSet presidentsRowKeys = ImmutableSet.of( - "president1", "president2", "president3"); - private static Iterator presidentNames = ImmutableSet.of( - "John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); + private static ImmutableSet presidentsRowKeys = + ImmutableSet.of("president1", "president2", "president3"); + private static Iterator presidentNames = + ImmutableSet.of("John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); - private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", - "actor2"); - private static Iterator actorNames = ImmutableSet.of( - "Jack Nicholson", "Martin Freeman").iterator(); + private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", "actor2"); + private static Iterator actorNames = + ImmutableSet.of("Jack Nicholson", "Martin Freeman").iterator(); private static String PRESIDENT_PATTERN = "president"; private static String ACTOR_PATTERN = "actor"; - private static ImmutableMap> relation = ImmutableMap - .of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); + private static ImmutableMap> relation = + ImmutableMap.of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -133,12 +131,11 @@ private static void createPutCommand(Table table) throws IOException { } /** - * Check what the given number of reduce tasks for the given job configuration - * does not exceed the number of regions for the given table. + * Check what the given number of reduce tasks for the given job configuration does not exceed the + * number of regions for the given table. 
*/ @Test - public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); @@ -155,8 +152,7 @@ public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() } @Test - public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf); @@ -178,49 +174,42 @@ public void shoudBeValidMapReduceEvaluation() throws Exception { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @Test @SuppressWarnings("deprecation") - public void shoudBeValidMapReduceWithPartitionerEvaluation() - throws IOException { + public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(2); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf, + HRegionPartitioner.class); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @SuppressWarnings("deprecation") - static class ClassificatorRowReduce extends MapReduceBase implements - TableReduce { + static class ClassificatorRowReduce extends MapReduceBase + implements TableReduce { @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { String strKey = Bytes.toString(key.get()); List result = new ArrayList<>(); while (values.hasNext()) @@ -244,18 +233,17 @@ private void throwAccertionError(String errorMessage) throws AssertionError { } @SuppressWarnings("deprecation") - static class ClassificatorMapper extends MapReduceBase implements - TableMap { + static class ClassificatorMapper extends MapReduceBase + implements TableMap { @Override public void 
map(ImmutableBytesWritable row, Result result, - OutputCollector outCollector, - Reporter reporter) throws IOException { + OutputCollector outCollector, Reporter reporter) + throws IOException { String rowKey = Bytes.toString(result.getRow()); - final ImmutableBytesWritable pKey = new ImmutableBytesWritable( - Bytes.toBytes(PRESIDENT_PATTERN)); - final ImmutableBytesWritable aKey = new ImmutableBytesWritable( - Bytes.toBytes(ACTOR_PATTERN)); + final ImmutableBytesWritable pKey = + new ImmutableBytesWritable(Bytes.toBytes(PRESIDENT_PATTERN)); + final ImmutableBytesWritable aKey = new ImmutableBytesWritable(Bytes.toBytes(ACTOR_PATTERN)); ImmutableBytesWritable outKey = null; if (rowKey.startsWith(PRESIDENT_PATTERN)) { @@ -266,11 +254,9 @@ public void map(ImmutableBytesWritable row, Result result, throw new AssertionError("unexpected rowKey"); } - String name = Bytes.toString(result.getValue(COLUMN_FAMILY, - COLUMN_QUALIFIER)); - outCollector.collect(outKey, - new Put(Bytes.toBytes("rowKey2")) - .addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name))); + String name = Bytes.toString(result.getValue(COLUMN_FAMILY, COLUMN_QUALIFIER)); + outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).addColumn(COLUMN_FAMILY, + COLUMN_QUALIFIER, Bytes.toBytes(name))); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java index b14bc9aac24c..fec2c8cf0204 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,20 +37,19 @@ import org.slf4j.LoggerFactory; /** - * Spark creates many instances of TableOutputFormat within a single process. We need to make - * sure we can have many instances and not leak connections. - * - * This test creates a few TableOutputFormats and shouldn't fail due to ZK connection exhaustion. + * Spark creates many instances of TableOutputFormat within a single process. We need to make sure + * we can have many instances and not leak connections. This test creates a few TableOutputFormats + * and shouldn't fail due to ZK connection exhaustion. */ @Category(MediumTests.class) public class TestTableOutputFormatConnectionExhaust { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableOutputFormatConnectionExhaust.class); + HBaseClassTestRule.forClass(TestTableOutputFormatConnectionExhaust.class); private static final Logger LOG = - LoggerFactory.getLogger(TestTableOutputFormatConnectionExhaust.class); + LoggerFactory.getLogger(TestTableOutputFormatConnectionExhaust.class); private final static HBaseTestingUtil UTIL = new HBaseTestingUtil(); static final String TABLE = "TestTableOutputFormatConnectionExhaust"; @@ -77,16 +76,16 @@ public void before() throws IOException { } /** - * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase + * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase * Connection (ZK) resources, and will throw exception if they are exhausted. 
*/ - static void openCloseTableOutputFormat(int iter) throws IOException { + static void openCloseTableOutputFormat(int iter) throws IOException { LOG.info("Instantiating TableOutputFormat connection " + iter); JobConf conf = new JobConf(); conf.addResource(UTIL.getConfiguration()); conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE); - TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, conf); + TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, conf); TableOutputFormat tof = new TableOutputFormat(); RecordWriter rw = tof.getRecordWriter(null, conf, TABLE, null); rw.close(null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java index f0556ca8ee7d..3c1b717d5abf 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,12 +53,12 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); private static final byte[] aaa = Bytes.toBytes("aaa"); private static final byte[] after_zzz = Bytes.toBytes("zz{"); // 'z' + 1 => '{' @@ -79,25 +79,24 @@ protected byte[] getEndRow() { } static class TestTableSnapshotMapper extends MapReduceBase - implements TableMap { + implements TableMap { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector collector, Reporter reporter) - throws IOException { + OutputCollector collector, Reporter reporter) + throws IOException { verifyRowFromMap(key, value); collector.collect(key, NullWritable.get()); } } public static class TestTableSnapshotReducer extends MapReduceBase - implements Reducer { + implements Reducer { HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(aaa, after_zzz); @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector collector, Reporter reporter) - throws IOException { + OutputCollector collector, Reporter reporter) throws IOException { rowTracker.addRow(key.get()); } @@ -117,19 +116,17 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the 
cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. - Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -142,10 +139,9 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { @Test @Override public void testWithMockedMapReduceMultiRegion() throws Exception { - testWithMockedMapReduce( - UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); - // It does not matter whether true or false is given to setLocalityEnabledTo, - // because it is not read in testWithMockedMapReduce(). + testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); + // It does not matter whether true or false is given to setLocalityEnabledTo, + // because it is not read in testWithMockedMapReduce(). } @Test @@ -163,21 +159,18 @@ public void testWithMapReduceAndOfflineHBaseMultiRegion() throws Exception { @Override public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, - String snapshotName, Path tmpTableDir) throws Exception { + String snapshotName, Path tmpTableDir) throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, + ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); } @Override - protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) - throws Exception { + protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); JobConf job = new JobConf(util.getConfiguration()); // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that @@ -186,14 +179,13 @@ protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotNam Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir, new 
RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } // mapred doesn't support start and end keys? o.O @@ -206,7 +198,7 @@ protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotNam } private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expectedNumSplits, - byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { + byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); InputSplit[] splits = tsif.getSplits(job, 0); @@ -226,7 +218,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected if (localityEnabled) { // When localityEnabled is true, meant to verify split.getLocations() // by the following statement: - // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); + // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); // However, getLocations() of some splits could return an empty array (length is 0), // so drop the verification on length. // TODO: investigate how to verify split.getLocations() when localityEnabled is true @@ -257,18 +249,18 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected @Override protected void testWithMapReduceImpl(HBaseTestingUtil util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, - int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, + int expectedNumSplits, boolean shutdownCluster) throws Exception { doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir, numRegions, numSplitsPerRegion, expectedNumSplits, shutdownCluster); } // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, - int numSplitsPerRegion,int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { - //create the table and snapshot + // create the table and snapshot createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions); if (shutdownCluster) { @@ -283,15 +275,14 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(jobConf, TestTableSnapshotInputFormat.class); - if(numSplitsPerRegion > 1) { + if (numSplitsPerRegion > 1) { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir, new RegionSplitter.UniformSplit(), 
numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir); } jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index 28d44edb76b4..12a5650c9816 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; @@ -80,8 +79,7 @@ public static void setUpBeforeClass() throws Exception { // create and fill table for (String tableName : TABLES) { try (Table table = - TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), - INPUT_FAMILY, 4)) { + TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), INPUT_FAMILY, 4)) { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } @@ -101,19 +99,18 @@ public void tearDown() throws Exception { /** * Pass the key and value to reducer. */ - public static class ScanMapper extends - TableMapper { + public static class ScanMapper + extends TableMapper { /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. - * @param value The value is the same as the key. + * @param key The key, here "aaa", "aab" etc. + * @param value The value is the same as the key. * @param context The task context. * @throws IOException When reading the rows fails. */ @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { makeAssertions(key, value); context.write(key, key); } @@ -122,15 +119,13 @@ public void makeAssertions(ImmutableBytesWritable key, Result value) throws IOEx if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> cf = - value.getMap(); + Map>> cf = value.getMap(); if (!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + throw new IOException( + "Wrong input columns. 
Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null)); - LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> " + val); + LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); } } @@ -138,26 +133,23 @@ public void makeAssertions(ImmutableBytesWritable key, Result value) throws IOEx * Checks the last and first keys seen against the scanner boundaries. */ public static class ScanReducer - extends - Reducer { + extends Reducer { private String first = null; private String last = null; @Override - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException, InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { makeAssertions(key, values); } protected void makeAssertions(ImmutableBytesWritable key, - Iterable values) { + Iterable values) { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.debug("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.debug( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; @@ -166,8 +158,7 @@ protected void makeAssertions(ImmutableBytesWritable key, } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); cleanup(c); } @@ -175,10 +166,8 @@ protected void cleanup(Context context) throws IOException, protected void cleanup(Configuration c) { String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); - LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + - startRow + "\""); - LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + - "\""); + LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + startRow + "\""); + LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + "\""); if (startRow != null && startRow.length() > 0) { assertEquals(startRow, first); } @@ -189,41 +178,35 @@ protected void cleanup(Configuration c) { } @Test - public void testScanEmptyToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } @Test - public void testScanEmptyToAPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToAPP() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } @Test - public void testScanOBBToOPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanOBBToOPP() throws IOException, InterruptedException, ClassNotFoundException { testScan("obb", "opp", "opo"); } @Test - public void testScanYZYToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanYZYToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } /** - * Tests a MR scan using specific start and stop rows. 
- * - * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * Tests a MR scan using specific start and stop rows. nnn */ private void testScan(String start, String stop, String last) - throws IOException, InterruptedException, ClassNotFoundException { - String jobName = - "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + throws IOException, InterruptedException, ClassNotFoundException { + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -254,7 +237,7 @@ private void testScan(String start, String stop, String last) } protected void runJob(String jobName, Configuration c, List scans) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(c, jobName); initJob(scans, job); @@ -269,5 +252,4 @@ protected void runJob(String jobName, Configuration c, List scans) protected abstract void initJob(List scans, Job job) throws IOException; - } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java index e022bfdbd494..910b17a57df4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.io.DataOutput; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; @@ -33,17 +31,16 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; /** - * Input format that creates a configurable number of map tasks - * each provided with a single row of NullWritables. This can be - * useful when trying to write mappers which don't have any real - * input (eg when the mapper is simply producing random data as output) + * Input format that creates a configurable number of map tasks each provided with a single row of + * NullWritables. 
This can be useful when trying to write mappers which don't have any real input + * (eg when the mapper is simply producing random data as output) */ public class NMapInputFormat extends InputFormat<NullWritable, NullWritable> { private static final String NMAPS_KEY = "nmapinputformat.num.maps"; @Override - public RecordReader<NullWritable, NullWritable> createRecordReader( - InputSplit split, TaskAttemptContext tac) { + public RecordReader<NullWritable, NullWritable> createRecordReader(InputSplit split, + TaskAttemptContext tac) { return new SingleRecordReader<>(NullWritable.get(), NullWritable.get()); } @@ -85,8 +82,7 @@ public void write(DataOutput out) { } } - private static class SingleRecordReader<K, V> - extends RecordReader<K, V> { + private static class SingleRecordReader<K, V> extends RecordReader<K, V> { private final K key; private final V value; @@ -107,7 +103,7 @@ public K getCurrentKey() { } @Override - public V getCurrentValue(){ + public V getCurrentValue() { return value; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 63e9cdb48688..7a0615a5ff8e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public abstract class TableSnapshotInputFormatTestBase { private static final Logger LOG = LoggerFactory.getLogger(TableSnapshotInputFormatTestBase.class); protected final HBaseTestingUtil UTIL = new HBaseTestingUtil(); protected static final int NUM_REGION_SERVERS = 2; - protected static final byte[][] FAMILIES = {Bytes.toBytes("f1"), Bytes.toBytes("f2")}; + protected static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; protected FileSystem fs; protected Path rootDir; @@ -61,9 +61,9 @@ public abstract class TableSnapshotInputFormatTestBase { @Before public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS) - .createRootDir(true).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numRegionServers(NUM_REGION_SERVERS) + .numDataNodes(NUM_REGION_SERVERS).createRootDir(true).build(); UTIL.startMiniCluster(option); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -128,7 +128,7 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception { Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir); + testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir); Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); for (Path regionDir : FSUtils.getRegionDirs(fs, @@ -158,10 +158,10 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception { } public abstract void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, - String snapshotName, Path tmpTableDir) throws Exception; + String snapshotName, Path tmpTableDir) throws Exception; protected 
void testWithMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, - int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { Path tableDir = util.getDataTestDirOnTestFS(snapshotName); TableName tableName = TableName.valueOf("testWithMapReduce"); testWithMapReduceImpl(util, tableName, snapshotName, tableDir, numRegions, numSplitsPerRegion, @@ -175,26 +175,24 @@ protected static void verifyRowFromMap(ImmutableBytesWritable key, Result result while (scanner.advance()) { Cell cell = scanner.current(); - //assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + // assert that all Cells in the Result have the same key + Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength())); } for (byte[] family : FAMILIES) { byte[] actual = result.getValue(family, family); - Assert.assertArrayEquals( - "Row in snapshot does not match, expected:" + Bytes.toString(row) + " ,actual:" + Bytes - .toString(actual), row, actual); + Assert.assertArrayEquals("Row in snapshot does not match, expected:" + Bytes.toString(row) + + " ,actual:" + Bytes.toString(actual), row, actual); } } protected static void createTableAndSnapshot(HBaseTestingUtil util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, int numRegions) - throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, int numRegions) throws Exception { try { LOG.debug("Ensuring table doesn't exist."); util.deleteTable(tableName); - } catch(Exception ex) { + } catch (Exception ex) { // ignore } @@ -214,8 +212,8 @@ protected static void createTableAndSnapshot(HBaseTestingUtil util, TableName ta FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); LOG.info("snapshot"); - SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, - Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), null, + snapshotName, rootDir, fs, true); LOG.info("load different values"); byte[] value = Bytes.toBytes("after_snapshot_value"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 5bc548b1e871..7fbb5bc16255 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,11 +50,11 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCellCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellCounter.class); + HBaseClassTestRule.forClass(TestCellCounter.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1"); @@ -66,8 +66,8 @@ public class TestCellCounter { private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static Path FQ_OUTPUT_DIR; - private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator - + "output"; + private static final String OUTPUT_DIR = + "target" + File.separator + "test-data" + File.separator + "output"; private static long now = EnvironmentEdgeManager.currentTime(); @Rule @@ -87,7 +87,6 @@ public static void afterClass() throws Exception { /** * Test CellCounter all data should print to output - * */ @Test public void testCellCounter() throws Exception { @@ -250,9 +249,8 @@ public void testCellCounteOutOfTimeRange() throws Exception { p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22")); p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); - String[] args = - { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "--starttime=" + now + 1, - "--endtime=" + now + 2 }; + String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", + "--starttime=" + now + 1, "--endtime=" + now + 2 }; runCount(args); FileInputStream inputStream = @@ -269,8 +267,8 @@ public void testCellCounteOutOfTimeRange() throws Exception { private boolean runCount(String[] args) throws Exception { // need to make a copy of the configuration because to make sure // different temp dirs are used. 
- int status = ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), - args); + int status = + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), args); return status == 0; } @@ -281,7 +279,7 @@ private boolean runCount(String[] args) throws Exception { public void testCellCounterMain() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -313,9 +311,8 @@ public void testCellCounterForCompleteTable() throws Exception { final TableName sourceTable = TableName.valueOf(name.getMethodName()); String outputPath = OUTPUT_DIR + sourceTable; LocalFileSystem localFileSystem = new LocalFileSystem(); - Path outputDir = - new Path(outputPath).makeQualified(localFileSystem.getUri(), - localFileSystem.getWorkingDirectory()); + Path outputDir = new Path(outputPath).makeQualified(localFileSystem.getUri(), + localFileSystem.getWorkingDirectory()); byte[][] families = { FAMILY_A, FAMILY_B }; Table t = UTIL.createTable(sourceTable, families); try { @@ -332,7 +329,7 @@ public void testCellCounterForCompleteTable() throws Exception { String[] args = { sourceTable.getNameAsString(), outputDir.toString(), ";" }; runCount(args); FileInputStream inputStream = - new FileInputStream(outputPath + File.separator + "part-r-00000"); + new FileInputStream(outputPath + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -347,7 +344,7 @@ public void testCellCounterForCompleteTable() throws Exception { FileUtil.fullyDelete(new File(outputPath)); args = new String[] { "-D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=a, b", - sourceTable.getNameAsString(), outputDir.toString(), ";"}; + sourceTable.getNameAsString(), outputDir.toString(), ";" }; runCount(args); inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000"); String data2 = IOUtils.toString(inputStream); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index f25a9862d63a..08dbe77b5c7e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.util.ToolRunner; - import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -60,12 +59,12 @@ /** * Basic test for the CopyTable M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCopyTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTable.class); + HBaseClassTestRule.forClass(TestCopyTable.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] ROW1 = Bytes.toBytes("row1"); @@ -96,20 +95,19 @@ private void doCopyTableTest(boolean bulkload) throws Exception { final byte[] COLUMN1 = Bytes.toBytes("c1"); try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { + Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { // put rows into the first table loadData(t1, FAMILY, COLUMN1); CopyTable copy = new CopyTable(); int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); - } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", tableName1.getNameAsString() }); + } else { + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -131,15 +129,13 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { cfd.setMobEnabled(true); cfd.setMobThreshold(5); - TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(tableName1) - .setColumnFamily(cfd.build()) - .build(); - TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(tableName2) - .setColumnFamily(cfd.build()) - .build(); + TableDescriptor desc1 = + TableDescriptorBuilder.newBuilder(tableName1).setColumnFamily(cfd.build()).build(); + TableDescriptor desc2 = + TableDescriptorBuilder.newBuilder(tableName2).setColumnFamily(cfd.build()).build(); try (Table t1 = TEST_UTIL.createTable(desc1, null); - Table t2 = TEST_UTIL.createTable(desc2, null);) { + Table t2 = TEST_UTIL.createTable(desc2, null);) { // put rows into the first table for (int i = 0; i < 10; i++) { @@ -152,13 +148,12 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); - } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", tableName1.getNameAsString() }); + } else { + code = ToolRunner.run(new 
Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -169,17 +164,14 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { assertEquals(1, r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); assertEquals("compare row values between two tables", - t1.getDescriptor().getValue("row" + i), - t2.getDescriptor().getValue("row" + i)); + t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); } assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(t1), - MobTestUtil.countMobRows(t2)); + MobTestUtil.countMobRows(t2)); assertEquals("compare count of mob row values between two tables", - t1.getDescriptor().getValues().size(), - t2.getDescriptor().getValues().size()); - assertTrue("The mob row count is 0 but should be > 0", - MobTestUtil.countMobRows(t2) > 0); + t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); + assertTrue("The mob row count is 0 but should be > 0", MobTestUtil.countMobRows(t2) > 0); } finally { TEST_UTIL.deleteTable(tableName1); TEST_UTIL.deleteTable(tableName2); @@ -229,7 +221,7 @@ public void testStartStopRow() throws Exception { final byte[] row2 = Bytes.toBytesBinary("\\x01row2"); try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { + Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { // put rows into the first table Put p = new Put(row0); @@ -243,9 +235,10 @@ public void testStartStopRow() throws Exception { t1.put(p); CopyTable copy = new CopyTable(); - assertEquals(0, ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[]{"--new.name=" + tableName2, "--startrow=\\x01row1", - "--stoprow=\\x01row2", tableName1.getNameAsString()})); + assertEquals(0, + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", "--stoprow=\\x01row2", + tableName1.getNameAsString() })); // verify the data was copied into table 2 // row1 exist, row0, row2 do not exist @@ -322,7 +315,7 @@ public void testMainMethod() throws Exception { PrintStream writer = new PrintStream(data); System.setErr(writer); SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); try { CopyTable.main(emptyArgs); @@ -339,8 +332,8 @@ public void testMainMethod() throws Exception { } private boolean runCopy(String[] args) throws Exception { - int status = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), - args); + int status = + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), args); return status == 0; } @@ -370,9 +363,9 @@ private void verifyRows(Table t, byte[] family, byte[] column) throws IOExceptio private Table createTable(TableName tableName, byte[] family, boolean isMob) throws IOException { if (isMob) { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setMobEnabled(true).setMobThreshold(1).build(); + .setMobEnabled(true).setMobThreshold(1).build(); TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd).build(); + 
TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd).build(); return TEST_UTIL.createTable(desc, null); } else { return TEST_UTIL.createTable(tableName, family); @@ -380,7 +373,7 @@ private Table createTable(TableName tableName, byte[] family, boolean isMob) thr } private void testCopyTableBySnapshot(String tablePrefix, boolean bulkLoad, boolean isMob) - throws Exception { + throws Exception { TableName table1 = TableName.valueOf(tablePrefix + 1); TableName table2 = TableName.valueOf(tablePrefix + 2); Table t1 = createTable(table1, FAMILY_A, isMob); @@ -391,7 +384,7 @@ private void testCopyTableBySnapshot(String tablePrefix, boolean bulkLoad, boole boolean success; if (bulkLoad) { success = - runCopy(new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }); + runCopy(new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }); } else { success = runCopy(new String[] { "--snapshot", "--new.name=" + table2, snapshot }); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java index 46a449a43599..34d197be02fa 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGroupingTableMapper.class); + HBaseClassTestRule.forClass(TestGroupingTableMapper.class); /** * Test GroupingTableMapper class @@ -56,14 +56,14 @@ public void testGroupingTableMapper() throws Exception { Result result = mock(Result.class); @SuppressWarnings("unchecked") Mapper.Context context = - mock(Mapper.Context.class); + mock(Mapper.Context.class); context.write(any(), any()); List keyValue = new ArrayList<>(); byte[] row = {}; - keyValue.add(new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes - .toBytes("value1"))); - keyValue.add(new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes - .toBytes("value2"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes.toBytes("value1"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes.toBytes("value2"))); when(result.listCells()).thenReturn(keyValue); mapper.map(null, result, context); // template data diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java index 09b9b5eea646..37dd817f94a3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -25,18 +31,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestHBaseMRTestingUtility { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseMRTestingUtility.class); + HBaseClassTestRule.forClass(TestHBaseMRTestingUtility.class); @Test public void testMRYarnConfigsPopulation() throws IOException { @@ -55,17 +54,21 @@ public void testMRYarnConfigsPopulation() throws IOException { hbt.getConfiguration().set(entry.getKey(), entry.getValue()); } - for (Map.Entry<String, String> entry : dummyProps.entrySet()) { - assertTrue("The Configuration for key " + entry.getKey() +" and value: " + entry.getValue() + - " is not populated correctly", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry<String, String> entry : dummyProps.entrySet()) { + assertTrue( + "The Configuration for key " + entry.getKey() + " and value: " + entry.getValue() + + " is not populated correctly", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.startMiniMapReduceCluster(); // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration - for (Map.Entry<String, String> entry : dummyProps.entrySet()) { - assertFalse("The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini"+ - "cluster is started", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry<String, String> entry : dummyProps.entrySet()) { + assertFalse( + "The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini" + + "cluster is started", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.shutdownMiniMapReduceCluster(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 7adbbc62821a..54d171659d08 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -134,25 +134,24 @@ import org.slf4j.LoggerFactory; /** - * Simple test for {@link HFileOutputFormat2}. - * Sets up and runs a mapreduce job that writes hfile output. - * Creates a few inner classes to implement splits and an inputformat that - * emits keys and values like those of {@link PerformanceEvaluation}. + * Simple test for {@link HFileOutputFormat2}. Sets up and runs a mapreduce job that writes hfile + * output. 
Creates a few inner classes to implement splits and an inputformat that emits keys and + * values like those of {@link PerformanceEvaluation}. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) -public class TestHFileOutputFormat2 { +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +public class TestHFileOutputFormat2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileOutputFormat2.class); + HBaseClassTestRule.forClass(TestHFileOutputFormat2.class); private final static int ROWSPERSPLIT = 1024; public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME; - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", - "TestTable3").map(TableName::valueOf).toArray(TableName[]::new); + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", "TestTable3") + .map(TableName::valueOf).toArray(TableName[]::new); private HBaseTestingUtil util = new HBaseTestingUtil(); @@ -162,45 +161,39 @@ public class TestHFileOutputFormat2 { * Simple mapper that makes KeyValue output. */ static class RandomKVGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; - private static final int KEYLEN_DEFAULT=10; - private static final String KEYLEN_CONF="randomkv.key.length"; + private static final int KEYLEN_DEFAULT = 10; + private static final String KEYLEN_CONF = "randomkv.key.length"; private int valLength; - private static final int VALLEN_DEFAULT=10; - private static final String VALLEN_CONF="randomkv.val.length"; - private static final byte [] QUALIFIER = Bytes.toBytes("data"); + private static final int VALLEN_DEFAULT = 10; + private static final String VALLEN_CONF = "randomkv.val.length"; + private static final byte[] QUALIFIER = Bytes.toBytes("data"); private boolean multiTableMapper = false; private TableName[] tables = null; - @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException ,InterruptedException - { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -232,8 +225,7 @@ protected void map( * Simple mapper that makes Put output. 
*/ static class RandomPutGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; private static final int KEYLEN_DEFAULT = 10; @@ -247,28 +239,25 @@ static class RandomPutGeneratingMapper private TableName[] tables = null; @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException, InterruptedException { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -315,28 +304,27 @@ private void setupRandomGeneratorMapper(Job job, boolean putSortReducer) { } /** - * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if - * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. + * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if passed a keyvalue whose + * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test - public void test_LATEST_TIMESTAMP_isReplaced() - throws Exception { + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test + public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); + Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be - // changed by call to write. Check all in kv is same but ts. + // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be + // changed by call to write. Check all in kv is same but ts. KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); @@ -347,7 +335,7 @@ public void test_LATEST_TIMESTAMP_isReplaced() assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); - // Test 2. Now test passing a kv that has explicit ts. It should not be + // Test 2. Now test passing a kv that has explicit ts. It should not be // changed by call to record write. 
kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); @@ -359,26 +347,25 @@ public void test_LATEST_TIMESTAMP_isReplaced() } } - private TaskAttemptContext createTestTaskAttemptContext(final Job job) - throws Exception { + private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); + TaskAttemptContext context = + hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0"); return context; } /* - * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE - * metadata used by time-restricted scans. + * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by + * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_TIMERANGE_present"); - LOG.info("Timerange dir writing to dir: "+ dir); + Path dir = util.getDataTestDir("test_TIMERANGE_present"); + LOG.info("Timerange dir writing to dir: " + dir); try { // build a record writer using HFileOutputFormat2 Job job = new Job(conf); @@ -388,13 +375,13 @@ public void test_TIMERANGE() throws Exception { writer = hof.getRecordWriter(context); // Pass two key values with explicit times stamps - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); // value 1 with timestamp 2000 KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); - assertEquals(original,kv); + assertEquals(original, kv); // value 2 with timestamp 1000 kv = new KeyValue(b, b, b, 1000, b); @@ -415,15 +402,14 @@ public void test_TIMERANGE() throws Exception { // open as HFile Reader and pull out TIMERANGE FileInfo. HFile.Reader rd = - HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); - Map finfo = rd.getHFileInfo(); + HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); + Map finfo = rd.getHFileInfo(); byte[] range = finfo.get(Bytes.toBytes("TIMERANGE")); assertNotNull(range); // unmarshall and check values. - TimeRangeTracker timeRangeTracker =TimeRangeTracker.parseFrom(range); - LOG.info(timeRangeTracker.getMin() + - "...." + timeRangeTracker.getMax()); + TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); assertEquals(1000, timeRangeTracker.getMin()); assertEquals(2000, timeRangeTracker.getMax()); rd.close(); @@ -436,7 +422,8 @@ public void test_TIMERANGE() throws Exception { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testWritingPEData() throws Exception { Configuration conf = util.getConfiguration(); Path testDir = util.getDataTestDirOnTestFS("testWritingPEData"); @@ -455,8 +442,8 @@ public void testWritingPEData() throws Exception { byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; - Arrays.fill(startKey, (byte)0); - Arrays.fill(endKey, (byte)0xff); + Arrays.fill(startKey, (byte) 0); + Arrays.fill(endKey, (byte) 0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); // Set start and end rows for partitioner. @@ -466,49 +453,46 @@ public void testWritingPEData() throws Exception { job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); FileOutputFormat.setOutputPath(job, testDir); assertTrue(job.waitForCompletion(false)); - FileStatus [] files = fs.listStatus(testDir); + FileStatus[] files = fs.listStatus(testDir); assertTrue(files.length > 0); - //check output file num and size. + // check output file num and size. for (byte[] family : FAMILIES) { - long kvCount= 0; + long kvCount = 0; RemoteIterator iterator = - fs.listFiles(testDir.suffix("/" + new String(family)), true); + fs.listFiles(testDir.suffix("/" + new String(family)), true); while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); kvCount += reader.getEntries(); scanner.seekTo(); long perKVSize = scanner.getCell().getSerializedSize(); assertTrue("Data size of each file should not be too large.", - perKVSize * reader.getEntries() <= hregionMaxFilesize); + perKVSize * reader.getEntries() <= hregionMaxFilesize); } assertEquals("Should write expected data in output file.", ROWSPERSPLIT, kvCount); } } /** - * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into - * hfile. + * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into hfile. 
*/ @Test - public void test_WritingTagData() - throws Exception { + public void test_WritingTagData() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version"; conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("WritingTagData"); + Path dir = util.getDataTestDir("WritingTagData"); try { conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString()); // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs @@ -518,9 +502,9 @@ public void test_WritingTagData() context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList<>(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -528,10 +512,10 @@ public void test_WritingTagData() writer = null; FileSystem fs = dir.getFileSystem(conf); RemoteIterator iterator = fs.listFiles(dir, true); - while(iterator.hasNext()) { + while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); Cell cell = scanner.getCell(); @@ -547,11 +531,12 @@ public void test_WritingTagData() } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); - conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") - .toString()); + conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table = Mockito.mock(Table.class); @@ -562,7 +547,7 @@ public void testJobConfiguration() throws Exception { assertEquals(job.getNumReduceTasks(), 4); } - private byte [][] generateRandomStartKeys(int numKeys) { + private byte[][] generateRandomStartKeys(int numKeys) { Random random = ThreadLocalRandom.current(); byte[][] ret = new byte[numKeys][]; // first region start key is always empty @@ -579,39 +564,42 @@ private byte[][] generateRandomSplitKeys(int numKeys) { byte[][] ret = new byte[numKeys][]; for (int i = 0; i < numKeys; i++) { ret[i] = - PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); + PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); } return ret; } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testMRIncrementalLoad() throws Exception { LOG.info("\nStarting test testMRIncrementalLoad\n"); doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad"); } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithSplit() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n"); doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit"); } /** - * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true - * This test could only check the correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY - * is set to true. Because MiniHBaseCluster always run with single hostname (and different ports), - * it's not possible to check the region locality by comparing region locations and DN hostnames. - * When MiniHBaseCluster supports explicit hostnames parameter (just like MiniDFSCluster does), - * we could test region locality features more easily. + * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true This test could only check the + * correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY is set to true. Because + * MiniHBaseCluster always run with single hostname (and different ports), it's not possible to + * check the region locality by comparing region locations and DN hostnames. When MiniHBaseCluster + * supports explicit hostnames parameter (just like MiniDFSCluster does), we could test region + * locality features more easily. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithLocality() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithLocality\n"); doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality1"); doIncrementalLoadTest(true, true, false, "testMRIncrementalLoadWithLocality2"); } - //@Ignore("Wahtevs") + // @Ignore("Wahtevs") @Test public void testMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithPutSortReducer\n"); @@ -619,21 +607,20 @@ public void testMRIncrementalLoadWithPutSortReducer() throws Exception { } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, String tableStr) throws Exception { - doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, - Arrays.asList(tableStr)); + boolean putSortReducer, String tableStr) throws Exception { + doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, + Arrays.asList(tableStr)); } @Test public void testMultiMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMultiMRIncrementalLoadWithPutSortReducer\n"); doIncrementalLoadTest(false, false, true, - Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList - ())); + Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList())); } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, List tableStr) throws Exception { + boolean putSortReducer, List tableStr) throws Exception { util = new HBaseTestingUtil(); Configuration conf = util.getConfiguration(); conf.setBoolean(MultiTableHFileOutputFormat.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality); @@ 
-651,7 +638,7 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe hostnames[i] = "datanode_" + i; } StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); + .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); util.startMiniCluster(option); Map allTables = new HashMap<>(tableStr.size()); @@ -682,16 +669,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe assertEquals("HFOF should not touch actual table", 0, util.countRows(tableSingle)); } int numTableDirs = 0; - FileStatus[] fss = - testDir.getFileSystem(conf).listStatus(testDir); - for (FileStatus tf: fss) { + FileStatus[] fss = testDir.getFileSystem(conf).listStatus(testDir); + for (FileStatus tf : fss) { Path tablePath = testDir; if (writeMultipleTables) { if (allTables.containsKey(tf.getPath().getName())) { ++numTableDirs; tablePath = tf.getPath(); - } - else { + } else { continue; } } @@ -699,7 +684,7 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe // Make sure that a directory was created for every CF int dir = 0; fss = tablePath.getFileSystem(conf).listStatus(tablePath); - for (FileStatus f: fss) { + for (FileStatus f : fss) { for (byte[] family : FAMILIES) { if (Bytes.toString(family).equals(f.getPath().getName())) { ++dir; @@ -726,9 +711,10 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe byte[][] newSplitKeys = generateRandomSplitKeys(14); Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys); - while (util.getConnection().getRegionLocator(chosenTable.getName()) - .getAllRegionLocations().size() != 15 || - !admin.isTableAvailable(table.getName())) { + while ( + util.getConnection().getRegionLocator(chosenTable.getName()).getAllRegionLocations() + .size() != 15 || !admin.isTableAvailable(table.getName()) + ) { Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } @@ -751,11 +737,11 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe if (putSortReducer) { // no rows should be extracted assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); } else { expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); Scan scan = new Scan(); ResultScanner results = currentTable.getScanner(scan); for (Result res : results) { @@ -788,14 +774,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } admin.enableTable(currentTableName); util.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", - tableDigestBefore, util.checksumRows(currentTable)); + assertEquals("Data should remain after reopening of regions", tableDigestBefore, + util.checksumRows(currentTable)); } } finally { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { - tableInfoSingle.getRegionLocator().close(); + tableInfoSingle.getRegionLocator().close(); } - for (Entry singleTable : allTables.entrySet() ) { + for (Entry singleTable : allTables.entrySet()) { singleTable.getValue().close(); util.deleteTable(singleTable.getValue().getName()); } @@ -804,14 +790,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } } - 
private void runIncrementalPELoad(Configuration conf, List tableInfo, Path outDir, - boolean putSortReducer) throws IOException, - InterruptedException, ClassNotFoundException { + private void runIncrementalPELoad(Configuration conf, + List tableInfo, Path outDir, boolean putSortReducer) + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(conf, "testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad")); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); setupRandomGeneratorMapper(job, putSortReducer); if (tableInfo.size() > 1) { MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfo); @@ -820,68 +806,58 @@ private void runIncrementalPELoad(Configuration conf, List familyToCompression = - getMockColumnFamiliesForCompression(numCfs); + getMockColumnFamiliesForCompression(numCfs); Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForCompression(table, familyToCompression); conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.compressionDetails, - Arrays.asList(table.getDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.compressionDetails, + Arrays.asList(table.getDescriptor()))); // read back family specific compression setting from the configuration - Map retrievedFamilyToCompressionMap = HFileOutputFormat2 - .createFamilyCompressionMap(conf); + Map retrievedFamilyToCompressionMap = + HFileOutputFormat2.createFamilyCompressionMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToCompressionMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToCompressionMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForCompression(Table table, - Map familyToCompression) throws IOException { + Map familyToCompression) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToCompression.entrySet()) { ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setCompressionType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0) - .build(); + .newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setCompressionType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0).build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } @@ -889,11 +865,10 @@ private void setupMockColumnFamiliesForCompression(Table table, } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. 
Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForCompression (int numCfs) { + private Map getMockColumnFamiliesForCompression(int numCfs) { Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { @@ -911,73 +886,60 @@ private void setupMockColumnFamiliesForCompression(Table table, return familyToCompression; } - /** - * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. - * Tests that the family bloom type map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the + * family bloom type map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBloomType = - getMockColumnFamiliesForBloomType(numCfs); + Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBloomType(table, - familyToBloomType); + setupMockColumnFamiliesForBloomType(table, familyToBloomType); conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, - Arrays.asList(table.getDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, + Arrays.asList(table.getDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBloomTypeMap = - HFileOutputFormat2 - .createFamilyBloomTypeMap(conf); + HFileOutputFormat2.createFamilyBloomTypeMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBloomTypeMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBloomTypeMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForBloomType(Table table, - Map familyToDataBlockEncoding) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + Map familyToDataBlockEncoding) throws IOException { + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setBloomFilterType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0).build(); + .newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setBloomFilterType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0).build(); 
mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBloomType (int numCfs) { + private Map getMockColumnFamiliesForBloomType(int numCfs) { Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); } if (numCfs-- > 0) { - familyToBloomType.put("Family2=asdads&!AASD", - BloomType.ROWCOL); + familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL); } if (numCfs-- > 0) { familyToBloomType.put("Family3", BloomType.NONE); @@ -986,77 +948,62 @@ private void setupMockColumnFamiliesForBloomType(Table table, } /** - * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. - * Tests that the family block size map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the + * family block size map is correctly serialized into and deserialized from configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBlockSize = - getMockColumnFamiliesForBlockSize(numCfs); + Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBlockSize(table, - familyToBlockSize); + setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table - .getDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails, + Arrays.asList(table.getDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBlockSizeMap = - HFileOutputFormat2 - .createFamilyBlockSizeMap(conf); + HFileOutputFormat2.createFamilyBlockSizeMap(conf); // test that we have a value for all column families that matches with the // used mock values - for (Entry entry : familyToBlockSize.entrySet() - ) { - assertEquals("BlockSize configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBlockSizeMap.get(Bytes.toBytes(entry.getKey()))); + for (Entry entry : familyToBlockSize.entrySet()) { + assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBlockSizeMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForBlockSize(Table table, - Map familyToDataBlockEncoding) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + Map familyToDataBlockEncoding) throws IOException { + TableDescriptorBuilder 
mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setBlocksize(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setBlocksize(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0).build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBlockSize (int numCfs) { + private Map getMockColumnFamiliesForBlockSize(int numCfs) { Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { familyToBlockSize.put("Family3", 0); @@ -1065,77 +1012,68 @@ private void setupMockColumnFamiliesForBlockSize(Table table, } /** - * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. - * Tests that the family data block encoding map is correctly serialized into - * and deserialized from configuration - * - * @throws IOException + * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that + * the family data block encoding map is correctly serialized into and deserialized from + * configuration n */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = - getMockColumnFamiliesForDataBlockEncoding(numCfs); + getMockColumnFamiliesForDataBlockEncoding(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForDataBlockEncoding(table, - familyToDataBlockEncoding); + setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); TableDescriptor tableDescriptor = table.getDescriptor(); conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.dataBlockEncodingDetails, Arrays - .asList(tableDescriptor))); + HFileOutputFormat2.serializeColumnFamilyAttribute( + HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToDataBlockEncodingMap = - HFileOutputFormat2 - .createFamilyDataBlockEncodingMap(conf); + HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals("DataBlockEncoding configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals( + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForDataBlockEncoding(Table table, - Map familyToDataBlockEncoding) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + Map familyToDataBlockEncoding) throws IOException { + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setDataBlockEncoding(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setDataBlockEncoding(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0) + .build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForDataBlockEncoding (int numCfs) { + private Map getMockColumnFamiliesForDataBlockEncoding(int numCfs) { Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.FAST_DIFF); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.PREFIX); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX); } if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE); @@ -1144,12 +1082,8 @@ private void setupMockColumnFamiliesForDataBlockEncoding(Table table, } private void setupMockStartKeys(RegionLocator table) throws IOException { - byte[][] mockKeys = new byte[][] { - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaa"), - Bytes.toBytes("ggg"), - Bytes.toBytes("zzz") - }; + byte[][] mockKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaa"), + Bytes.toBytes("ggg"), Bytes.toBytes("zzz") }; Mockito.doReturn(mockKeys).when(table).getStartKeys(); } @@ -1159,10 +1093,11 @@ private void setupMockTableName(RegionLocator table) throws IOException { } /** - * Test that {@link HFileOutputFormat2} RecordWriter uses compression and - * bloom filter settings from the column family descriptor + * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings + * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; @@ -1202,8 +1137,8 @@ public void testColumnFamilySettings() throws Exception { writer = hof.getRecordWriter(context); // write out random rows - writeRandomKeyValues(writer, context, - tableDescriptorBuilder.build().getColumnFamilyNames(), ROWSPERSPLIT); + writeRandomKeyValues(writer, context, tableDescriptorBuilder.build().getColumnFamilyNames(), + ROWSPERSPLIT); writer.close(context); // Make sure that a directory was created for every CF @@ -1216,8 +1151,8 @@ public void testColumnFamilySettings() throws Exception { assertEquals(tableDescriptorBuilder.build().getColumnFamilies().length, families.length); for (FileStatus f : families) { String familyStr = f.getPath().getName(); - ColumnFamilyDescriptor hcd = tableDescriptorBuilder.build() - .getColumnFamily(Bytes.toBytes(familyStr)); + ColumnFamilyDescriptor hcd = + tableDescriptorBuilder.build().getColumnFamily(Bytes.toBytes(familyStr)); // verify that the compression on this file matches the configured // compression Path dataFilePath = fs.listStatus(f.getPath())[0].getPath(); @@ -1226,8 +1161,8 @@ public void testColumnFamilySettings() throws Exception { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals("Incorrect bloom filter used for column family " + familyStr + - "(reader: " + reader + ")", + assertEquals( + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); assertEquals( "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", @@ -1239,19 +1174,19 @@ public void testColumnFamilySettings() throws Exception { } /** - * Write random values to the writer assuming a table created using - * {@link #FAMILIES} as column family descriptors + * Write random values to the writer assuming a table created using {@link #FAMILIES} as column + * family descriptors */ private void writeRandomKeyValues(RecordWriter writer, - TaskAttemptContext context, Set families, int numRows) - throws IOException, InterruptedException { + TaskAttemptContext context, Set families, int numRows) + throws IOException, InterruptedException { byte keyBytes[] = new byte[Bytes.SIZEOF_INT]; int valLength = 10; byte valBytes[] = new byte[valLength]; int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - final byte [] qualifier = Bytes.toBytes("data"); + final byte[] qualifier = Bytes.toBytes("data"); for (int i = 0; i < numRows; i++) { Bytes.putInt(keyBytes, 0, i); Bytes.random(valBytes); @@ -1264,48 +1199,48 @@ private void writeRandomKeyValues(RecordWriter wri } /** - * This test is to test the scenario happened in HBASE-6901. - * All files are bulk loaded and excluded from minor compaction. - * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException - * will be thrown. + * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and + * excluded from minor compaction. Without the fix of HBASE-6901, an + * ArrayIndexOutOfBoundsException will be thrown. 
*/ - @Ignore ("Flakey: See HBASE-9051") @Test + @Ignore("Flakey: See HBASE-9051") + @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); util.startMiniCluster(); - try (Connection conn = ConnectionFactory.createConnection(); - Admin admin = conn.getAdmin(); - Table table = util.createTable(TABLE_NAMES[0], FAMILIES); - RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { + try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin(); + Table table = util.createTable(TABLE_NAMES[0], FAMILIES); + RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { final FileSystem fs = util.getDFSCluster().getFileSystem(); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // Generate two bulk load files - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getDescriptor(), + conn.getRegionLocator(TABLE_NAMES[0]))), + testDir, false); // Perform the actual load BulkLoadHFiles.create(conf).bulkLoad(table.getName(), testDir); } // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", - expectedRows, util.countRows(table)); + assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1350,7 +1285,8 @@ public Boolean call() throws Exception { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1358,15 +1294,15 @@ public void testExcludeMinorCompaction() throws Exception { util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()){ + Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); @@ -1385,8 +1321,7 @@ public Boolean call() throws Exception { }, 5000); // Generate a bulk load file with more rows - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); runIncrementalPELoad(conf, @@ -1398,8 +1333,8 @@ public Boolean call() throws Exception { // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", - expectedRows + 1, util.countRows(table)); + assertEquals("BulkLoadHFiles should put expected data in table", expectedRows + 1, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1457,16 +1392,16 @@ public void manualTest(String args[]) throws Exception { Table table = util.createTable(tname, FAMILIES, splitKeys); } else if ("incremental".equals(args[0])) { TableName tname = TableName.valueOf(args[1]); - try(Connection c = ConnectionFactory.createConnection(conf); - Admin admin = c.getAdmin(); - RegionLocator regionLocator = c.getRegionLocator(tname)) { + try (Connection c = ConnectionFactory.createConnection(conf); Admin admin = c.getAdmin(); + RegionLocator regionLocator = c.getRegionLocator(tname)) { Path outDir = new Path("incremental-out"); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin - .getDescriptor(tname), regionLocator)), outDir, false); + runIncrementalPELoad(conf, + Arrays + .asList(new HFileOutputFormat2.TableInfo(admin.getDescriptor(tname), regionLocator)), + outDir, false); } } else { - throw new RuntimeException( - "usage: TestHFileOutputFormat2 newtable | incremental"); + throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental"); } } @@ -1476,9 +1411,10 @@ public void testBlockStoragePolicy() throws Exception { Configuration conf = util.getConfiguration(); conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD"); - conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + - Bytes.toString(HFileOutputFormat2.combineTableNameSuffix( - TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD"); + conf.set( + HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes + .toString(HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])), + 
"ONE_SSD"); Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0])); Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1])); util.startMiniDFSCluster(3); @@ -1497,9 +1433,9 @@ public void testBlockStoragePolicy() throws Exception { // alter table cf schema to change storage policies HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); spA = getStoragePolicyName(fs, cf1Dir); spB = getStoragePolicyName(fs, cf2Dir); LOG.debug("Storage policy of cf 0: [" + spA + "]."); @@ -1560,11 +1496,12 @@ public void TestConfigurePartitioner() throws IOException { // Create a user who is not the current user String fooUserName = "foo1234"; String fooGroupName = "group1"; - UserGroupInformation - ugi = UserGroupInformation.createUserForTesting(fooUserName, new String[]{fooGroupName}); + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting(fooUserName, new String[] { fooGroupName }); // Get user's home directory Path fooHomeDirectory = ugi.doAs(new PrivilegedAction() { - @Override public Path run() { + @Override + public Path run() { try (FileSystem fs = FileSystem.get(conf)) { return fs.makeQualified(fs.getHomeDirectory()); } catch (IOException ioe) { @@ -1581,7 +1518,8 @@ public void TestConfigurePartitioner() throws IOException { splitPoints.add(writable); ugi.doAs(new PrivilegedAction() { - @Override public Void run() { + @Override + public Void run() { try { HFileOutputFormat2.configurePartitioner(job, splitPoints, false); } catch (IOException ioe) { @@ -1632,9 +1570,9 @@ public void TestConfigureCompression() throws Exception { while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); assertEquals(reader.getTrailer().getCompressionCodec().getName(), - hfileoutputformatCompression); + hfileoutputformatCompression); } } finally { if (writer != null && context != null) { @@ -1712,8 +1650,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT), config.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - assertEquals(bSpecificConfigValue, - config.get(bSpecificConfigKey)); + assertEquals(bSpecificConfigValue, config.get(bSpecificConfigKey)); } } finally { utilB.deleteTable(tableName); @@ -1819,8 +1756,7 @@ public String getClusterId() { } @Override - public Hbck getHbck() - throws IOException { + public Hbck getHbck() throws IOException { return delegate.getHbck(); } @@ -1841,4 +1777,3 @@ public boolean isAborted() { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index a2c4fbaf87e8..9cffb4089bd7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java 
@@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,12 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestHRegionPartitioner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHRegionPartitioner.class); + HBaseClassTestRule.forClass(TestHRegionPartitioner.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -65,8 +65,8 @@ public void testHRegionPartitioner() throws Exception { byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") }; - UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, - Bytes.toBytes("aa"), Bytes.toBytes("cc"), 3); + UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, Bytes.toBytes("aa"), + Bytes.toBytes("cc"), 3); HRegionPartitioner partitioner = new HRegionPartitioner<>(); Configuration configuration = UTIL.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index ff1ac7461fa6..05736f939e13 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestHashTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHashTable.class); + HBaseClassTestRule.forClass(TestHashTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestHashTable.class); @@ -85,9 +85,9 @@ public void testHashTable() throws Exception { int numRegions = 10; int numHashFiles = 3; - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } long timestamp = 1430764183454L; @@ -107,13 +107,9 @@ public void testHashTable() throws Exception { Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString()); long batchSize = 300; - int code = hashTable.run(new String[] { - "--batchsize=" + batchSize, - "--numhashfiles=" + numHashFiles, - "--scanbatch=2", - tableName.getNameAsString(), - testDir.toString() - }); + int code = + hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, + "--scanbatch=2", tableName.getNameAsString(), testDir.toString() }); assertEquals("test job failed", 0, code); FileSystem fs = TEST_UTIL.getTestFileSystem(); @@ -127,29 +123,29 @@ public void testHashTable() throws Exception { LOG.debug("partition: " + Bytes.toInt(bytes.get())); } - ImmutableMap expectedHashes - = ImmutableMap.builder() - .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) - .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) - .put(10, new 
ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) - .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) - .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) - .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) - .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) - .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) - .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) - .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) - .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) - .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) - .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) - .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) - .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) - .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) - .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) - .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) - .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) - .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) - .build(); + ImmutableMap expectedHashes = + ImmutableMap. builder() + .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) + .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) + .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) + .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) + .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) + .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) + .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) + .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) + .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) + .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) + .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) + .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) + .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) + .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) + .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) + .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) + .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) + .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) + .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) + .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) + .build(); Map actualHashes = new HashMap<>(); Path dataDir = new Path(testDir, 
HashTable.HASH_DATA_DIR); @@ -162,11 +158,11 @@ public void testHashTable() throws Exception { while (reader.next(key, hash)) { String keyString = Bytes.toHex(key.get(), key.getOffset(), key.getLength()); LOG.debug("Key: " + (keyString.isEmpty() ? "-1" : Integer.parseInt(keyString, 16)) - + " Hash: " + Bytes.toHex(hash.get(), hash.getOffset(), hash.getLength())); + + " Hash: " + Bytes.toHex(hash.get(), hash.getOffset(), hash.getLength())); int intKey = -1; if (key.getLength() > 0) { - intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); + intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); } if (actualHashes.containsKey(intKey)) { Assert.fail("duplicate key in data files: " + intKey); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 124350c4cbc4..57ecb5aefa17 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -110,12 +110,12 @@ /** * Tests the table import and table export MR job functionality */ -@Category({VerySlowMapReduceTests.class, MediumTests.class}) +@Category({ VerySlowMapReduceTests.class, MediumTests.class }) public class TestImportExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportExport.class); + HBaseClassTestRule.forClass(TestImportExport.class); private static final Logger LOG = LoggerFactory.getLogger(TestImportExport.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -134,7 +134,7 @@ public class TestImportExport { private static final long now = EnvironmentEdgeManager.currentTime(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); - public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); public static final String TEST_ATTR = "source_op"; public static final String TEST_TAG = "test_tag"; @@ -173,12 +173,8 @@ public void cleanup() throws Throwable { } /** - * Runs an export job with the specified command line args - * @param args - * @return true if job completed successfully - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException + * Runs an export job with the specified command line args n * @return true if job completed + * successfully nnn */ protected boolean runExport(String[] args) throws Throwable { // need to make a copy of the configuration because to make sure different temp dirs are used. 
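Note on the TestImportExport hunks that follow: the tests drive the Export and Import tools through CLI-style argument arrays (optional -D overrides, then table name, output directory, and max versions per key) and verify the result with Gets and Scans. A condensed sketch of the argument convention used by testSimpleCase; tableName, outputDir and importTableName stand in for the test's name.getMethodName(), FQ_OUTPUT_DIR and the derived import table name:

    // export only row1 and row2, keeping up to 1000 versions per cell
    String[] exportArgs = new String[] {
      "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1",
      "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3",
      tableName, outputDir, "1000" };
    assertTrue(runExport(exportArgs));
    // re-import into a different table, renaming the column family on the way in
    String[] importArgs = new String[] {
      "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING,
      importTableName, outputDir };
    assertTrue(runImport(importArgs));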
@@ -191,12 +187,8 @@ protected void runExportMain(String[] args) throws Throwable { } /** - * Runs an import job with the specified command line args - * @param args - * @return true if job completed successfully - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException + * Runs an import job with the specified command line args n * @return true if job completed + * successfully nnn */ boolean runImport(String[] args) throws Throwable { // need to make a copy of the configuration because to make sure different temp dirs are used. @@ -205,8 +197,7 @@ boolean runImport(String[] args) throws Throwable { } /** - * Test simple replication case with column mapping - * @throws Exception + * Test simple replication case with column mapping n */ @Test public void testSimpleCase() throws Throwable { @@ -231,20 +222,16 @@ public void testSimpleCase() throws Throwable { String[] args = new String[] { // Only export row1 & row2. "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", - "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", - name.getMethodName(), - FQ_OUTPUT_DIR, + "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", name.getMethodName(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3)) { - args = new String[] { - "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Get g = new Get(ROW1); @@ -262,20 +249,17 @@ public void testSimpleCase() throws Throwable { } /** - * Test export hbase:meta table - * - * @throws Throwable + * Test export hbase:meta table n */ @Test public void testMetaExport() throws Throwable { - String[] args = new String[] { TableName.META_TABLE_NAME.getNameAsString(), - FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = + new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } /** - * Test import data from 0.94 exported file - * @throws Throwable + * Test import data from 0.94 exported file n */ @Test public void testImport94Table() throws Throwable { @@ -293,10 +277,7 @@ public void testImport94Table() throws Throwable { fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE = name; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3)) { - String[] args = new String[] { - "-Dhbase.import.version=0.94" , - IMPORT_TABLE, FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-Dhbase.import.version=0.94", IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); // @formatter:off // exportedTableIn94Format contains 5 rows @@ -314,14 +295,12 @@ public void testImport94Table() throws Throwable { /** * Test export scanner batching */ - @Test - public void testExportScannerBatching() throws Throwable { + @Test + public void testExportScannerBatching() throws Throwable { TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(1) - .build()) - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())) + 
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(1).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName())) { Put p = new Put(ROW1); @@ -343,13 +322,11 @@ public void testExportScannerBatching() throws Throwable { @Test public void testWithDeletes() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName())) { Put p = new Put(ROW1); @@ -360,35 +337,26 @@ public void testWithDeletes() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - Delete d = new Delete(ROW1, now+3); + Delete d = new Delete(ROW1, now + 3); t.delete(d); d = new Delete(ROW1); - d.addColumns(FAMILYA, QUAL, now+2); + d.addColumns(FAMILYA, QUAL, now + 2); t.delete(d); } - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), + FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + .newBuilder(TableName.valueOf(IMPORT_TABLE)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName())) { - args = new String[] { - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -398,71 +366,60 @@ public void testWithDeletes() throws Throwable { Result r = scanner.next(); Cell[] res = r.rawCells(); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); - assertEquals(now+4, res[1].getTimestamp()); - assertEquals(now+3, res[2].getTimestamp()); + assertEquals(now + 4, res[1].getTimestamp()); + assertEquals(now + 3, res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); - assertEquals(now+2, res[4].getTimestamp()); - assertEquals(now+1, res[5].getTimestamp()); + assertEquals(now + 2, res[4].getTimestamp()); + assertEquals(now + 1, res[5].getTimestamp()); assertEquals(now, res[6].getTimestamp()); } } - @Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + 
TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); exportT.delete(d); - //Add second version of QUAL + // Add second version of QUAL p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now + 5, Bytes.toBytes("s")); exportT.put(p); - //Add second Delete family marker - d = new Delete(ROW1, now+7); + // Add second Delete family marker + d = new Delete(ROW1, now + 7); exportT.delete(d); - - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key to + // export }; assertTrue(runExport(args)); final String importTable = name.getMethodName() + "import"; desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + .newBuilder(TableName.valueOf(importTable)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importT = UTIL.getConnection().getTable(TableName.valueOf(importTable)); - args = new String[] { - importTable, - FQ_OUTPUT_DIR - }; + args = new String[] { importTable, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -473,11 +430,11 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro Result importedTResult = importedTScanner.next(); ResultScanner exportedTScanner = exportT.getScanner(s); - Result exportedTResult = exportedTScanner.next(); + Result exportedTResult = exportedTScanner.next(); try { Result.compareResults(exportedTResult, importedTResult); } catch (Throwable e) { - fail("Original and imported tables data comparision failed with error:"+e.getMessage()); + fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); @@ -485,18 +442,16 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro } /** - * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, + * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. 
*/ @Test public void testWithFilter() throws Throwable { // Create simple table to export TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()) + .build(); UTIL.getAdmin().createTable(desc); Table exportTable = UTIL.getConnection().getTable(desc.getTableName()); @@ -519,19 +474,15 @@ public void testWithFilter() throws Throwable { // Import to a new table final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importTable = UTIL.getConnection().getTable(desc.getTableName()); args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(), - "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, - FQ_OUTPUT_DIR, - "1000" }; + "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, FQ_OUTPUT_DIR, + "1000" }; assertTrue(runImport(args)); // get the count of the source table for that time range @@ -545,8 +496,8 @@ public void testWithFilter() throws Throwable { // need to re-run the export job args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + Filter.class.getName(), - "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1) + "", name.getMethodName(), - FQ_OUTPUT_DIR, "1000" }; + "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1) + "", name.getMethodName(), + FQ_OUTPUT_DIR, "1000" }; assertFalse(runImport(args)); // cleanup @@ -557,8 +508,7 @@ public void testWithFilter() throws Throwable { /** * Count the number of keyvalues in the specified table with the given filter * @param table the table to scan - * @return the number of keyvalues found - * @throws IOException + * @return the number of keyvalues found n */ private int getCount(Table table, Filter filter) throws IOException { Scan scan = new Scan(); @@ -579,7 +529,7 @@ private int getCount(Table table, Filter filter) throws IOException { public void testImportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -609,29 +559,19 @@ public void testExportScan() throws Exception { String prefix = "row"; String label_0 = "label_0"; String label_1 = "label_1"; - String[] args = { - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + String[] args = { "table", "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Scan scan = ExportUtils.getScanFromCommandLine(UTIL.getConfiguration(), args); assertEquals(version, scan.getMaxVersions()); 
assertEquals(startTime, scan.getTimeRange().getMin()); assertEquals(endTime, scan.getTimeRange().getMax()); assertEquals(true, (scan.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); - String[] argsWithLabels = { - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + assertEquals(0, + Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + String[] argsWithLabels = + { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, "table", + "outputDir", String.valueOf(version), String.valueOf(startTime), String.valueOf(endTime), + prefix }; Configuration conf = new Configuration(UTIL.getConfiguration()); // parse the "-D" options String[] otherArgs = new GenericOptionsParser(conf, argsWithLabels).getRemainingArgs(); @@ -640,7 +580,8 @@ public void testExportScan() throws Exception { assertEquals(startTime, scanWithLabels.getTimeRange().getMin()); assertEquals(endTime, scanWithLabels.getTimeRange().getMax()); assertEquals(true, (scanWithLabels.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), + Bytes.toBytesBinary(prefix))); assertEquals(2, scanWithLabels.getAuthorizations().getLabels().size()); assertEquals(label_0, scanWithLabels.getAuthorizations().getLabels().get(0)); assertEquals(label_1, scanWithLabels.getAuthorizations().getLabels().get(1)); @@ -653,7 +594,7 @@ public void testExportScan() throws Exception { public void testExportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -666,11 +607,10 @@ public void testExportMain() throws Throwable { assertEquals(-1, newSecurityManager.getExitCode()); String errMsg = data.toString(); assertTrue(errMsg.contains("Wrong number of arguments:")); - assertTrue(errMsg.contains( - "Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]")); assertTrue( - errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); + errMsg.contains("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]")); + assertTrue(errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); assertTrue(errMsg.contains("-D hbase.mapreduce.include.deleted.rows=true")); assertTrue(errMsg.contains("-D hbase.client.scanner.caching=100")); assertTrue(errMsg.contains("-D hbase.export.scanner.batch=10")); @@ -707,18 +647,18 @@ public Void answer(InvocationOnMock invocation) throws Throwable { importer.setup(ctx); Result value = mock(Result.class); KeyValue[] keys = { - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - Bytes.toBytes("value")), - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - Bytes.toBytes("value1")) }; + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), 
Bytes.toBytes("qualifier"), + Bytes.toBytes("value")), + new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + Bytes.toBytes("value1")) }; when(value.rawCells()).thenReturn(keys); importer.map(new ImmutableBytesWritable(Bytes.toBytes("Key")), value, ctx); } /** - * Test addFilterAndArguments method of Import This method set couple - * parameters into Configuration + * Test addFilterAndArguments method of Import This method set couple parameters into + * Configuration */ @Test public void testAddFilterAndArguments() throws IOException { @@ -730,7 +670,7 @@ public void testAddFilterAndArguments() throws IOException { Import.addFilterAndArguments(configuration, FilterBase.class, args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase", - configuration.get(Import.FILTER_CLASS_CONF_KEY)); + configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY)); } @@ -753,7 +693,7 @@ public void testDurability() throws Throwable { exportTable.put(put); // Run the export - String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"}; + String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000" }; assertTrue(runExport(args)); // Create the table for import @@ -762,41 +702,40 @@ public void testDurability() throws Throwable { // Register the wal listener for the import table RegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() - .getRegions(importTable.getName()).get(0).getRegionInfo(); + .getRegions(importTable.getName()).get(0).getRegionInfo(); TableWALActionListener walListener = new TableWALActionListener(region); WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); wal.registerWALActionsListener(walListener); // Run the import with SKIP_WAL - args = - new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), - importTableName, FQ_OUTPUT_DIR }; + args = new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), + importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is not visisted + // Assert that the wal is not visisted assertTrue(!walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); // Run the import with the default durability option importTableName = name.getMethodName() + "import2"; importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3); region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() - .getRegions(importTable.getName()).get(0).getRegionInfo(); + .getRegions(importTable.getName()).get(0).getRegionInfo(); wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); walListener = new TableWALActionListener(region); wal.registerWALActionsListener(walListener); args = new String[] { importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is visisted + // Assert that the wal is visisted assertTrue(walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); } } /** - * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to - * identify that an entry is written to the Write Ahead Log for 
the given table. + * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to identify + * that an entry is written to the Write Ahead Log for the given table. */ private static class TableWALActionListener implements WALActionsListener { @@ -809,8 +748,10 @@ public TableWALActionListener(RegionInfo region) { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { - if (logKey.getTableName().getNameAsString().equalsIgnoreCase( - this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { + if ( + logKey.getTableName().getNameAsString() + .equalsIgnoreCase(this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit()) + ) { isVisited = true; } } @@ -821,45 +762,39 @@ public boolean isWALVisited() { } /** - * Add cell tags to delete mutations, run export and import tool and - * verify that tags are present in import table also. + * Add cell tags to delete mutations, run export and import tool and verify that tags are present + * in import table also. * @throws Throwable throws Throwable. */ @Test public void testTagsAddition() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(exportTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); exportT.delete(d); // Run export tool with KeyValueCodecWithTags as Codec. This will ensure that export tool // will use KeyValueCodecWithTags. - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", // This will make sure that codec will encode and decode tags in rpc call. "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key to + // export }; assertTrue(runExport(args)); // Assert tag exists in exportTable @@ -867,23 +802,17 @@ public void testTagsAddition() throws Throwable { // Create an import table with MetadataController. 
final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); - TableDescriptor importTableDesc = TableDescriptorBuilder - .newBuilder(importTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor importTableDesc = TableDescriptorBuilder.newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(importTableDesc); // Run import tool. args = new String[] { // This will make sure that codec will encode and decode tags in rpc call. "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - importTable.getNameAsString(), - FQ_OUTPUT_DIR - }; + importTable.getNameAsString(), FQ_OUTPUT_DIR }; assertTrue(runImport(args)); // Make sure that tags exists in imported table. checkWhetherTagExists(importTable, true); @@ -906,7 +835,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO } } boolean deleteFound = false; - for (Cell cell: values) { + for (Cell cell : values) { if (PrivateCellUtil.isDelete(cell.getType().getCode())) { deleteFound = true; List tags = PrivateCellUtil.getTags(cell); @@ -926,7 +855,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO } /* - This co-proc will add a cell tag to delete mutation. + * This co-proc will add a cell tag to delete mutation. */ public static class MetadataController implements RegionCoprocessor, RegionObserver { @Override @@ -936,8 +865,7 @@ public Optional getRegionObserver() { @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { return; } @@ -952,7 +880,7 @@ public void preBatchMutate(ObserverContext c, } Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); @@ -970,34 +898,30 @@ public void preBatchMutate(ObserverContext c, } /** - * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string - * This means it will use no Codec. Make sure that we don't return Tags in response. + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string This means + * it will use no Codec. Make sure that we don't return Tags in response. 
* @throws Exception Exception */ @Test public void testTagsWithEmptyCodec() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor tableDesc = TableDescriptorBuilder - .newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(tableDesc); Configuration conf = new Configuration(UTIL.getConfiguration()); conf.set(RPC_CODEC_CONF_KEY, ""); conf.set(DEFAULT_CODEC_CLASS, ""); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { - //Add first version of QUAL + Table table = connection.getTable(tableName)) { + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); table.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); table.delete(d); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 3e17bd963674..b73cc7e1abb3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,21 +65,20 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithOperationAttributes implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithOperationAttributes.class); + HBaseClassTestRule.forClass(TestImportTSVWithOperationAttributes.class); private static final Logger LOG = - LoggerFactory.getLogger(TestImportTSVWithOperationAttributes.class); + LoggerFactory.getLogger(TestImportTSVWithOperationAttributes.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtil util = new HBaseTestingUtil(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -126,10 +125,10 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. 
String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest=>myvalue\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, true); @@ -142,10 +141,10 @@ public void testMROnTableWithInvalidOperationAttr() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest1=>myvalue\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, false); @@ -153,18 +152,14 @@ public void testMROnTableWithInvalidOperationAttr() throws Exception { } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. - * @param dataAvailable - * @return The Tool instance used to run the test. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n * + * Any arguments to pass BEFORE inputFile path is appended. n * @return The Tool instance used to + * run the test. */ private Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, String[] args, - int valueMultiplier, boolean dataAvailable) throws Exception { + int valueMultiplier, boolean dataAvailable) throws Exception { String table = args[args.length - 1]; Configuration conf = new Configuration(util.getConfiguration()); @@ -198,12 +193,10 @@ private Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, } /** - * Confirm ImportTsv via data in online table. - * - * @param dataAvailable + * Confirm ImportTsv via data in online table. 
n */ private static void validateTable(Configuration conf, TableName tableName, String family, - int valueMultiplier, boolean dataAvailable) throws IOException { + int valueMultiplier, boolean dataAvailable) throws IOException { LOG.debug("Validating table."); Connection connection = ConnectionFactory.createConnection(conf); @@ -224,9 +217,10 @@ private static void validateTable(Configuration conf, TableName tableName, Strin List kvs = res.listCells(); assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); - assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. verified = true; } @@ -254,7 +248,7 @@ private static void validateTable(Configuration conf, TableName tableName, Strin } public static class OperationAttributesTestController - implements RegionCoprocessor, RegionObserver { + implements RegionCoprocessor, RegionObserver { @Override public Optional getRegionObserver() { @@ -263,10 +257,11 @@ public Optional getRegionObserver() { @Override public void prePut(ObserverContext e, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { Region region = e.getEnvironment().getRegion(); - if (!region.getRegionInfo().isMetaRegion() - && !region.getRegionInfo().getTable().isSystemTable()) { + if ( + !region.getRegionInfo().isMetaRegion() && !region.getRegionInfo().getTable().isSystemTable() + ) { if (put.getAttribute(TEST_ATR_KEY) != null) { LOG.debug("allow any put to happen " + region.getRegionInfo().getRegionNameAsString()); } else { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index f981ffc222a4..0c96324c2417 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,20 +55,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithTTLs implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithTTLs.class); + HBaseClassTestRule.forClass(TestImportTSVWithTTLs.class); protected static final Logger LOG = LoggerFactory.getLogger(TestImportTSVWithTTLs.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtil util = new HBaseTestingUtil(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. 
*/ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -114,10 +113,9 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001b1000000\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -125,14 +123,14 @@ public void testMROnTable() throws Exception { } protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, - String[] args, int valueMultiplier) throws Exception { + String[] args, int valueMultiplier) throws Exception { TableName table = TableName.valueOf(args[args.length - 1]); Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = + fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); op.write(Bytes.toBytes(data)); op.close(); @@ -171,10 +169,11 @@ public Optional getRegionObserver() { @Override public void prePut(ObserverContext e, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { Region region = e.getEnvironment().getRegion(); - if (!region.getRegionInfo().isMetaRegion() - && !region.getRegionInfo().getTable().isSystemTable()) { + if ( + !region.getRegionInfo().isMetaRegion() && !region.getRegionInfo().getTable().isSystemTable() + ) { // The put carries the TTL attribute if (put.getTTL() != Long.MAX_VALUE) { return; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index 910f4f6836c8..cae349ce05d7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -76,21 +76,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithVisibilityLabels implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithVisibilityLabels.class); + HBaseClassTestRule.forClass(TestImportTSVWithVisibilityLabels.class); private static final Logger LOG = - LoggerFactory.getLogger(TestImportTSVWithVisibilityLabels.class); + LoggerFactory.getLogger(TestImportTSVWithVisibilityLabels.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtil util = new HBaseTestingUtil(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -125,10 +124,10 @@ public void setConf(Configuration conf) { public static void provisionCluster() throws Exception { conf = util.getConfiguration(); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - conf.set("hbase.superuser", "admin,"+User.getCurrent().getName()); + conf.set("hbase.superuser", "admin," + User.getCurrent().getName()); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); util.startMiniCluster(); // Wait for the labels table to become available util.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000); @@ -137,20 +136,20 @@ public static void provisionCluster() throws Exception { private static void createLabels() throws IOException, InterruptedException { PrivilegedExceptionAction action = - new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - LOG.info("Added labels "); - } catch (Throwable t) { - LOG.error("Error in adding labels" , t); - throw new IOException(t); + new PrivilegedExceptionAction() { + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + LOG.info("Added labels "); + } catch (Throwable t) { + LOG.error("Error in adding labels", t); + throw new IOException(t); + } + return null; } - return null; - } - }; + }; SUPERUSER.runAs(action); } @@ -165,10 +164,9 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. 
String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -181,9 +179,9 @@ public void testMROnTableWithDeletes() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -232,11 +230,9 @@ public void testMROnTableWithBulkload() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = new String[] { - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -247,18 +243,14 @@ public void testMROnTableWithBulkload() throws Exception { public void testBulkOutputWithTsvImporterTextMapper() throws Exception { final TableName table = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); String FAMILY = "FAM"; - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - table.getNameAsString() - }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + table.getNameAsString() }; String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n"; doMROnTableTest(util, FAMILY, data, args, 4); util.deleteTable(table); @@ -270,11 +262,10 @@ public void testMRWithOutputFormat() throws Exception { Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); @@ -286,14 +277,13 @@ public void testBulkOutputWithInvalidLabels() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = - new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = - "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; + "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, 2); util.deleteTable(tableName); @@ -304,49 +294,42 @@ public void testBulkOutputWithTsvImporterTextMapperWithInvalidLabels() throws Ex final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = - "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; + "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n"; util.createTable(tableName, FAMILY); doMROnTableTest(util, FAMILY, data, args, 1, 2); util.deleteTable(tableName); } protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, - String[] args, int valueMultiplier) throws Exception { + String[] args, int valueMultiplier) throws Exception { return doMROnTableTest(util, family, data, args, valueMultiplier, -1); } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. - * + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n * + * Any arguments to pass BEFORE inputFile path is appended. * @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check - * * @return The Tool instance used to run the test. 
*/ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, - String[] args, int valueMultiplier,int expectedKVCount) throws Exception { + String[] args, int valueMultiplier, int expectedKVCount) throws Exception { TableName table = TableName.valueOf(args[args.length - 1]); Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = + fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -381,10 +364,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, Stri } } LOG.debug("validating the table " + createdHFiles); - if (createdHFiles) - validateHFiles(fs, outputPath, family,expectedKVCount); - else - validateTable(conf, table, family, valueMultiplier); + if (createdHFiles) validateHFiles(fs, outputPath, family, expectedKVCount); + else validateTable(conf, table, family, valueMultiplier); if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) { LOG.debug("Deleting test subdirectory"); @@ -397,7 +378,7 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, Stri * Confirm ImportTsv via HFiles on fs. */ private static void validateHFiles(FileSystem fs, String outputPath, String family, - int expectedKVCount) throws IOException { + int expectedKVCount) throws IOException { // validate number and content of output columns LOG.debug("Validating HFiles."); @@ -411,20 +392,21 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String cf = elements[elements.length - 1]; foundFamilies.add(cf); assertTrue(String.format( - "HFile ouput contains a column family (%s) not present in input families (%s)", cf, - configFamilies), configFamilies.contains(cf)); + "HFile ouput contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), - hfile.getLen() > 0); + hfile.getLen() > 0); if (expectedKVCount > -1) { actualKVCount += getKVCountFromHfile(fs, hfile.getPath()); } } } if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } @@ -432,7 +414,7 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami * Confirm ImportTsv via data in online table. */ private static void validateTable(Configuration conf, TableName tableName, String family, - int valueMultiplier) throws IOException { + int valueMultiplier) throws IOException { LOG.debug("Validating table."); Table table = util.getConnection().getTable(tableName); @@ -444,7 +426,7 @@ private static void validateTable(Configuration conf, TableName tableName, Strin Scan scan = new Scan(); // Scan entire family. 
scan.addFamily(Bytes.toBytes(family)); - scan.setAuthorizations(new Authorizations("secret","private")); + scan.setAuthorizations(new Authorizations("secret", "private")); ResultScanner resScanner = table.getScanner(scan); Result[] next = resScanner.next(5); assertEquals(1, next.length); @@ -455,8 +437,8 @@ private static void validateTable(Configuration conf, TableName tableName, Strin assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. } verified = true; @@ -478,9 +460,8 @@ private static void validateTable(Configuration conf, TableName tableName, Strin /** * Method returns the total KVs in given hfile * @param fs File System - * @param p HFile path - * @return KV count in the given hfile - * @throws IOException + * @param p HFile path + * @return KV count in the given hfile n */ private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException { Configuration conf = util.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index a3427f2a5ec6..83634742b28e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,12 +71,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestImportTsv implements Configurable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTsv.class); + HBaseClassTestRule.forClass(TestImportTsv.class); private static final Logger LOG = LoggerFactory.getLogger(TestImportTsv.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); @@ -143,11 +143,10 @@ public void testMROnTableWithTimestamp() throws Exception { } @Test - public void testMROnTableWithCustomMapper() - throws Exception { + public void testMROnTableWithCustomMapper() throws Exception { util.createTable(tn, FAMILY); args.put(ImportTsv.MAPPER_CONF_KEY, - "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); + "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); doMROnTableTest(null, 3); util.deleteTable(tn); @@ -189,39 +188,33 @@ public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception { @Test public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - tn.getNameAsString(), - INPUT_FILE - }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); - assertTrue(job.getReducerClass().equals(TextSortReducer.class)); - assertTrue(job.getMapOutputValueClass().equals(Text.class)); - return 0; - } - }, args)); + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), tn.getNameAsString(), + INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); + assertTrue(job.getReducerClass().equals(TextSortReducer.class)); + assertTrue(job.getMapOutputValueClass().equals(Text.class)); + return 0; + } + }, args)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @Test public void testBulkOutputWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); String data = "KEY\u001bVALUE4\u001bVALUE8\n"; @@ -239,53 +232,49 @@ public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { conf.set(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); exception.expect(TableNotFoundException.class); assertEquals("running test job configuration failed.", 0, - ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { - @Override public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testMRWithoutAnExistingTable() throws Exception { - String[] args = - new String[] { tn.getNameAsString(), "/inputFile" }; + String[] args = new String[] { tn.getNameAsString(), "/inputFile" }; exception.expect(TableNotFoundException.class); - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new 
Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testJobConfigurationsWithDryMode() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. - String[] argsArray = new String[] { - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + String[] argsArray = + new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", - tn.getNameAsString(), - INPUT_FILE }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); - return 0; - } - }, argsArray)); + "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", tn.getNameAsString(), INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); + return 0; + } + }, argsArray)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @@ -301,8 +290,7 @@ public void testDryModeWithoutBulkOutputAndTableExists() throws Exception { } /** - * If table is not present in non-bulk mode, dry run should fail just like - * normal mode. + * If table is not present in non-bulk mode, dry run should fail just like normal mode. */ @Test public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception { @@ -311,7 +299,8 @@ public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception doMROnTableTest(null, 1); } - @Test public void testDryModeWithBulkOutputAndTableExists() throws Exception { + @Test + public void testDryModeWithBulkOutputAndTableExists() throws Exception { util.createTable(tn, FAMILY); // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); @@ -324,12 +313,11 @@ public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception } /** - * If table is not present in bulk mode and create.table is not set to yes, - * import should fail with TableNotFoundException. + * If table is not present in bulk mode and create.table is not set to yes, import should fail + * with TableNotFoundException. */ @Test - public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws - Exception { + public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws Exception { // Prepare the arguments required for the test. 
Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); @@ -382,31 +370,30 @@ public void testSkipEmptyColumns() throws Exception { } private Tool doMROnTableTest(String data, int valueMultiplier) throws Exception { - return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier,-1); + return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier, -1); } - protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, - String family, String data, Map args) throws Exception { - return doMROnTableTest(util, table, family, data, args, 1,-1); + protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, String family, + String data, Map args) throws Exception { + return doMROnTableTest(util, table, family, data, args, 1, -1); } /** - * Run an ImportTsv job and perform basic validation on the results. - * Returns the ImportTsv Tool instance so that other tests can - * inspect it for further validation as necessary. This method is static to - * insure non-reliance on instance's util/conf facilities. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. * @param args Any arguments to pass BEFORE inputFile path is appended. * @return The Tool instance used to run the test. */ - protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, - String family, String data, Map args, int valueMultiplier,int expectedKVCount) - throws Exception { + protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, String family, + String data, Map args, int valueMultiplier, int expectedKVCount) + throws Exception { Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified( - new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = + fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -440,15 +427,14 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, // Perform basic validation. If the input args did not include // ImportTsv.BULK_OUTPUT_CONF_KEY then validate data in the table. // Otherwise, validate presence of hfiles. 
- boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) && - "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); + boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) + && "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); if (args.containsKey(ImportTsv.BULK_OUTPUT_CONF_KEY)) { if (isDryRun) { assertFalse(String.format("Dry run mode, %s should not have been created.", - ImportTsv.BULK_OUTPUT_CONF_KEY), - fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); + ImportTsv.BULK_OUTPUT_CONF_KEY), fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); } else { - validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family,expectedKVCount); + validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family, expectedKVCount); } } else { validateTable(conf, table, family, valueMultiplier, isDryRun); @@ -464,8 +450,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, /** * Confirm ImportTsv via data in online table. */ - private static void validateTable(Configuration conf, TableName tableName, - String family, int valueMultiplier, boolean isDryRun) throws IOException { + private static void validateTable(Configuration conf, TableName tableName, String family, + int valueMultiplier, boolean isDryRun) throws IOException { LOG.debug("Validating table."); Connection connection = ConnectionFactory.createConnection(conf); @@ -487,7 +473,8 @@ private static void validateTable(Configuration conf, TableName tableName, assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. } if (isDryRun) { @@ -516,7 +503,7 @@ private static void validateTable(Configuration conf, TableName tableName, * Confirm ImportTsv via HFiles on fs. 
 */
  private static void validateHFiles(FileSystem fs, String outputPath, String family,
-      int expectedKVCount) throws IOException {
+    int expectedKVCount) throws IOException {
     // validate number and content of output columns
     LOG.debug("Validating HFiles.");
     Set configFamilies = new HashSet<>();
@@ -527,14 +514,11 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami
       String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR);
       String cf = elements[elements.length - 1];
       foundFamilies.add(cf);
-      assertTrue(
-        String.format(
-          "HFile output contains a column family (%s) not present in input families (%s)",
-          cf, configFamilies),
-        configFamilies.contains(cf));
+      assertTrue(String.format(
+        "HFile output contains a column family (%s) not present in input families (%s)", cf,
+        configFamilies), configFamilies.contains(cf));
       for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) {
-        assertTrue(
-          String.format("HFile %s appears to contain no data.", hfile.getPath()),
+        assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()),
           hfile.getLen() > 0);
         // count the number of KVs from all the hfiles
         if (expectedKVCount > -1) {
@@ -543,20 +527,20 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami
       }
     }
     assertTrue(String.format("HFile output does not contain the input family '%s'.", family),
-        foundFamilies.contains(family));
+      foundFamilies.contains(family));
     if (expectedKVCount > -1) {
-      assertTrue(String.format(
-        "KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount,
-        expectedKVCount), actualKVCount == expectedKVCount);
+      assertTrue(
+        String.format("KV count in output hfile=<%d> doesn't match with expected KV count=<%d>",
+          actualKVCount, expectedKVCount),
+        actualKVCount == expectedKVCount);
     }
   }

   /**
    * Method returns the total KVs in given hfile
    * @param fs File System
-   * @param p HFile path
-   * @return KV count in the given hfile
-   * @throws IOException
+   * @param p  HFile path
+   * @return KV count in the given hfile
    */
   private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
     Configuration conf = util.getConfiguration();
@@ -571,4 +555,3 @@ private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException
     return count;
   }
 }
-
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java
index a0d1cf7b6cf9..adb0589c9805 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -44,11 +44,11 @@
 /**
  * Tests for {@link TsvParser}.
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestImportTsvParser { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTsvParser.class); + HBaseClassTestRule.forClass(TestImportTsvParser.class); private void assertBytesEquals(byte[] a, byte[] b) { assertEquals(Bytes.toStringBinary(a), Bytes.toStringBinary(b)); @@ -58,11 +58,11 @@ private void checkParsing(ParsedLine parsed, Iterable expected) { ArrayList parsedCols = new ArrayList<>(); for (int i = 0; i < parsed.getColumnCount(); i++) { parsedCols.add(Bytes.toString(parsed.getLineBytes(), parsed.getColumnOffset(i), - parsed.getColumnLength(i))); + parsed.getColumnLength(i))); } if (!Iterables.elementsEqual(parsedCols, expected)) { fail("Expected: " + Joiner.on(",").join(expected) + "\n" + "Got:" - + Joiner.on(",").join(parsedCols)); + + Joiner.on(",").join(parsedCols)); } } @@ -105,8 +105,8 @@ public void testTsvParserSpecParsing() { assertTrue(parser.hasTimestamp()); assertEquals(2, parser.getTimestampKeyColumnIndex()); - parser = new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ATTRIBUTES_KEY", - "\t"); + parser = + new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ATTRIBUTES_KEY", "\t"); assertNull(parser.getFamily(0)); assertNull(parser.getQualifier(0)); assertBytesEquals(Bytes.toBytes("col1"), parser.getFamily(1)); @@ -118,8 +118,8 @@ public void testTsvParserSpecParsing() { assertEquals(2, parser.getTimestampKeyColumnIndex()); assertEquals(4, parser.getAttributesKeyColumnIndex()); - parser = new TsvParser("HBASE_ATTRIBUTES_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ROW_KEY", - "\t"); + parser = + new TsvParser("HBASE_ATTRIBUTES_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ROW_KEY", "\t"); assertNull(parser.getFamily(0)); assertNull(parser.getQualifier(0)); assertBytesEquals(Bytes.toBytes("col1"), parser.getFamily(1)); @@ -293,7 +293,7 @@ public void testTsvParseAttributesKey() throws BadTsvLineException { assertEquals(6, parse.getAttributeKeyOffset()); String[] attr = parse.getIndividualAttributes(); int i = 0; - for (String str : attr) { + for (String str : attr) { assertEquals(("key" + i + "=>" + "value" + i), str); i++; } @@ -302,7 +302,7 @@ public void testTsvParseAttributesKey() throws BadTsvLineException { @Test public void testTsvParserWithCellVisibilityCol() throws BadTsvLineException { TsvParser parser = new TsvParser( - "HBASE_ROW_KEY,col_a,HBASE_TS_KEY,HBASE_ATTRIBUTES_KEY,HBASE_CELL_VISIBILITY", "\t"); + "HBASE_ROW_KEY,col_a,HBASE_TS_KEY,HBASE_ATTRIBUTES_KEY,HBASE_CELL_VISIBILITY", "\t"); assertEquals(0, parser.getRowKeyColumnIndex()); assertEquals(4, parser.getCellVisibilityColumnIndex()); byte[] line = Bytes.toBytes("rowkey\tval_a\t1234\tkey=>value\tPRIVATE&SECRET"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java index d1f48bb299ed..87461c2735f0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,12 +46,12 @@ public class TestJarFinder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJarFinder.class); + HBaseClassTestRule.forClass(TestJarFinder.class); @Test public void testJar() throws Exception { - //picking a class that is for sure in a JAR in the classpath + // picking a class that is for sure in a JAR in the classpath String jar = JarFinder.getJar(LoggerFactory.class); Assert.assertTrue(new File(jar).exists()); } @@ -59,8 +59,7 @@ public void testJar() throws Exception { private static void delete(File file) throws IOException { if (file.getAbsolutePath().length() < 5) { throw new IllegalArgumentException( - MessageFormat.format("Path [{0}] is too short, not deleting", - file.getAbsolutePath())); + MessageFormat.format("Path [{0}] is too short, not deleting", file.getAbsolutePath())); } if (file.exists()) { if (file.isDirectory()) { @@ -73,16 +72,15 @@ private static void delete(File file) throws IOException { } if (!file.delete()) { throw new RuntimeException( - MessageFormat.format("Could not delete path [{0}]", - file.getAbsolutePath())); + MessageFormat.format("Could not delete path [{0}]", file.getAbsolutePath())); } } } @Test public void testExpandedClasspath() throws Exception { - //picking a class that is for sure in a directory in the classpath - //in this case the JAR is created on the fly + // picking a class that is for sure in a directory in the classpath + // in this case the JAR is created on the fly String jar = JarFinder.getJar(TestJarFinder.class); Assert.assertTrue(new File(jar).exists()); } @@ -90,7 +88,7 @@ public void testExpandedClasspath() throws Exception { @Test public void testExistingManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testExistingManifest"); + TestJarFinder.class.getName() + "-testExistingManifest"); delete(dir); dir.mkdirs(); @@ -109,8 +107,7 @@ public void testExistingManifest() throws Exception { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } @@ -118,7 +115,7 @@ public void testExistingManifest() throws Exception { @Test public void testNoManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testNoManifest"); + TestJarFinder.class.getName() + "-testNoManifest"); delete(dir); dir.mkdirs(); File propsFile = new File(dir, "props.properties"); @@ -128,8 +125,7 @@ public void testNoManifest() throws Exception { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index 
eca7ca6f32d6..5aa14c3561af 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,25 +31,24 @@ import org.junit.experimental.categories.Category; /** - * Tests various scan start and stop row scenarios. This is set in a scan and - * tested in a MapReduce job to see if that is handed over and done properly - * too. + * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce + * job to see if that is handed over and done properly too. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableInputFormat.class); + HBaseClassTestRule.forClass(TestMultiTableInputFormat.class); @BeforeClass public static void setupLogging() { Log4jUtils.enableDebug(MultiTableInputFormat.class); } - @Override + @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, job); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 716d60356183..bfccff65c660 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,21 +67,20 @@ /** * Tests of MultiTableInputFormatBase. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMultiTableInputFormatBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); + HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); - @Rule public final TestName name = new TestName(); + @Rule + public final TestName name = new TestName(); /** - * Test getSplits only puts up one Connection. - * In past it has put up many Connections. Each Connection setup comes with a fresh new cache - * so we have to do fresh hit on hbase:meta. Should only do one Connection when doing getSplits - * even if a MultiTableInputFormat. - * @throws IOException + * Test getSplits only puts up one Connection. In past it has put up many Connections. Each + * Connection setup comes with a fresh new cache so we have to do fresh hit on hbase:meta. Should + * only do one Connection when doing getSplits even if a MultiTableInputFormat. 
n */ @Test public void testMRSplitsConnectionCount() throws IOException { @@ -89,8 +88,7 @@ public void testMRSplitsConnectionCount() throws IOException { MultiTableInputFormatBase mtif = new MultiTableInputFormatBase() { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) - throws IOException, InterruptedException { + TaskAttemptContext context) throws IOException, InterruptedException { return super.createRecordReader(split, context); } }; @@ -125,7 +123,7 @@ public static class MRSplitsConnection implements Connection { private final Configuration configuration; static final AtomicInteger creations = new AtomicInteger(0); - MRSplitsConnection (Configuration conf, ExecutorService pool, User user) throws IOException { + MRSplitsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { this.configuration = conf; creations.incrementAndGet(); } @@ -158,33 +156,27 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws I @Override public RegionLocator getRegionLocator(final TableName tableName) throws IOException { // Make up array of start keys. We start off w/ empty byte array. - final byte [][] startKeys = new byte [][] {HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz")}; + final byte[][] startKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaaa"), + Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), + Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), + Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), + Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), + Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("zzz") }; // Make an array of end keys. We end with the empty byte array. - final byte [][] endKeys = new byte[][] { - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz"), - HConstants.EMPTY_BYTE_ARRAY}; + final byte[][] endKeys = + new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), + Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"), + Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), + Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), + Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), + Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), HConstants.EMPTY_BYTE_ARRAY }; // Now make a map of start keys to HRegionLocations. 
Let the server namber derive from // the start key. - final Map map = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (byte [] startKey: startKeys) { - HRegionLocation hrl = new HRegionLocation( - RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), + final Map map = + new TreeMap(Bytes.BYTES_COMPARATOR); + for (byte[] startKey : startKeys) { + HRegionLocation hrl = + new HRegionLocation(RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), ServerName.valueOf(Bytes.toString(startKey), 0, 0)); map.put(startKey, hrl); } @@ -192,19 +184,20 @@ public RegionLocator getRegionLocator(final TableName tableName) throws IOExcept final List locations = new ArrayList(map.values()); // Now make a RegionLocator mock backed by the abpve map and list of locations. RegionLocator mockedRegionLocator = Mockito.mock(RegionLocator.class); - Mockito.when(mockedRegionLocator.getRegionLocation(Mockito.any(byte [].class), - Mockito.anyBoolean())). - thenAnswer(new Answer() { - @Override - public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; - return map.get(key); - } - }); + Mockito + .when( + mockedRegionLocator.getRegionLocation(Mockito.any(byte[].class), Mockito.anyBoolean())) + .thenAnswer(new Answer() { + @Override + public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; + return map.get(key); + } + }); Mockito.when(mockedRegionLocator.getAllRegionLocations()).thenReturn(locations); - Mockito.when(mockedRegionLocator.getStartEndKeys()). - thenReturn(new Pair(startKeys, endKeys)); + Mockito.when(mockedRegionLocator.getStartEndKeys()) + .thenReturn(new Pair(startKeys, endKeys)); Mockito.when(mockedRegionLocator.getName()).thenReturn(tableName); return mockedRegionLocator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java index c9ba9badfa2a..fbf9e7ef64c8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); protected Path restoreDir; @@ -73,9 +73,9 @@ public void setUp() throws Exception { @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil - .initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), + ScanMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, + restoreDir); } protected Map> getSnapshotScanMapping(final List scans) { @@ -84,7 +84,7 @@ protected Map> getSnapshotScanMapping(final List @Override public String apply(Scan input) { return snapshotNameForTable( - Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); + Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); } }).asMap(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java index 455b64b915b7..409c8d7f195d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public class TestMultiTableSnapshotInputFormatImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormatImpl.class); + HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormatImpl.class); private MultiTableSnapshotInputFormatImpl subject; private Map> snapshotScans; @@ -71,19 +71,16 @@ public void setUp() throws Exception { // feels weird to introduce a RestoreSnapshotHelperFactory and inject that, which would // probably be the more "pure" // way of doing things. This is the lesser of two evils, perhaps? - doNothing().when(this.subject). 
- restoreSnapshot(any(), any(), any(), - any(), any()); + doNothing().when(this.subject).restoreSnapshot(any(), any(), any(), any(), any()); this.conf = new Configuration(); this.rootDir = new Path("file:///test-root-dir"); CommonFSUtils.setRootDir(conf, rootDir); - this.snapshotScans = ImmutableMap.>of("snapshot1", - ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("1")) - .withStopRow(Bytes.toBytes("2"))), "snapshot2", - ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("3")) - .withStopRow(Bytes.toBytes("4")), - new Scan().withStartRow(Bytes.toBytes("5")).withStopRow(Bytes.toBytes("6")))); + this.snapshotScans = ImmutableMap.> of("snapshot1", + ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("1")).withStopRow(Bytes.toBytes("2"))), + "snapshot2", + ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("3")).withStopRow(Bytes.toBytes("4")), + new Scan().withStartRow(Bytes.toBytes("5")).withStopRow(Bytes.toBytes("6")))); this.restoreDir = new Path(CommonFSUtils.getRootDir(conf), "restore-dir"); @@ -93,8 +90,8 @@ public void callSetInput() throws IOException { subject.setInput(this.conf, snapshotScans, restoreDir); } - public Map> toScanWithEquals( - Map> snapshotScans) throws IOException { + public Map> + toScanWithEquals(Map> snapshotScans) throws IOException { Map> rtn = Maps.newHashMap(); for (Map.Entry> entry : snapshotScans.entrySet()) { @@ -116,7 +113,6 @@ public static class ScanWithEquals { /** * Creates a new instance of this class while copying all values. - * * @param scan The scan instance to copy from. * @throws java.io.IOException When copying the values fails. */ @@ -131,8 +127,8 @@ public boolean equals(Object obj) { return false; } ScanWithEquals otherScan = (ScanWithEquals) obj; - return Objects.equals(this.startRow, otherScan.startRow) && Objects - .equals(this.stopRow, otherScan.stopRow); + return Objects.equals(this.startRow, otherScan.startRow) + && Objects.equals(this.stopRow, otherScan.stopRow); } @Override @@ -142,9 +138,8 @@ public int hashCode() { @Override public String toString() { - return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects. 
- toStringHelper(this).add("startRow", startRow) - .add("stopRow", stopRow).toString(); + return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects.toStringHelper(this) + .add("startRow", startRow).add("stopRow", stopRow).toString(); } } @@ -179,7 +174,7 @@ public void testSetInputCreatesRestoreDirectoriesUnderRootRestoreDir() throws Ex for (Path snapshotDir : restoreDirs.values()) { assertEquals("Expected " + snapshotDir + " to be a child of " + restoreDir, restoreDir, - snapshotDir.getParent()); + snapshotDir.getParent()); } } @@ -191,7 +186,7 @@ public void testSetInputRestoresSnapshots() throws Exception { for (Map.Entry entry : snapshotDirs.entrySet()) { verify(this.subject).restoreSnapshot(eq(this.conf), eq(entry.getKey()), eq(this.rootDir), - eq(entry.getValue()), any()); + eq(entry.getValue()), any()); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index d55fc829bfef..3db7fa7ef0b6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,33 +53,31 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestMultithreadedTableMapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); + HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); private static final Logger LOG = LoggerFactory.getLogger(TestMultithreadedTableMapper.class); - private static final HBaseTestingUtil UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest"); static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - static final int NUMBER_OF_THREADS = 10; + static final int NUMBER_OF_THREADS = 10; @BeforeClass public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
     UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
     UTIL.startMiniCluster();
-    Table table =
-      UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
-        OUTPUT_FAMILY });
+    Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME,
+      new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY });
     UTIL.loadTable(table, INPUT_FAMILY, false);
     UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME);
   }
@@ -92,29 +90,21 @@ public static void afterClass() throws Exception {
   /**
    * Pass the given key and processed record reduce
    */
-  public static class ProcessContentsMapper
-    extends TableMapper {
+  public static class ProcessContentsMapper extends TableMapper {
     /**
-     * Pass the key, and reversed value to reduce
-     *
-     * @param key
-     * @param value
-     * @param context
-     * @throws IOException
+     * Pass the key, and reversed value to reduce
      */
     @Override
-    public void map(ImmutableBytesWritable key, Result value,
-      Context context)
-      throws IOException, InterruptedException {
+    public void map(ImmutableBytesWritable key, Result value, Context context)
+      throws IOException, InterruptedException {
       if (value.size() != 1) {
         throw new IOException("There should only be one input column");
       }
-      Map>>
-        cf = value.getMap();
-      if(!cf.containsKey(INPUT_FAMILY)) {
-        throw new IOException("Wrong input columns. Missing: '" +
-          Bytes.toString(INPUT_FAMILY) + "'.");
+      Map>> cf = value.getMap();
+      if (!cf.containsKey(INPUT_FAMILY)) {
+        throw new IOException(
+          "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'.");
       }
       // Get the original value and reverse it
       String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY));
@@ -128,19 +118,16 @@ public void map(ImmutableBytesWritable key, Result value,
   }

   /**
-   * Test multithreadedTableMappper map/reduce against a multi-region table
-   * @throws IOException
-   * @throws ClassNotFoundException
-   * @throws InterruptedException
+   * Test MultithreadedTableMapper map/reduce against a multi-region table
    */
   @Test
   public void testMultithreadedTableMapper()
-    throws IOException, InterruptedException, ClassNotFoundException {
+    throws IOException, InterruptedException, ClassNotFoundException {
     runTestOnTable(UTIL.getConnection().getTable(MULTI_REGION_TABLE_NAME));
   }

   private void runTestOnTable(Table table)
-    throws IOException, InterruptedException, ClassNotFoundException {
+    throws IOException, InterruptedException, ClassNotFoundException {
     Job job = null;
     try {
       LOG.info("Before map/reduce startup");
@@ -148,15 +135,12 @@ private void runTestOnTable(Table table)
       job.setNumReduceTasks(1);
       Scan scan = new Scan();
       scan.addFamily(INPUT_FAMILY);
-      TableMapReduceUtil.initTableMapperJob(
-        table.getName(), scan,
-        MultithreadedTableMapper.class, ImmutableBytesWritable.class,
-        Put.class, job);
+      TableMapReduceUtil.initTableMapperJob(table.getName(), scan, MultithreadedTableMapper.class,
+        ImmutableBytesWritable.class, Put.class, job);
       MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
       MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS);
-      TableMapReduceUtil.initTableReducerJob(
-        table.getName().getNameAsString(),
-        IdentityTableReducer.class, job);
+      TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(),
+        IdentityTableReducer.class, job);
       FileOutputFormat.setOutputPath(job, new Path("test"));
       LOG.info("Started " + table.getName());
       assertTrue(job.waitForCompletion(true));
@@ -166,8 +150,7 @@ private void runTestOnTable(Table table)
} finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -184,8 +167,8 @@ private void verify(TableName tableName) throws IOException { verified = true; break; } catch (NullPointerException e) { - // If here, a cell was empty. Presume its because updates came in - // after the scanner had been opened. Wait a while and retry. + // If here, a cell was empty. Presume its because updates came in + // after the scanner had been opened. Wait a while and retry. LOG.debug("Verification attempt failed: " + e.getMessage()); } try { @@ -199,15 +182,11 @@ private void verify(TableName tableName) throws IOException { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. - * - * @param table Table to scan. - * @throws IOException - * @throws NullPointerException if we failed to find a cell value + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. + * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value */ - private void verifyAttempt(final Table table) - throws IOException, NullPointerException { + private void verifyAttempt(final Table table) throws IOException, NullPointerException { Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); scan.addFamily(OUTPUT_FAMILY); @@ -215,37 +194,34 @@ private void verifyAttempt(final Table table) try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (LOG.isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); - }else if (count == 1) { + } else if (count == 1) { secondValue = CellUtil.cloneValue(kv); - }else if (count == 2) { + } else if (count == 2) { break; } count++; } String first = ""; if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } first = Bytes.toString(firstValue); String second = ""; if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -254,9 +230,9 @@ private void verifyAttempt(final Table table) second = Bytes.toString(secondReversed); if (first.compareTo(second) != 0) { if (LOG.isDebugEnabled()) { - LOG.debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + LOG.debug( + "second key is not the reverse of first. 
row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } @@ -267,4 +243,3 @@ private void verifyAttempt(final Table table) } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index 6cce69660895..f841bdbb61dc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,28 +42,25 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestRegionSizeCalculator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSizeCalculator.class); + HBaseClassTestRule.forClass(TestRegionSizeCalculator.class); private Configuration configuration = new Configuration(); private final long megabyte = 1024L * 1024L; - private final ServerName sn = ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, - ServerName.NON_STARTCODE); + private final ServerName sn = + ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, ServerName.NON_STARTCODE); @Test public void testSimpleTestCase() throws Exception { RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3"); - Admin admin = mockAdmin( - mockRegion("region1", 123), - mockRegion("region3", 1232), - mockRegion("region2", 54321) - ); + Admin admin = mockAdmin(mockRegion("region1", 123), mockRegion("region3", 1232), + mockRegion("region2", 54321)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); @@ -76,41 +73,36 @@ public void testSimpleTestCase() throws Exception { assertEquals(3, calculator.getRegionSizeMap().size()); } - /** - * When size of region in megabytes is larger than largest possible integer there could be - * error caused by lost of precision. - * */ + * When size of region in megabytes is larger than largest possible integer there could be error + * caused by lost of precision. + */ @Test public void testLargeRegion() throws Exception { RegionLocator regionLocator = mockRegionLocator("largeRegion"); - Admin admin = mockAdmin( - mockRegion("largeRegion", Integer.MAX_VALUE) - ); + Admin admin = mockAdmin(mockRegion("largeRegion", Integer.MAX_VALUE)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); assertEquals(((long) Integer.MAX_VALUE) * megabyte, - calculator.getRegionSize(Bytes.toBytes("largeRegion"))); + calculator.getRegionSize(Bytes.toBytes("largeRegion"))); } - /** When calculator is disabled, it should return 0 for each request.*/ + /** When calculator is disabled, it should return 0 for each request. 
*/ @Test public void testDisabled() throws Exception { String regionName = "cz.goout:/index.html"; RegionLocator table = mockRegionLocator(regionName); - Admin admin = mockAdmin( - mockRegion(regionName, 999) - ); + Admin admin = mockAdmin(mockRegion(regionName, 999)); - //first request on enabled calculator + // first request on enabled calculator RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); assertEquals(999 * megabyte, calculator.getRegionSize(Bytes.toBytes(regionName))); - //then disabled calculator. + // then disabled calculator. configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false); RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin); assertEquals(0 * megabyte, disabledCalculator.getRegionSize(Bytes.toBytes(regionName))); @@ -120,7 +112,7 @@ public void testDisabled() throws Exception { /** * Makes some table with given region names. - * */ + */ private RegionLocator mockRegionLocator(String... regionNames) throws IOException { RegionLocator mockedTable = Mockito.mock(RegionLocator.class); when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable")); @@ -138,7 +130,7 @@ private RegionLocator mockRegionLocator(String... regionNames) throws IOExceptio /** * Creates mock returning RegionLoad info about given servers. - */ + */ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { Admin mockAdmin = Mockito.mock(Admin.class); List regionLoads = new ArrayList<>(); @@ -147,15 +139,14 @@ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { } when(mockAdmin.getConfiguration()).thenReturn(configuration); when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable"))) - .thenReturn(regionLoads); + .thenReturn(regionLoads); return mockAdmin; } /** * Creates mock of region with given name and size. - * - * @param fileSizeMb number of megabytes occupied by region in file store in megabytes - * */ + * @param fileSizeMb number of megabytes occupied by region in file store in megabytes + */ private RegionMetrics mockRegion(String regionName, int fileSizeMb) { RegionMetrics region = Mockito.mock(RegionMetrics.class); when(region.getRegionName()).thenReturn(Bytes.toBytes(regionName)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java index c3abf4d544e0..df9d15978578 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -43,25 +44,17 @@ /** * Basic test of {@link RoundRobinTableInputFormat}; i.e. RRTIF. 
*/ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRoundRobinTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); + HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); private static final int SERVERS_COUNT = 5; - private static final String[] KEYS = { - "aa", "ab", "ac", "ad", "ae", - "ba", "bb", "bc", "bd", "be", - "ca", "cb", "cc", "cd", "ce", - "da", "db", "dc", "dd", "de", - "ea", "eb", "ec", "ed", "ee", - "fa", "fb", "fc", "fd", "fe", - "ga", "gb", "gc", "gd", "ge", - "ha", "hb", "hc", "hd", "he", - "ia", "ib", "ic", "id", "ie", - "ja", "jb", "jc", "jd", "je", "jf" - }; + private static final String[] KEYS = { "aa", "ab", "ac", "ad", "ae", "ba", "bb", "bc", "bd", "be", + "ca", "cb", "cc", "cd", "ce", "da", "db", "dc", "dd", "de", "ea", "eb", "ec", "ed", "ee", "fa", + "fb", "fc", "fd", "fe", "ga", "gb", "gc", "gd", "ge", "ha", "hb", "hc", "hd", "he", "ia", "ib", + "ic", "id", "ie", "ja", "jb", "jc", "jd", "je", "jf" }; /** * Test default behavior. @@ -78,8 +71,8 @@ public void testRoundRobinSplit() throws IOException, InterruptedException { Arrays.sort(copy.toArray(new InputSplit[0]), new SplitComparator()); // Assert the sort is retained even after passing through SplitComparator. for (int i = 0; i < sortedSplits.size(); i++) { - TableSplit sortedTs = (TableSplit)sortedSplits.get(i); - TableSplit copyTs = (TableSplit)copy.get(i); + TableSplit sortedTs = (TableSplit) sortedSplits.get(i); + TableSplit copyTs = (TableSplit) copy.get(i); assertEquals(sortedTs.getEncodedRegionName(), copyTs.getEncodedRegionName()); } } @@ -90,17 +83,17 @@ public void testRoundRobinSplit() throws IOException, InterruptedException { private List createSplits() { List splits = new ArrayList<>(KEYS.length - 1); for (int i = 0; i < KEYS.length - 1; i++) { - InputSplit split = new TableSplit(TableName.valueOf("test"), new Scan(), - Bytes.toBytes(KEYS[i]), Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), - "", 0); + InputSplit split = + new TableSplit(TableName.valueOf("test"), new Scan(), Bytes.toBytes(KEYS[i]), + Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), "", 0); splits.add(split); } return splits; } private void testDistribution(List list) throws IOException, InterruptedException { - for (int i = 0; i < KEYS.length/SERVERS_COUNT; i++) { - int [] counts = new int[SERVERS_COUNT]; + for (int i = 0; i < KEYS.length / SERVERS_COUNT; i++) { + int[] counts = new int[SERVERS_COUNT]; for (int j = i * SERVERS_COUNT; j < i * SERVERS_COUNT + SERVERS_COUNT; j++) { counts[Integer.parseInt(list.get(j).getLocations()[0]) - 1]++; } @@ -120,21 +113,21 @@ private static class SplitComparator implements Comparator { public int compare(InputSplit o1, InputSplit o2) { try { return Long.compare(o1.getLength(), o2.getLength()); - } catch (IOException|InterruptedException e) { + } catch (IOException | InterruptedException e) { throw new RuntimeException("exception in compare", e); } } } /** - * Assert that lengths are descending. RRTIF writes lengths in descending order so any - * subsequent sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps - * our RRTIF ordering. + * Assert that lengths are descending. RRTIF writes lengths in descending order so any subsequent + * sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps our RRTIF + * ordering. 
*/ private void assertLengthDescending(List list) throws IOException, InterruptedException { long previousLength = Long.MAX_VALUE; - for (InputSplit is: list) { + for (InputSplit is : list) { long length = is.getLength(); assertTrue(previousLength + " " + length, previousLength > length); previousLength = length; @@ -165,13 +158,13 @@ public void testConfigureUnconfigure() { } private void checkRetainsBooleanValue(JobContext jobContext, RoundRobinTableInputFormat rrtif, - final boolean b) { - jobContext.getConfiguration(). - setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); + final boolean b) { + jobContext.getConfiguration() + .setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); rrtif.configure(); rrtif.unconfigure(); - String value = jobContext.getConfiguration(). - get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); + String value = jobContext.getConfiguration() + .get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); assertEquals(b, Boolean.valueOf(value)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index d33f30a70759..49daac88e370 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -50,7 +49,7 @@ /** * Test the rowcounter map reduce job. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestRowCounter { @ClassRule @@ -88,133 +87,96 @@ public static void tearDownAfterClass() throws Exception { } /** - * Test a case when no column was specified in command line arguments. - * - * @throws Exception + * Test a case when no column was specified in command line arguments. n */ @Test public void testRowCounterNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runRowCount(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * - * @throws Exception + * Test a case when the column specified in command line arguments is exclusive for few rows. n */ @Test public void testRowCounterExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * - * @throws Exception + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. 
 */
  @Test
  public void testRowCounterColumnWithColonInQualifier() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN
-    };
+    String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN };
     runRowCount(args, 8);
   }

   /**
-   * Test a case when the column specified in command line arguments is not part
-   * of first KV for a row.
-   *
-   * @throws Exception
+   * Test a case when the column specified in command line arguments is not part of first KV for a
+   * row.
    */
   @Test
   public void testRowCounterHiddenColumn() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, COL_FAM + ":" + COL2
-    };
+    String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 };
     runRowCount(args, 10);
   }
-
   /**
-   * Test a case when the column specified in command line arguments is
-   * exclusive for few rows and also a row range filter is specified
-   *
-   * @throws Exception
+   * Test a case when the column specified in command line arguments is exclusive for few rows and
+   * also a row range filter is specified
    */
   @Test
   public void testRowCounterColumnAndRowRange() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1
-    };
+    String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 };
     runRowCount(args, 8);
   }

   /**
-   * Test a case when a range is specified with single range of start-end keys
-   * @throws Exception
+   * Test a case when a range is specified with single range of start-end keys
    */
   @Test
   public void testRowCounterRowSingleRange() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, "--range=\\x00row1,\\x00row3"
-    };
+    String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" };
     runRowCount(args, 2);
   }

   /**
-   * Test a case when a range is specified with single range with end key only
-   * @throws Exception
+   * Test a case when a range is specified with single range with end key only
    */
   @Test
   public void testRowCounterRowSingleRangeUpperBound() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, "--range=,\\x00row3"
-    };
+    String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" };
     runRowCount(args, 3);
   }

   /**
-   * Test a case when a range is specified with two ranges where one range is with end key only
-   * @throws Exception
+   * Test a case when a range is specified with two ranges where one range is with end key only
    */
   @Test
   public void testRowCounterRowMultiRangeUpperBound() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7"
-    };
+    String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" };
     runRowCount(args, 5);
   }

   /**
-   * Test a case when a range is specified with multiple ranges of start-end keys
-   * @throws Exception
+   * Test a case when a range is specified with multiple ranges of start-end keys
    */
   @Test
   public void testRowCounterRowMultiRange() throws Exception {
-    String[] args = new String[] {
-        TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8"
-    };
+    String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" };
     runRowCount(args, 5);
   }

   /**
-   * Test a case when a range is specified with multiple ranges of start-end keys;
-   * one range is filled, another two are not
-   * @throws Exception
+   * Test a case when a range is specified with multiple ranges of start-end keys; one range is
+   * filled, another two are not
    */
   @Test
   public void
testRowCounterRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runRowCount(args, 2); } @@ -222,20 +184,16 @@ public void testRowCounterRowMultiEmptyRange() throws Exception { public void testRowCounter10kRowRange() throws Exception { String tableName = TABLE_NAME + "10k"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runRowCount(args, 3); } /** - * Test a case when the timerange is specified with --starttime and --endtime options - * - * @throws Exception + * Test a case when the timerange is specified with --starttime and --endtime options n */ @Test public void testRowCounterTimeRange() throws Exception { @@ -248,7 +206,8 @@ public void testRowCounterTimeRange() throws Exception { long ts; // clean up content of TABLE_NAME - Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); + Table table = + TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); ts = EnvironmentEdgeManager.currentTime(); put1.addColumn(family, col1, ts, Bytes.toBytes("val1")); @@ -262,58 +221,43 @@ public void testRowCounterTimeRange() throws Exception { table.put(put3); table.close(); - String[] args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + ts }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runRowCount(args, 2); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, + "--starttime=" + (ts - 30 * 1000), "--endtime=" + (ts + 30 * 1000), }; runRowCount(args, 3); } /** * Run the RowCounter map reduce job and verify the row count. - * - * @param args the command line arguments to be used for rowcounter job. - * @param expectedCount the expected row count (result of map reduce job). - * @throws Exception + * @param args the command line arguments to be used for rowcounter job. + * @param expectedCount the expected row count (result of map reduce job). 
n */ private void runRowCount(String[] args, int expectedCount) throws Exception { RowCounter rowCounter = new RowCounter(); rowCounter.setConf(TEST_UTIL.getConfiguration()); - args = Arrays.copyOf(args, args.length+1); - args[args.length-1]="--expectedCount=" + expectedCount; + args = Arrays.copyOf(args, args.length + 1); + args[args.length - 1] = "--expectedCount=" + expectedCount; long start = EnvironmentEdgeManager.currentTime(); int result = rowCounter.run(args); long duration = EnvironmentEdgeManager.currentTime() - start; LOG.debug("row count duration (ms): " + duration); - assertTrue(result==0); + assertTrue(result == 0); } /** * Run the RowCounter map reduce job and verify the row count. - * - * @param args the command line arguments to be used for rowcounter job. + * @param args the command line arguments to be used for rowcounter job. * @param expectedCount the expected row count (result of map reduce job). * @throws Exception in case of any unexpected error. */ @@ -330,66 +274,50 @@ private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) t @Test public void testCreateSubmittableJobWithArgsNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runCreateSubmittableJobWithArgs(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * + * Test a case when the column specified in command line arguments is exclusive for few rows. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runCreateSubmittableJobWithArgs(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified * @throws Exception in case of any unexpected error. 
*/ @Test public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } @@ -399,9 +327,7 @@ public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception */ @Test public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -411,9 +337,7 @@ public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { */ @Test public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 3); } @@ -423,9 +347,7 @@ public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Ex */ @Test public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runCreateSubmittableJobWithArgs(args, 5); } @@ -435,22 +357,18 @@ public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exc */ @Test public void testCreateSubmittableJobWithArgsRowMultiRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runCreateSubmittableJobWithArgs(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -458,19 +376,16 @@ public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exceptio public void testCreateSubmittableJobWithArgs10kRowRange() throws Exception { String tableName = TABLE_NAME + "CreateSubmittableJobWithArgs10kRowRange"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runCreateSubmittableJobWithArgs(args, 3); } /** * Test a case when the timerange is specified with --starttime and --endtime options - * * @throws Exception in case of any unexpected error. 
*/ @Test @@ -483,7 +398,7 @@ public void testCreateSubmittableJobWithArgsTimeRange() throws Exception { long ts; - String tableName = TABLE_NAME_TS_RANGE+"CreateSubmittableJobWithArgs"; + String tableName = TABLE_NAME_TS_RANGE + "CreateSubmittableJobWithArgs"; // clean up content of TABLE_NAME Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM)); @@ -499,41 +414,26 @@ public void testCreateSubmittableJobWithArgsTimeRange() throws Exception { table.put(put3); table.close(); - String[] args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = + new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, "--endtime=" + ts }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runCreateSubmittableJobWithArgs(args, 2); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + (ts - 30 * 1000), + "--endtime=" + (ts + 30 * 1000), }; runCreateSubmittableJobWithArgs(args, 3); } /** - * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have - * two columns, Few have one. - * - * @param table - * @throws IOException + * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have two columns, Few have + * one. 
nn */ private static void writeRows(Table table, int totalRows, int rowsWithOneCol) throws IOException { final byte[] family = Bytes.toBytes(COL_FAM); @@ -570,7 +470,7 @@ private static void writeRows(Table table, int totalRows, int rowsWithOneCol) th @Test public void testImportMain() throws Exception { SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); String[] args = {}; try { @@ -602,13 +502,13 @@ public void testHelp() throws Exception { ByteArrayOutputStream data = new ByteArrayOutputStream(); PrintStream stream = new PrintStream(data); System.setOut(stream); - String[] args = {"-h"}; + String[] args = { "-h" }; runRowCount(args, 0); assertUsageContent(data.toString()); - args = new String[]{"--help"}; + args = new String[] { "--help" }; runRowCount(args, 0); assertUsageContent(data.toString()); - }finally { + } finally { System.setOut(oldPrintStream); } } @@ -616,27 +516,27 @@ public void testHelp() throws Exception { @Test public void testInvalidTable() throws Exception { try { - String[] args = {"invalid"}; + String[] args = { "invalid" }; runRowCount(args, 0); fail("RowCounter should had failed with invalid table."); - }catch (Throwable e){ + } catch (Throwable e) { assertTrue(e instanceof AssertionError); } } private void assertUsageContent(String usage) { - assertTrue(usage.contains("usage: hbase rowcounter " - + " [options] [ ...]")); + assertTrue(usage + .contains("usage: hbase rowcounter " + " [options] [ ...]")); assertTrue(usage.contains("Options:\n")); - assertTrue(usage.contains("--starttime= " - + "starting time filter to start counting rows from.\n")); + assertTrue(usage.contains( + "--starttime= " + "starting time filter to start counting rows from.\n")); assertTrue(usage.contains("--endtime= " + "end time filter limit, to only count rows up to this timestamp.\n")); - assertTrue(usage.contains("--range= " - + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); + assertTrue(usage + .contains("--range= " + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); assertTrue(usage.contains("--expectedCount= expected number of rows to be count.\n")); - assertTrue(usage.contains("For performance, " - + "consider the following configuration properties:\n")); + assertTrue( + usage.contains("For performance, " + "consider the following configuration properties:\n")); assertTrue(usage.contains("-Dhbase.client.scanner.caching=100\n")); assertTrue(usage.contains("-Dmapreduce.map.speculative=false\n")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index f93e76ae8031..305d6ac19761 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ /** * Test of simple partitioner. 
 */
-@Category({MapReduceTests.class, SmallTests.class})
+@Category({ MapReduceTests.class, SmallTests.class })
 public class TestSimpleTotalOrderPartitioner {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestSimpleTotalOrderPartitioner.class);
+    HBaseClassTestRule.forClass(TestSimpleTotalOrderPartitioner.class);

   protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
   Configuration conf = TEST_UTIL.getConfiguration();
@@ -48,7 +48,7 @@ public class TestSimpleTotalOrderPartitioner {
   public void testSplit() throws Exception {
     String start = "a";
     String end = "{";
-    SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>();
+    SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>();
     this.conf.set(SimpleTotalOrderPartitioner.START, start);
     this.conf.set(SimpleTotalOrderPartitioner.END, end);

@@ -69,14 +69,12 @@ public void testSplit() throws Exception {
     partition = p.getPartition(q, HConstants.EMPTY_BYTE_ARRAY, 3);
     assertEquals(2, partition);
     // What about end and start keys.
-    ImmutableBytesWritable startBytes =
-      new ImmutableBytesWritable(Bytes.toBytes(start));
+    ImmutableBytesWritable startBytes = new ImmutableBytesWritable(Bytes.toBytes(start));
     partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 2);
     assertEquals(0, partition);
     partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 3);
     assertEquals(0, partition);
-    ImmutableBytesWritable endBytes =
-      new ImmutableBytesWritable(Bytes.toBytes("z"));
+    ImmutableBytesWritable endBytes = new ImmutableBytesWritable(Bytes.toBytes("z"));
     partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 2);
     assertEquals(1, partition);
     partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 3);
@@ -84,4 +82,3 @@ public void testSplit() throws Exception {
   }

 }
-
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
index da0d7b121d71..ca2dbdb0f671 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -59,7 +58,7 @@ public class TestSyncTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncTable.class); + HBaseClassTestRule.forClass(TestSyncTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestSyncTable.class); @@ -80,9 +79,9 @@ public static void afterClass() throws Exception { } private static byte[][] generateSplits(int numRows, int numRegions) { - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } return splitRows; } @@ -117,8 +116,8 @@ public void testSyncTableDoDeletesFalse() throws Exception { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doDeletes=false"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--doDeletes=false"); assertTargetDoDeletesFalse(100, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -140,8 +139,7 @@ public void testSyncTableDoPutsFalse() throws Exception { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doPuts=false"); + Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doPuts=false"); assertTargetDoPutsFalse(70, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -163,8 +161,8 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { long current = EnvironmentEdgeManager.currentTime(); writeTestData(sourceTableName, targetTableName, current - 1000, current); hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true"); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--ignoreTimestamps=true"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--ignoreTimestamps=true"); assertEqualTables(90, sourceTableName, targetTableName, true); assertEquals(50, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -179,7 +177,7 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { } private void assertEqualTables(int expectedRows, TableName sourceTableName, - TableName targetTableName, boolean ignoreTimestamps) throws Exception { + TableName targetTableName, boolean ignoreTimestamps) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); @@ -191,27 +189,23 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, Result targetRow = targetScanner.next(); LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); + + " cells:" + sourceRow); LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); + + " cells:" + targetRow); if (sourceRow == null) { - Assert.fail("Expected " + expectedRows - + " source rows but only found " + i); + Assert.fail("Expected " + expectedRows + " source rows but only found " + i); } if (targetRow == null) { - Assert.fail("Expected " + expectedRows - + " target rows but only found " + i); + Assert.fail("Expected " + expectedRows + " target rows but only found " + i); } Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell = sourceCells[j]; @@ -240,13 +234,13 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, } Result sourceRow = sourceScanner.next(); if (sourceRow != null) { - Assert.fail("Source table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(sourceRow.getRow())); + Assert.fail("Source table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(sourceRow.getRow())); } Result targetRow = targetScanner.next(); if (targetRow != null) { - Assert.fail("Target table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(targetRow.getRow())); + Assert.fail("Target table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(targetRow.getRow())); } sourceScanner.close(); targetScanner.close(); @@ -255,7 +249,7 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, } private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableName, - TableName targetTableName) throws Exception { + TableName targetTableName) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); @@ -266,19 +260,17 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN int rowsCount = 0; while (targetRow != null) { rowsCount++; - //only compares values for existing rows, skipping rows existing on - //target only that were not deleted given --doDeletes=false + // only compares values for existing rows, skipping rows existing on + // target only that were not deleted given --doDeletes=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { targetRow = targetScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" - : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" - : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); @@ -287,18 +279,16 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + targetRowKey + " should have more cells in " - + "target than in source"); + Assert + .fail("Row " + targetRowKey + " should have more cells in " + "target than in source"); } } else { if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } } for (int j = 0; j < sourceCells.length; j++) { @@ -314,7 +304,7 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { Assert.fail("Qualifiers don't match"); } - if (targetRowKey < 80 && targetRowKey >= 90){ + if (targetRowKey < 80 && targetRowKey >= 90) { if (!CellUtil.matchingTimestamp(sourceCell, targetCell)) { Assert.fail("Timestamps don't match"); } @@ -323,16 +313,14 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " - + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); Throwables.propagate(t); } } targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); @@ -340,7 +328,7 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN } private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName, - TableName targetTableName) throws Exception { + TableName targetTableName) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); @@ -350,22 +338,18 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Result sourceRow = sourceScanner.next(); int rowsCount = 0; - while (targetRow!=null) { - //only compares values for existing rows, skipping rows existing on - //source only that were not added to target given --doPuts=false + while (targetRow != null) { + // only compares values for existing rows, skipping rows existing on + // source only that were not added to target given --doPuts=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { sourceRow = sourceScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? - "null" : - Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? 
- "null" : - Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); LOG.debug("rowsCount: " + rowsCount); @@ -376,27 +360,26 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); Assert.fail("There shouldn't exist any rows between 40 and 60, since " - + "Puts are disabled and Deletes are enabled."); + + "Puts are disabled and Deletes are enabled."); } else if (targetRowKey >= 60 && targetRowKey < 70) { if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " shouldn't have same number of cells."); + Assert.fail( + "Row " + Bytes.toInt(sourceRow.getRow()) + " shouldn't have same number of cells."); } } else if (targetRowKey >= 80 && targetRowKey < 90) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); Assert.fail("There should be no rows between 80 and 90 on target, as " - + "these had different timestamps and should had been deleted."); + + "these had different timestamps and should had been deleted."); } else if (targetRowKey >= 90 && targetRowKey < 100) { for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell = sourceCells[j]; Cell targetCell = targetCells[j]; if (CellUtil.matchingValue(sourceCell, targetCell)) { Assert.fail("Cells values should not match for rows between " - + "90 and 100. Target row id: " + (Bytes.toInt(targetRow - .getRow()))); + + "90 and 100. Target row id: " + (Bytes.toInt(targetRow.getRow()))); } } } else { @@ -420,8 +403,7 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug( - "Source cell: " + sourceCell + " target cell: " + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); Throwables.propagate(t); } } @@ -430,21 +412,20 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); targetTable.close(); } - private Counters syncTables(TableName sourceTableName, TableName targetTableName, - Path testDir, String... options) throws Exception { + private Counters syncTables(TableName sourceTableName, TableName targetTableName, Path testDir, + String... 
options) throws Exception { SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+3); + String[] args = Arrays.copyOf(options, options.length + 3); args[options.length] = testDir.toString(); - args[options.length+1] = sourceTableName.getNameAsString(); - args[options.length+2] = targetTableName.getNameAsString(); + args[options.length + 1] = sourceTableName.getNameAsString(); + args[options.length + 2] = targetTableName.getNameAsString(); int code = syncTable.run(args); assertEquals("sync table job failed", 0, code); @@ -453,12 +434,12 @@ private Counters syncTables(TableName sourceTableName, TableName targetTableName } private void hashSourceTable(TableName sourceTableName, Path testDir, String... options) - throws Exception { + throws Exception { int numHashFiles = 3; - long batchSize = 100; // should be 2 batches per region + long batchSize = 100; // should be 2 batches per region int scanBatch = 1; HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+5); + String[] args = Arrays.copyOf(options, options.length + 5); args[options.length] = "--batchsize=" + batchSize; args[options.length + 1] = "--numhashfiles=" + numHashFiles; args[options.length + 2] = "--scanbatch=" + scanBatch; @@ -479,7 +460,7 @@ private void hashSourceTable(TableName sourceTableName, Path testDir, String... } private void writeTestData(TableName sourceTableName, TableName targetTableName, - long... timestamps) throws Exception { + long... timestamps) throws Exception { final byte[] family = Bytes.toBytes("family"); final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); @@ -492,14 +473,14 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, int targetRegions = 6; if (ArrayUtils.isEmpty(timestamps)) { long current = EnvironmentEdgeManager.currentTime(); - timestamps = new long[]{current,current}; + timestamps = new long[] { current, current }; } - Table sourceTable = TEST_UTIL.createTable(sourceTableName, - family, generateSplits(numRows, sourceRegions)); + Table sourceTable = + TEST_UTIL.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); - Table targetTable = TEST_UTIL.createTable(targetTableName, - family, generateSplits(numRows, targetRegions)); + Table targetTable = + TEST_UTIL.createTable(targetTableName, family, generateSplits(numRows, targetRegions)); int rowIndex = 0; // a bunch of identical rows @@ -571,8 +552,8 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, sourceTable.put(sourcePut); Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1]+1, column1); - targetPut.addColumn(family, column2, timestamps[1]-1, value2); + targetPut.addColumn(family, column1, timestamps[1] + 1, column1); + targetPut.addColumn(family, column2, timestamps[1] - 1, value2); targetTable.put(targetPut); } // some rows with different values diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index 197060d1b20c..bf1a7439b4eb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation 
(ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,14 +71,13 @@ /** * This tests the TableInputFormat and its recovery semantics - * */ @Category(LargeTests.class) public class TestTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormat.class); + HBaseClassTestRule.forClass(TestTableInputFormat.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableInputFormat.class); @@ -106,22 +105,15 @@ public void before() throws IOException { } /** - * Setup a table with two rows and values. - * - * @param tableName - * @return A Table instance for the created table. - * @throws IOException + * Setup a table with two rows and values. n * @return A Table instance for the created table. n */ public static Table createTable(byte[] tableName) throws IOException { return createTable(tableName, new byte[][] { FAMILY }); } /** - * Setup a table with two rows and values per column family. - * - * @param tableName - * @return A Table instance for the created table. - * @throws IOException + * Setup a table with two rows and values per column family. n * @return A Table instance for the + * created table. n */ public static Table createTable(byte[] tableName, byte[][] families) throws IOException { Table table = UTIL.createTable(TableName.valueOf(tableName), families); @@ -140,15 +132,14 @@ public static Table createTable(byte[] tableName, byte[][] families) throws IOEx /** * Verify that the result and key have expected values. - * - * @param r single row result - * @param key the row key - * @param expectedKey the expected key + * @param r single row result + * @param key the row key + * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -157,17 +148,11 @@ static boolean checkResult(Result r, ImmutableBytesWritable key, } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapreduce API. - * - * @param table - * @throws IOException - * @throws InterruptedException + * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. 
nnn */ - static void runTestMapreduce(Table table) throws IOException, - InterruptedException { + static void runTestMapreduce(Table table) throws IOException, InterruptedException { org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = - new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); + new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); Scan s = new Scan(); s.withStartRow(Bytes.toBytes("aaa")); s.withStopRow(Bytes.toBytes("zzz")); @@ -197,12 +182,9 @@ static void runTestMapreduce(Table table) throws IOException, } /** - * Create a table that IOE's on first scanner next call - * - * @throws IOException + * Create a table that IOE's on first scanner next call n */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -231,13 +213,9 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Create a table that throws a NotServingRegionException on first scanner - * next call - * - * @throws IOException + * Create a table that throws a NotServingRegionException on first scanner next call n */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -252,9 +230,8 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) - .when(scanner).next(); + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) + .when(scanner).next(); return scanner; } @@ -269,66 +246,51 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Run test assuming no errors using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Run test assuming no errors using newer mapreduce api nn */ @Test - public void testTableRecordReaderMapreduce() throws IOException, - InterruptedException { + public void testTableRecordReaderMapreduce() throws IOException, InterruptedException { Table table = createTable(Bytes.toBytes("table1-mr")); runTestMapreduce(table); } /** - * Run test assuming Scanner IOException failure using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Run test assuming Scanner IOException failure using newer mapreduce api nn */ @Test - public void testTableRecordReaderScannerFailMapreduce() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException { Table htable = createIOEScannerTable(Bytes.toBytes("table2-mr"), 1); runTestMapreduce(htable); } /** - * Run test assuming Scanner IOException failure using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Run test assuming Scanner IOException failure using newer mapreduce api nn */ @Test(expected = IOException.class) - public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduceTwice() + throws 
IOException, InterruptedException { Table htable = createIOEScannerTable(Bytes.toBytes("table3-mr"), 2); runTestMapreduce(htable); } /** - * Run test assuming NotServingRegionException using newer mapreduce api - * - * @throws InterruptedException - * @throws org.apache.hadoop.hbase.DoNotRetryIOException + * Run test assuming NotServingRegionException using newer mapreduce api n * @throws + * org.apache.hadoop.hbase.DoNotRetryIOException */ @Test public void testTableRecordReaderScannerTimeoutMapreduce() - throws IOException, InterruptedException { + throws IOException, InterruptedException { Table htable = createDNRIOEScannerTable(Bytes.toBytes("table4-mr"), 1); runTestMapreduce(htable); } /** - * Run test assuming NotServingRegionException using newer mapreduce api - * - * @throws InterruptedException - * @throws org.apache.hadoop.hbase.NotServingRegionException + * Run test assuming NotServingRegionException using newer mapreduce api n * @throws + * org.apache.hadoop.hbase.NotServingRegionException */ @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) public void testTableRecordReaderScannerTimeoutMapreduceTwice() - throws IOException, InterruptedException { + throws IOException, InterruptedException { Table htable = createDNRIOEScannerTable(Bytes.toBytes("table5-mr"), 2); runTestMapreduce(htable); } @@ -338,7 +300,7 @@ public void testTableRecordReaderScannerTimeoutMapreduceTwice() */ @Test public void testExtensionOfTableInputFormatBase() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { LOG.info("testing use of an InputFormat taht extends InputFormatBase"); final Table htable = createTable(Bytes.toBytes("exampleTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); @@ -347,9 +309,9 @@ public void testExtensionOfTableInputFormatBase() @Test public void testJobConfigurableExtensionOfTableInputFormatBase() - throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using JobConfigurable."); + throws IOException, InterruptedException, ClassNotFoundException { + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "using JobConfigurable."); final Table htable = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -357,16 +319,16 @@ public void testJobConfigurableExtensionOfTableInputFormatBase() @Test public void testDeprecatedExtensionOfTableInputFormatBase() - throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using the approach documented in 0.98."); + throws IOException, InterruptedException, ClassNotFoundException { + LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + + "using the approach documented in 0.98."); final Table htable = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); } void testInputFormat(Class clazz) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final Job job = MapreduceTestingShim.createJob(UTIL.getConfiguration()); job.setInputFormatClass(clazz); 
job.setOutputFormatClass(NullOutputFormat.class); @@ -376,34 +338,36 @@ void testInputFormat(Class clazz) LOG.debug("submitting job."); assertTrue("job failed!", job.waitForCompletion(true)); assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue()); assertEquals("Saw any instances of the filtered out row.", 0, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue()); assertEquals("Saw the wrong number of instances of columnA.", 1, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue()); assertEquals("Saw the wrong number of instances of columnB.", 1, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue()); assertEquals("Saw the wrong count of values for the filtered-for row.", 2, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue()); assertEquals("Saw the wrong count of values for the filtered-out row.", 0, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue()); + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue()); } public static class ExampleVerifier extends TableMapper { @Override - public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException { for (Cell cell : value.listCells()) { - context.getCounter(TestTableInputFormat.class.getName() + ":row", + context + .getCounter(TestTableInputFormat.class.getName() + ":row", Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) - .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":family", + .increment(1l); + context + .getCounter(TestTableInputFormat.class.getName() + ":family", Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) - .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":value", + .increment(1l); + context + .getCounter(TestTableInputFormat.class.getName() + ":value", Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) - .increment(1l); + .increment(1l); } } @@ -418,8 +382,7 @@ public void configure(JobConf job) { Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable"))); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { @@ -436,9 +399,8 @@ public void configure(JobConf job) { } - public static class ExampleJobConfigurableTIF extends TableInputFormatBase - implements JobConfigurable { + implements JobConfigurable { @Override public 
void configure(JobConf job) { @@ -447,9 +409,8 @@ public void configure(JobConf job) { TableName tableName = TableName.valueOf("exampleJobConfigurableTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); @@ -464,19 +425,17 @@ public void configure(JobConf job) { } } - public static class ExampleTIF extends TableInputFormatBase { @Override protected void initialize(JobContext job) throws IOException { - Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create( - job.getConfiguration())); + Connection connection = + ConnectionFactory.createConnection(HBaseConfiguration.create(job.getConfiguration())); TableName tableName = TableName.valueOf("exampleTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); @@ -489,4 +448,3 @@ protected void initialize(JobContext job) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 12b17f925d99..13e3831f6df6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; @@ -61,12 +60,12 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestTableInputFormatBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatBase.class); + HBaseClassTestRule.forClass(TestTableInputFormatBase.class); @Test public void testReuseRegionSizeCalculator() throws IOException { @@ -92,13 +91,12 @@ public void testReuseRegionSizeCalculator() throws IOException { format.getSplits(context); // should only be 2 despite calling getSplits 4 times - Mockito.verify(format, Mockito.times(2)) - .createRegionSizeCalculator(Mockito.any(), Mockito.any()); + Mockito.verify(format, Mockito.times(2)).createRegionSizeCalculator(Mockito.any(), + Mockito.any()); } @Test - public void testTableInputFormatBaseReverseDNSForIPv6() - throws UnknownHostException { + public void testTableInputFormatBaseReverseDNSForIPv6() throws UnknownHostException { String address = "ipv6.google.com"; String localhost = null; InetAddress addr = null; @@ -110,11 +108,10 @@ public void testTableInputFormatBaseReverseDNSForIPv6() // google.com is down, we can probably forgive this test. return; } - System.out.println("Should retrun the hostname for this host " + - localhost + " addr : " + addr); + System.out.println("Should retrun the hostname for this host " + localhost + " addr : " + addr); String actualHostName = inputFormat.reverseDNS(addr); - assertEquals("Should retrun the hostname for this host. Expected : " + - localhost + " Actual : " + actualHostName, localhost, actualHostName); + assertEquals("Should retrun the hostname for this host. Expected : " + localhost + " Actual : " + + actualHostName, localhost, actualHostName); } @Test @@ -122,7 +119,7 @@ public void testNonSuccessiveSplitsAreNotMerged() throws IOException { JobContext context = mock(JobContext.class); Configuration conf = HBaseConfiguration.create(); conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, - ConnectionForMergeTesting.class.getName()); + ConnectionForMergeTesting.class.getName()); conf.set(TableInputFormat.INPUT_TABLE, "testTable"); conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true); when(context.getConfiguration()).thenReturn(conf); @@ -132,13 +129,13 @@ public void testNonSuccessiveSplitsAreNotMerged() throws IOException { // split["b", "c"] is excluded, split["o", "p"] and split["p", "q"] are merged, // but split["a", "b"] and split["c", "d"] are not merged. assertEquals(ConnectionForMergeTesting.START_KEYS.length - 1 - 1, - tifExclude.getSplits(context).size()); + tifExclude.getSplits(context).size()); } /** * Subclass of {@link TableInputFormat} to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class overrides {@link TableInputFormatBase#includeRegionInSplit} - * to exclude specific splits. + * This class overrides {@link TableInputFormatBase#includeRegionInSplit} to exclude specific + * splits. 
*/ private static class TableInputFormatForMergeTesting extends TableInputFormat { private byte[] prefixStartKey = Bytes.toBytes("b"); @@ -149,10 +146,11 @@ private static class TableInputFormatForMergeTesting extends TableInputFormat { * Exclude regions which contain rows starting with "b". */ @Override - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { - if (Bytes.compareTo(startKey, prefixEndKey) < 0 - && (Bytes.compareTo(prefixStartKey, endKey) < 0 - || Bytes.equals(endKey, HConstants.EMPTY_END_ROW))) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { + if ( + Bytes.compareTo(startKey, prefixEndKey) < 0 && (Bytes.compareTo(prefixStartKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_END_ROW)) + ) { return false; } else { return true; @@ -174,20 +172,17 @@ protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, } /** - * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class returns mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, - * and {@link Admin}. + * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. This class returns + * mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, and {@link Admin}. */ private static class ConnectionForMergeTesting implements Connection { - public static final byte[][] SPLITS = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d"), - Bytes.toBytes("e"), Bytes.toBytes("f"), Bytes.toBytes("g"), Bytes.toBytes("h"), - Bytes.toBytes("i"), Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"), - Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"), Bytes.toBytes("p"), - Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"), Bytes.toBytes("t"), - Bytes.toBytes("u"), Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"), - Bytes.toBytes("y"), Bytes.toBytes("z") - }; + public static final byte[][] SPLITS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), + Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), + Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j"), + Bytes.toBytes("k"), Bytes.toBytes("l"), Bytes.toBytes("m"), Bytes.toBytes("n"), + Bytes.toBytes("o"), Bytes.toBytes("p"), Bytes.toBytes("q"), Bytes.toBytes("r"), + Bytes.toBytes("s"), Bytes.toBytes("t"), Bytes.toBytes("u"), Bytes.toBytes("v"), + Bytes.toBytes("w"), Bytes.toBytes("x"), Bytes.toBytes("y"), Bytes.toBytes("z") }; public static final byte[][] START_KEYS; public static final byte[][] END_KEYS; @@ -218,7 +213,7 @@ private static class ConnectionForMergeTesting implements Connection { } ConnectionForMergeTesting(Configuration conf, ExecutorService pool, User user) - throws IOException { + throws IOException { } @Override @@ -261,39 +256,38 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws I public RegionLocator getRegionLocator(TableName tableName) throws IOException { final Map locationMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] startKey : START_KEYS) { - HRegionLocation hrl = new HRegionLocation( - RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), + HRegionLocation hrl = + new HRegionLocation(RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), ServerName.valueOf("localhost", 0, 0)); locationMap.put(startKey, hrl); } RegionLocator locator = mock(RegionLocator.class); - 
when(locator.getRegionLocation(any(byte [].class), anyBoolean())). - thenAnswer(new Answer() { + when(locator.getRegionLocation(any(byte[].class), anyBoolean())) + .thenAnswer(new Answer() { @Override public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; return locationMap.get(key); } }); - when(locator.getStartEndKeys()). - thenReturn(new Pair(START_KEYS, END_KEYS)); + when(locator.getStartEndKeys()) + .thenReturn(new Pair(START_KEYS, END_KEYS)); return locator; } public RegionSizeCalculator getRegionSizeCalculator() { RegionSizeCalculator sizeCalculator = mock(RegionSizeCalculator.class); - when(sizeCalculator.getRegionSize(any(byte[].class))). - thenAnswer(new Answer() { - @Override - public Long answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] regionId = (byte [])args[0]; - byte[] startKey = RegionInfo.getStartKey(regionId); - return SIZE_MAP.get(startKey); - } - }); + when(sizeCalculator.getRegionSize(any(byte[].class))).thenAnswer(new Answer() { + @Override + public Long answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] regionId = (byte[]) args[0]; + byte[] startKey = RegionInfo.getStartKey(regionId); + return SIZE_MAP.get(startKey); + } + }); return sizeCalculator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index a116ecb72fa6..aeea1dffbf51 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public void testGetSplits() throws IOException, InterruptedException, ClassNotFo */ @Test public void testSpecifiedNumOfMappersMR() - throws InterruptedException, IOException, ClassNotFoundException { + throws InterruptedException, IOException, ClassNotFoundException { testNumOfSplitsMR(2, 52); testNumOfSplitsMR(4, 104); } @@ -61,7 +61,7 @@ public void testAutoBalanceSplits() throws IOException { @Test public void testScanFromConfiguration() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScanFromConfiguration("bba", "bbd", "bbc"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index eab1d871a606..ced6e156e87b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -50,7 +50,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce * job to see if that is handed over and done properly too. @@ -61,7 +60,7 @@ public abstract class TestTableInputFormatScanBase { static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static final TableName TABLE_NAME = TableName.valueOf("scantest"); - static final byte[][] INPUT_FAMILYS = {Bytes.toBytes("content1"), Bytes.toBytes("content2")}; + static final byte[][] INPUT_FAMILYS = { Bytes.toBytes("content1"), Bytes.toBytes("content2") }; static final String KEY_STARTROW = "startRow"; static final String KEY_LASTROW = "stpRow"; @@ -89,31 +88,28 @@ public static class ScanMapper /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. - * @param value The value is the same as the key. - * @param context The task context. + * @param key The key, here "aaa", "aab" etc. + * @param value The value is the same as the key. + * @param context The task context. * @throws IOException When reading the rows fails. */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } - Map>> - cfMap = value.getMap(); + Map>> cfMap = value.getMap(); if (!cfMap.containsKey(INPUT_FAMILYS[0]) || !cfMap.containsKey(INPUT_FAMILYS[1])) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILYS[0]) + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); + throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILYS[0]) + + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); } String val0 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[0], null)); String val1 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[1], null)); - LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> (" + val0 + ", " + val1 + ")"); + LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> (" + val0 + ", " + + val1 + ")"); context.write(key, key); } } @@ -122,28 +118,25 @@ public void map(ImmutableBytesWritable key, Result value, * Checks the last and first key seen against the scanner boundaries. 
*/ public static class ScanReducer - extends Reducer { + extends Reducer { private String first = null; private String last = null; - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException ,InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.info("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.info( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; } } - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -163,9 +156,9 @@ protected void cleanup(Context context) * Tests an MR Scan initialized from properties set in the Configuration. */ protected void testScanFromConfiguration(String start, String stop, String last) - throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + - "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + throws IOException, InterruptedException, ClassNotFoundException { + String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + + "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); c.set(TableInputFormat.INPUT_TABLE, TABLE_NAME.getNameAsString()); c.set(TableInputFormat.SCAN_COLUMN_FAMILY, @@ -197,9 +190,9 @@ protected void testScanFromConfiguration(String start, String stop, String last) * Tests a MR scan using specific start and stop rows. */ protected void testScan(String start, String stop, String last) - throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + throws IOException, InterruptedException, ClassNotFoundException { + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? 
stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); Scan scan = new Scan(); @@ -225,13 +218,12 @@ protected void testScan(String start, String stop, String last) LOG.info("After map/reduce completion - job " + jobName); } - /** * Tests Number of inputSplits for MR job when specify number of mappers for TableInputFormatXXX * This test does not run MR job */ protected void testNumOfSplits(int splitsPerRegion, int expectedNumOfSplits) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { String jobName = "TestJobForNumOfSplits"; LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -261,7 +253,7 @@ protected void testNumOfSplits(int splitsPerRegion, int expectedNumOfSplits) * Run MR job to check the number of mapper = expectedNumOfSplits */ protected void testNumOfSplitsMR(int splitsPerRegion, int expectedNumOfSplits) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { String jobName = "TestJobForNumOfSplits-MR"; LOG.info("Before map/reduce startup - job " + jobName); JobConf c = new JobConf(TEST_UTIL.getConfiguration()); @@ -311,4 +303,3 @@ protected void testAutobalanceNumOfSplit() throws IOException { assertNotEquals("The seventh split start key should not be", 4, Bytes.toInt(ts4.getStartRow())); } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java index d7cefd61b148..addcdc898c8e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToAPP extends TestTableInputFormatScan */ @Test public void testScanEmptyToAPP() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java index 598a345834d8..e395b36e2a70 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToBBA extends TestTableInputFormatScan */ @Test public void testScanEmptyToBBA() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "bba", "baz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java index 6d3674caad86..f86578712ae8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToBBB extends TestTableInputFormatScan */ @Test public void testScanEmptyToBBB() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "bbb", "bba"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java index f5d4de10a88a..ef7b38b21be1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToEmpty extends TestTableInputFormatSc */ @Test public void testScanEmptyToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java index 939fc936f955..f20d8113f780 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanEmptyToOPP extends TestTableInputFormatScan */ @Test public void testScanEmptyToOPP() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "opp", "opo"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java index 32f768c00fb8..7d833eb66a1a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java index 5ecb4e60f4e0..f6985a3fd773 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java index 7b2ccded7e19..e57051dfd192 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanOPPToEmpty extends TestTableInputFormatScan */ @Test public void testScanOPPToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("opp", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java index 2801f4eb8bf7..c8b3394e54b4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanYYXToEmpty extends TestTableInputFormatScan */ @Test public void testScanYYXToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("yyx", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java index 97a4998e5537..175d10e1f755 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanYYYToEmpty extends TestTableInputFormatScan */ @Test public void testScanYYYToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("yyy", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java index 3d91ff2b7b3c..9ce2f0782b2f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestTableInputFormatScanYZYToEmpty extends TestTableInputFormatScan */ @Test public void testScanYZYToEmpty() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java index 786da1a02049..e1bd16268703 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,22 +51,24 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. 
*/ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableMapReduce extends TestTableMapReduceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduce.class); + HBaseClassTestRule.forClass(TestTableMapReduce.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class); @Override - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce @@ -74,25 +76,18 @@ public class TestTableMapReduce extends TestTableMapReduceBase { static class ProcessContentsMapper extends TableMapper { /** - * Pass the key, and reversed value to reduce - * - * @param key - * @param value - * @param context - * @throws IOException + * Pass the key, and reversed value to reduce nnnn */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -115,12 +110,9 @@ protected void runTestOnTable(Table table) throws IOException { job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName().getNameAsString(), scan, - ProcessContentsMapper.class, ImmutableBytesWritable.class, - Put.class, job); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), + TableMapReduceUtil.initTableMapperJob(table.getName().getNameAsString(), scan, + ProcessContentsMapper.class, ImmutableBytesWritable.class, Put.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName().getNameAsString()); @@ -138,21 +130,18 @@ protected void runTestOnTable(Table table) throws IOException { } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } /** - * Verify scan counters are emitted from the job - * @param job - * @throws IOException + * Verify scan counters are emitted from the job nn */ private void verifyJobCountersAreEmitted(Job job) throws IOException { Counters counters = job.getCounters(); - Counter counter - = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); + Counter counter = + counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter); assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index bca27ec28f6a..7490587b1097 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,9 +43,9 @@ import org.slf4j.Logger; /** - * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of a particular cell, - * and write it back to the table. Implements common components between mapred and mapreduce + * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing on + * our tables is simple - take every row in the table, reverse the value of a particular cell, and + * write it back to the table. Implements common components between mapred and mapreduce * implementations. */ public abstract class TestTableMapReduceBase { @@ -56,10 +55,7 @@ public abstract class TestTableMapReduceBase { protected static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); protected static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - protected static final byte[][] columns = new byte[][] { - INPUT_FAMILY, - OUTPUT_FAMILY - }; + protected static final byte[][] columns = new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }; /** * Retrieve my logger instance. @@ -74,9 +70,8 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.createTable(TABLE_FOR_NEGATIVE_TESTS, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); } @@ -88,8 +83,7 @@ public static void afterClass() throws Exception { } /** - * Test a map/reduce against a multi-region table - * @throws IOException + * Test a map/reduce against a multi-region table n */ @Test public void testMultiRegionTable() throws IOException { @@ -111,11 +105,10 @@ protected static Put map(ImmutableBytesWritable key, Result value) throws IOExce if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -157,11 +150,9 @@ protected void verify(TableName tableName) throws IOException { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. - * @param table Table to scan. - * @throws IOException - * @throws NullPointerException if we failed to find a cell value + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. + * @param table Table to scan. 
n * @throws NullPointerException if we failed to find a cell value */ private void verifyAttempt(final Table table) throws IOException, NullPointerException { Scan scan = new Scan(); @@ -170,18 +161,17 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (getLog().isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); } @@ -194,16 +184,13 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc } } - if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } String first = Bytes.toString(firstValue); if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -213,9 +200,9 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc if (first.compareTo(second) != 0) { if (getLog().isDebugEnabled()) { - getLog().debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + getLog().debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index 09cdc279bc8a..03cf6a441f4d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.Closeable; import java.io.File; import java.util.Collection; @@ -57,13 +58,13 @@ /** * Test different variants of initTableMapperJob method */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestTableMapReduceUtil { private static final String HTTP_PRINCIPAL = "HTTP/localhost"; @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); + HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); @After public void after() { @@ -71,8 +72,8 @@ public void after() { } /* - * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because - * the method depends on an online cluster. + * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because the method + * depends on an online cluster. */ @Test @@ -80,9 +81,8 @@ public void testInitTableMapperJob1() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); // test - TableMapReduceUtil.initTableMapperJob( - "Table", new Scan(), Import.Importer.class, Text.class, Text.class, job, - false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob("Table", new Scan(), Import.Importer.class, Text.class, + Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -95,9 +95,8 @@ public void testInitTableMapperJob1() throws Exception { public void testInitTableMapperJob2() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -110,9 +109,8 @@ public void testInitTableMapperJob2() throws Exception { public void testInitTableMapperJob3() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -125,9 +123,8 @@ public void testInitTableMapperJob3() throws Exception { public void testInitTableMapperJob4() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), 
Import.Importer.class, Text.class, - Text.class, job, false); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -136,8 +133,8 @@ public void testInitTableMapperJob4() throws Exception { assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); } - private static Closeable startSecureMiniCluster( - HBaseTestingUtil util, MiniKdc kdc, String principal) throws Exception { + private static Closeable startSecureMiniCluster(HBaseTestingUtil util, MiniKdc kdc, + String principal) throws Exception { Configuration conf = util.getConfiguration(); SecureTestUtil.enableSecurity(conf); @@ -147,8 +144,8 @@ private static Closeable startSecureMiniCluster( conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - principal + '@' + kdc.getRealm(), HTTP_PRINCIPAL + '@' + kdc.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, principal + '@' + kdc.getRealm(), + HTTP_PRINCIPAL + '@' + kdc.getRealm()); util.startMiniCluster(); try { @@ -161,7 +158,8 @@ private static Closeable startSecureMiniCluster( return util::shutdownMiniCluster; } - @Test public void testInitCredentialsForCluster1() throws Exception { + @Test + public void testInitCredentialsForCluster1() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); HBaseTestingUtil util2 = new HBaseTestingUtil(); @@ -185,8 +183,9 @@ private static Closeable startSecureMiniCluster( } } - @Test @SuppressWarnings("unchecked") public void testInitCredentialsForCluster2() - throws Exception { + @Test + @SuppressWarnings("unchecked") + public void testInitCredentialsForCluster2() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); HBaseTestingUtil util2 = new HBaseTestingUtil(); @@ -220,7 +219,8 @@ private static Closeable startSecureMiniCluster( } } - @Test public void testInitCredentialsForCluster3() throws Exception { + @Test + public void testInitCredentialsForCluster3() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); File keytab = new File(util1.getDataTestDir("keytab").toUri().getPath()); @@ -251,8 +251,9 @@ private static Closeable startSecureMiniCluster( } } - @Test @SuppressWarnings("unchecked") public void testInitCredentialsForCluster4() - throws Exception { + @Test + @SuppressWarnings("unchecked") + public void testInitCredentialsForCluster4() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); // Assume util1 is insecure cluster // Do not start util1 because cannot boot secured mini cluster and insecure mini cluster at once diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java index 1905beba3b18..232083ea7e78 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -50,7 +49,7 @@ public class TestTableRecordReader { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableRecordReader.class); + HBaseClassTestRule.forClass(TestTableRecordReader.class); private static TableName TABLE_NAME = TableName.valueOf("TestTableRecordReader"); @@ -86,12 +85,12 @@ public static void setUpBeforeClass() throws Exception { } private static void createTestTable(TableName name, byte[][] rows, byte[][] families, - byte[][] qualifiers, byte[] cellValue) throws IOException { + byte[][] qualifiers, byte[] cellValue) throws IOException { TEST_UTIL.createTable(name, families).put(createPuts(rows, families, qualifiers, cellValue)); } private static List createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers, - byte[] value) throws IOException { + byte[] value) throws IOException { List puts = new ArrayList<>(); for (int row = 0; row < rows.length; row++) { Put put = new Put(rows[row]); @@ -114,11 +113,11 @@ public static void tearDownAfterClass() throws Exception { @Test public void test() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table table = conn.getTable(TABLE_NAME)) { + Table table = conn.getTable(TABLE_NAME)) { org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = - new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); + new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); Scan scan = - new Scan().setMaxResultSize(1).setCaching(Integer.MAX_VALUE).setNeedCursorResult(true); + new Scan().setMaxResultSize(1).setCaching(Integer.MAX_VALUE).setNeedCursorResult(true); trr.setScan(scan); trr.setHTable(table); trr.initialize(null, null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index 3165d459f85b..edd2da4129ac 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; @@ -70,12 +70,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); + HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableSnapshotInputFormat.class); @@ -97,7 +97,6 @@ protected byte[] getEndRow() { return yyy; } - @Test public void testGetBestLocations() throws IOException { TableSnapshotInputFormatImpl tsif = new TableSnapshotInputFormatImpl(); @@ -107,36 +106,36 @@ public void testGetBestLocations() throws IOException { Assert.assertEquals(null, TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution = new HDFSBlocksDistribution(); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 10); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 7); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 5); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 10); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 7); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 5); + 
blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 2); Assert.assertEquals(Lists.newArrayList("h1", "h2"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 3); Assert.assertEquals(Lists.newArrayList("h2", "h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 6); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 9); Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); @@ -149,8 +148,8 @@ public static enum TestTableSnapshotCounters { public static class TestTableSnapshotMapper extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { // Validate a single row coming from the snapshot, and emit the row key verifyRowFromMap(key, value); context.write(key, NullWritable.get()); @@ -159,17 +158,16 @@ protected void map(ImmutableBytesWritable key, Result value, public static class TestTableSnapshotReducer extends Reducer { - HBaseTestingUtil.SeenRowTracker rowTracker = - new HBaseTestingUtil.SeenRowTracker(bbb, yyy); + HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(bbb, yyy); + @Override protected void reduce(ImmutableBytesWritable key, Iterable values, - Context context) throws IOException, InterruptedException { + Context context) throws IOException, InterruptedException { rowTracker.addRow(key.get()); } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { rowTracker.validate(); } } @@ -184,19 +182,17 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. 
- Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -208,8 +204,7 @@ public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Excepti Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, true); try { - testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, - true); + testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, true); } finally { conf.unset(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION); } @@ -217,21 +212,19 @@ public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Excepti @Override public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, - String snapshotName, Path tmpTableDir) throws Exception { + String snapshotName, Path tmpTableDir) throws Exception { Job job = new Job(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); } @Override - public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) - throws Exception { + public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); Configuration conf = util.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, setLocalityEnabledTo); @@ -242,14 +235,13 @@ public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow()); // limit the scan if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, 
tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow()); @@ -418,7 +410,7 @@ public void testScannerReadTypeConfiguration() throws IOException { Scan scanWithReadType = new Scan(); scanWithReadType.setReadType(readType); assertEquals(scanWithReadType.getReadType(), - serializeAndReturn(conf, scanWithReadType).getReadType()); + serializeAndReturn(conf, scanWithReadType).getReadType()); } // We should only see the DEFAULT ReadType getting updated to STREAM. Scan scanWithoutReadType = new Scan(); @@ -432,8 +424,8 @@ public void testScannerReadTypeConfiguration() throws IOException { } /** - * Serializes and deserializes the given scan in the same manner that - * TableSnapshotInputFormat does. + * Serializes and deserializes the given scan in the same manner that TableSnapshotInputFormat + * does. */ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(s)); @@ -441,23 +433,21 @@ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { } private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits, - byte[] startRow, byte[] stopRow) - throws IOException, InterruptedException { + byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); List splits = tsif.getSplits(job); Assert.assertEquals(expectedNumSplits, splits.size()); HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(startRow, - stopRow.length > 0 ? stopRow : Bytes.toBytes("\uffff")); + stopRow.length > 0 ? 
stopRow : Bytes.toBytes("\uffff")); - boolean localityEnabled = - job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + boolean localityEnabled = job.getConfiguration().getBoolean( + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean byRegionLoc = job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, - SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); + SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); for (int i = 0; i < splits.size(); i++) { // validate input split InputSplit split = splits.get(i); @@ -480,7 +470,7 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS } Scan scan = - TableMapReduceUtil.convertStringToScan(snapshotRegionSplit.getDelegate().getScan()); + TableMapReduceUtil.convertStringToScan(snapshotRegionSplit.getDelegate().getScan()); if (startRow.length > 0) { Assert.assertTrue( Bytes.toStringBinary(startRow) + " should <= " + Bytes.toStringBinary(scan.getStartRow()), @@ -498,7 +488,7 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class); when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration()); RecordReader rr = - tsif.createRecordReader(split, taskAttemptContext); + tsif.createRecordReader(split, taskAttemptContext); rr.initialize(split, taskAttemptContext); // validate we can read all the data back @@ -517,16 +507,16 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS @Override protected void testWithMapReduceImpl(HBaseTestingUtil util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, - int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, + int expectedNumSplits, boolean shutdownCluster) throws Exception { doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir, numRegions, numSplitsPerRegion, expectedNumSplits, shutdownCluster); } // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, - int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { LOG.info("testing with MapReduce"); @@ -545,17 +535,16 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam job.setJarByClass(util.getClass()); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - TestTableSnapshotInputFormat.class); + TestTableSnapshotInputFormat.class); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - 
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir); } job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); @@ -583,12 +572,12 @@ public void testCleanRestoreDir() throws Exception { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = Job.getInstance(UTIL.getConfiguration()); Path workingDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, workingDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + workingDir); FileSystem fs = workingDir.getFileSystem(job.getConfiguration()); - Path restorePath = new Path(job.getConfiguration() - .get("hbase.TableSnapshotInputFormat.restore.dir")); + Path restorePath = + new Path(job.getConfiguration().get("hbase.TableSnapshotInputFormat.restore.dir")); Assert.assertTrue(fs.exists(restorePath)); TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); Assert.assertFalse(fs.exists(restorePath)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index 37feec3f78d0..e61cb6c6de7b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,11 +35,11 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestTableSplit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSplit.class); + HBaseClassTestRule.forClass(TestTableSplit.class); @Rule public TestName name = new TestName(); @@ -47,11 +47,9 @@ public class TestTableSplit { @Test public void testHashCode() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); HashSet set = new HashSet<>(2); @@ -62,15 +60,13 @@ public void testHashCode() { /** * length of region should not influence hashcode - * */ + */ @Test public void testHashCode_length() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", 1984); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 1984); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", 1982); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 1982); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); @@ -82,16 +78,14 @@ public void testHashCode_length() { /** * Length of region need to be properly serialized. 
- * */ + */ @Test public void testLengthIsSerialized() throws Exception { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", 666); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 666); TableSplit deserialized = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start2"), - Bytes.toBytes("row-end2"), "location1"); + Bytes.toBytes("row-start2"), Bytes.toBytes("row-end2"), "location1"); ReflectionUtils.copy(new Configuration(), split1, deserialized); Assert.assertEquals(666, deserialized.getLength()); @@ -99,37 +93,27 @@ public void testLengthIsSerialized() throws Exception { @Test public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); - String str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); + String str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=)"; Assert.assertEquals(str, split.toString()); split = - new TableSplit(TableName.valueOf(name.getMethodName()), null, Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", "encoded-region-name", 1000L); - str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=encoded-region-name)"; + new TableSplit(TableName.valueOf(name.getMethodName()), null, Bytes.toBytes("row-start"), + Bytes.toBytes("row-end"), "location", "encoded-region-name", 1000L); + str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=encoded-region-name)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null, null, null, 1000L); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=null)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=null)"; Assert.assertEquals(str, split.toString()); } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index bf25c1caac30..596932edf24f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,28 +61,27 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTimeRangeMapRed { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); + HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); private final static Logger log = LoggerFactory.getLogger(TestTimeRangeMapRed.class); - private static final HBaseTestingUtil UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private Admin admin; - private static final byte [] KEY = Bytes.toBytes("row1"); + private static final byte[] KEY = Bytes.toBytes("row1"); private static final NavigableMap TIMESTAMP = new TreeMap<>(); static { - TIMESTAMP.put((long)1245620000, false); - TIMESTAMP.put((long)1245620005, true); // include - TIMESTAMP.put((long)1245620010, true); // include - TIMESTAMP.put((long)1245620055, true); // include - TIMESTAMP.put((long)1245620100, true); // include - TIMESTAMP.put((long)1245620150, false); - TIMESTAMP.put((long)1245620250, false); + TIMESTAMP.put((long) 1245620000, false); + TIMESTAMP.put((long) 1245620005, true); // include + TIMESTAMP.put((long) 1245620010, true); // include + TIMESTAMP.put((long) 1245620055, true); // include + TIMESTAMP.put((long) 1245620100, true); // include + TIMESTAMP.put((long) 1245620150, false); + TIMESTAMP.put((long) 1245620250, false); } static final long MINSTAMP = 1245620005; static final long MAXSTAMP = 1245620100 + 1; // maxStamp itself is excluded. so increment it. @@ -107,16 +106,13 @@ public void before() throws Exception { } private static class ProcessTimeRangeMapper - extends TableMapper - implements Configurable { + extends TableMapper implements Configurable { private Configuration conf = null; private Table table = null; @Override - public void map(ImmutableBytesWritable key, Result result, - Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result result, Context context) throws IOException { List tsList = new ArrayList<>(); for (Cell kv : result.listCells()) { tsList.add(kv.getTimestamp()); @@ -170,8 +166,7 @@ public void testTimeRangeMapRed() table.close(); } - private void runTestOnTable() - throws IOException, InterruptedException, ClassNotFoundException { + private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -181,16 +176,15 @@ private void runTestOnTable() scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setTimeRange(MINSTAMP, MAXSTAMP); scan.readAllVersions(); - TableMapReduceUtil.initTableMapperJob(TABLE_NAME, - scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job); + TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan, ProcessTimeRangeMapper.class, + Text.class, Text.class, job); job.waitForCompletion(true); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } finally { if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -200,11 +194,11 @@ private void verify(final Table table) throws IOException { scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.readVersions(1); ResultScanner scanner = 
table.getScanner(scan); - for (Result r: scanner) { + for (Result r : scanner) { for (Cell kv : r.listCells()) { log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv)) - + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) - + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv))); + + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t" + kv.getTimestamp() + "\t" + + Bytes.toBoolean(CellUtil.cloneValue(kv))); org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()), Bytes.toBoolean(CellUtil.cloneValue(kv))); } @@ -213,4 +207,3 @@ private void verify(final Table table) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java index 48e85183923e..70602a371668 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.List; import org.apache.hadoop.fs.FileStatus; @@ -32,7 +33,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({ MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestWALInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index aac4ae5271b8..9b0d5ec52a34 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -24,6 +24,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; @@ -72,11 +73,11 @@ /** * Basic test for the WALPlayer M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestWALPlayer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALPlayer.class); + HBaseClassTestRule.forClass(TestWALPlayer.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static SingleProcessHBaseCluster cluster; @@ -115,9 +116,9 @@ public void testPlayingRecoveredEdit() throws Exception { TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); // Copy testing recovered.edits file that is over under hbase-server test resources // up into a dir in our little hdfs cluster here. - String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + - "/../../../hbase-server/src/test/resources/" + - TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + String hbaseServerTestResourcesEdits = + System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); assertTrue(new File(hbaseServerTestResourcesEdits).exists()); FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); // Target dir. 
@@ -125,7 +126,7 @@ public void testPlayingRecoveredEdit() throws Exception { assertTrue(dfs.mkdirs(targetDir)); dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); assertEquals(0, - ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); // I don't know how many edits are in this file for this table... so just check more than 1. assertTrue(TEST_UTIL.countRows(tn) > 0); } @@ -157,19 +158,17 @@ public void testWALPlayer() throws Exception { // replay the WAL, map table 1 to table 2 WAL log = cluster.getRegionServer(0).getWAL(null); log.rollWriter(); - String walInputDir = new Path(cluster.getMaster().getMasterFileSystem() - .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString(); + String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getWALRootDir(), + HConstants.HREGION_LOGDIR_NAME).toString(); - Configuration configuration= TEST_UTIL.getConfiguration(); + Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName="_test_.name"; + String optionName = "_test_.name"; configuration.set(optionName, "1000"); player.setupTime(configuration, optionName); - assertEquals(1000,configuration.getLong(optionName,0)); + assertEquals(1000, configuration.getLong(optionName, 0)); assertEquals(0, ToolRunner.run(configuration, player, - new String[] {walInputDir, tableName1.getNameAsString(), - tableName2.getNameAsString() })); - + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); // verify the WAL was player into table 2 Get g = new Get(ROW); @@ -233,7 +232,7 @@ public void testMainMethod() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -246,8 +245,8 @@ public void testMainMethod() throws Exception { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " [ ]")); + assertTrue(data.toString() + .contains("Usage: WALPlayer [options] " + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index c8ff9042932f..795135cc6d19 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import java.util.NavigableMap; @@ -71,7 +72,7 @@ public class TestWALRecordReader { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALRecordReader.class); + HBaseClassTestRule.forClass(TestWALRecordReader.class); private static final 
Logger LOG = LoggerFactory.getLogger(TestWALRecordReader.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -82,7 +83,7 @@ public class TestWALRecordReader { private static Path walRootDir; // visible for TestHLogRecordReader static final TableName tableName = TableName.valueOf(getName()); - private static final byte [] rowName = tableName.getName(); + private static final byte[] rowName = tableName.getName(); // visible for TestHLogRecordReader static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); private static final byte[] family = Bytes.toBytes("column"); @@ -145,8 +146,8 @@ public void testPartialRead() throws Exception { edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); log.appendData(info, getWalKeyImpl(ts, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); - log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value)); + log.appendData(info, getWalKeyImpl(ts + 1, scopes), edit); log.sync(); Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); @@ -157,17 +158,16 @@ public void testPartialRead() throws Exception { long ts1 = EnvironmentEdgeManager.currentTime(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value)); - log.appendData(info, getWalKeyImpl(ts1+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value)); + log.appendData(info, getWalKeyImpl(ts1 + 1, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1+2, value)); - log.appendData(info, getWalKeyImpl(ts1+2, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value)); + log.appendData(info, getWalKeyImpl(ts1 + 2, scopes), edit); log.sync(); log.shutdown(); walfactory.shutdown(); LOG.info("Closed WAL " + log.toString()); - WALInputFormat input = new WALInputFormat(); Configuration jobConf = new Configuration(conf); jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); @@ -178,7 +178,7 @@ public void testPartialRead() throws Exception { assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2, splits.size()); // Both entries from first file are in-range. 
@@ -201,12 +201,12 @@ public void testPartialRead() throws Exception { public void testWALRecordReader() throws Exception { final WALFactory walfactory = new WALFactory(conf, getName()); WAL log = walfactory.getWAL(info); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = + log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(1); // make sure 2nd log gets a later timestamp @@ -214,10 +214,9 @@ public void testWALRecordReader() throws Exception { log.rollWriter(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); walfactory.shutdown(); @@ -240,7 +239,7 @@ public void testWALRecordReader() throws Exception { // now test basic time ranges: // set an endtime, the 2nd log file can be ignored completely. - jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs-1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs - 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); @@ -253,29 +252,27 @@ public void testWALRecordReader() throws Exception { } /** - * Test WALRecordReader tolerance to moving WAL from active - * to archive directory + * Test WALRecordReader tolerance to moving WAL from active to archive directory * @throws Exception exception */ @Test public void testWALRecordReaderActiveArchiveTolerance() throws Exception { final WALFactory walfactory = new WALFactory(conf, getName()); WAL log = walfactory.getWAL(info); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = + log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(10); // make sure 2nd edit gets a later timestamp edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); @@ -287,7 +284,7 @@ public void testWALRecordReaderActiveArchiveTolerance() throws Exception { List splits = 
input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1, splits.size()); WALInputFormat.WALSplit split = (WALInputFormat.WALSplit) splits.get(0); - LOG.debug("log="+logDir+" file="+ split.getLogFileName()); + LOG.debug("log=" + logDir + " file=" + split.getLogFileName()); testSplitWithMovingWAL(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2")); } @@ -310,8 +307,10 @@ private void testSplit(InputSplit split, byte[]... columns) throws Exception { for (byte[] column : columns) { assertTrue(reader.nextKeyValue()); Cell cell = reader.getCurrentValue().getCells().get(0); - if (!Bytes.equals(column, 0, column.length, cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength())) { + if ( + !Bytes.equals(column, 0, column.length, cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + ) { assertTrue( "expected [" + Bytes.toString(column) + "], actual [" + Bytes.toString( cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", @@ -323,8 +322,8 @@ private void testSplit(InputSplit split, byte[]... columns) throws Exception { } /** - * Create a new reader from the split, match the edits against the passed columns, - * moving WAL to archive in between readings + * Create a new reader from the split, match the edits against the passed columns, moving WAL to + * archive in between readings */ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) throws Exception { WALRecordReader reader = getReader(); @@ -332,8 +331,10 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) assertTrue(reader.nextKeyValue()); Cell cell = reader.getCurrentValue().getCells().get(0); - if (!Bytes.equals(col1, 0, col1.length, cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())) { + if ( + !Bytes.equals(col1, 0, col1.length, cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + ) { assertTrue( "expected [" + Bytes.toString(col1) + "], actual [" + Bytes.toString( cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", @@ -354,8 +355,10 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) // TODO: the archivedLogLocation to read next key value. assertTrue(reader.nextKeyValue()); cell = reader.getCurrentValue().getCells().get(0); - if (!Bytes.equals(col2, 0, col2.length, cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())) { + if ( + !Bytes.equals(col2, 0, col2.length, cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + ) { assertTrue( "expected [" + Bytes.toString(col2) + "], actual [" + Bytes.toString( cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java index c674af3e76d8..d879ea21fea7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -29,9 +28,9 @@ import org.apache.hadoop.io.Text; /** - * Dummy mapper used for unit tests to verify that the mapper can be injected. - * This approach would be used if a custom transformation needed to be done after - * reading the input data before writing it to HFiles. + * Dummy mapper used for unit tests to verify that the mapper can be injected. This approach would + * be used if a custom transformation needed to be done after reading the input data before writing + * it to HFiles. */ public class TsvImporterCustomTestMapper extends TsvImporterMapper { @Override @@ -40,12 +39,11 @@ protected void setup(Context context) { } /** - * Convert a line of TSV text into an HBase table row after transforming the - * values by multiplying them by 3. + * Convert a line of TSV text into an HBase table row after transforming the values by multiplying + * them by 3. */ @Override - public void map(LongWritable offset, Text value, Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] family = Bytes.toBytes("FAM"); final byte[][] qualifiers = { Bytes.toBytes("A"), Bytes.toBytes("B") }; @@ -54,20 +52,19 @@ public void map(LongWritable offset, Text value, Context context) String[] valueTokens = new String(lineBytes, StandardCharsets.UTF_8).split("\u001b"); // create the rowKey and Put - ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); Put put = new Put(rowKey.copyBytes()); put.setDurability(Durability.SKIP_WAL); - //The value should look like this: VALUE1 or VALUE2. Let's multiply - //the integer by 3 - for(int i = 1; i < valueTokens.length; i++) { + // The value should look like this: VALUE1 or VALUE2. Let's multiply + // the integer by 3 + for (int i = 1; i < valueTokens.length; i++) { String prefix = valueTokens[i].substring(0, "VALUE".length()); String suffix = valueTokens[i].substring("VALUE".length()); String newValue = prefix + Integer.parseInt(suffix) * 3; - KeyValue kv = new KeyValue(rowKey.copyBytes(), family, - qualifiers[i-1], Bytes.toBytes(newValue)); + KeyValue kv = + new KeyValue(rowKey.copyBytes(), family, qualifiers[i - 1], Bytes.toBytes(newValue)); put.add(kv); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java index 850d4abac80b..e1f4dcdf9e8d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -27,18 +26,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Just shows a simple example of how the attributes can be extracted and added - * to the puts + * Just shows a simple example of how the attributes can be extracted and added to the puts */ public class TsvImporterCustomTestMapperForOprAttr extends TsvImporterMapper { @Override protected void populatePut(byte[] lineBytes, ParsedLine parsed, Put put, int i) - throws BadTsvLineException, IOException { + throws BadTsvLineException, IOException { KeyValue kv; kv = new KeyValue(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, parsed.getColumnOffset(i), - parsed.getColumnLength(i)); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, KeyValue.Type.Put, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i)); if (parsed.getIndividualAttributes() != null) { String[] attributes = parsed.getIndividualAttributes(); for (String attr : attributes) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java index f286c63fb546..a14febd21e6d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -84,13 +84,13 @@ public void testCompactedFilesArchived() throws Exception { Configuration config = HBaseConfiguration.create(); config.set("fs.defaultFS", defaultFS); int result = ToolRunner.run(config, new CompactionTool(), - new String[]{"-compactOnce", "-major", storePath}); - assertEquals(0,result); + new String[] { "-compactOnce", "-major", storePath }); + assertEquals(0, result); regionDirFiles = fs.listStatus(new Path(storePath)); assertEquals(1, regionDirFiles.length); } - private void putAndFlush(int key) throws Exception{ + private void putAndFlush(int key) throws Exception { Put put = new Put(Bytes.toBytes(key)); put.addColumn(HBaseTestingUtil.fam1, qualifier, Bytes.toBytes("val" + key)); region.put(put); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index c614b4400051..ee77d9f6fccb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -74,7 +74,7 @@ public class TestVerifyReplication extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplication.class); + HBaseClassTestRule.forClass(TestVerifyReplication.class); private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplication.class); @@ -95,9 +95,11 @@ public void setUp() throws Exception { public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = + TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { @@ -107,7 +109,7 @@ public static void setUpBeforeClass() throws Exception { } static void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Job job = new VerifyReplication().createSubmittableJob(new Configuration(CONF1), args); if (job == null) { fail("Job wasn't created, see the log"); @@ -167,9 +169,9 @@ public void testVerifyRepJobWithRawOptions() throws Exception { try { ColumnFamilyDescriptor fam = ColumnFamilyDescriptorBuilder.newBuilder(familyname) - .setMaxVersions(100).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build(); + .setMaxVersions(100).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build(); TableDescriptor table = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(fam).build(); + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(fam).build(); Connection connection1 = ConnectionFactory.createConnection(CONF1); Connection connection2 = ConnectionFactory.createConnection(CONF2); @@ -239,7 +241,7 @@ public void testVerifyRepJobWithRawOptions() throws Exception { } static void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int expectedCount) - throws 
IOException { + throws IOException { FileSystem fs = FileSystem.get(conf); FileStatus[] subDirectories = fs.listStatus(new Path(restoreTmpDir)); assertNotNull(subDirectories); @@ -249,7 +251,6 @@ static void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int exp } } - @Test public void testVerifyRepJobWithQuorumAddress() throws Exception { // Populate the tables, at the same time it guarantees that the tables are @@ -374,7 +375,7 @@ public void testVerifyRepJobWithPeerTableName() throws Exception { // with a peerTableName along with quorum address (a cluster key) String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; + UTIL2.getClusterKey(), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); UTIL2.deleteTableData(peerTableName); @@ -391,23 +392,23 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti FileSystem fs = rootDir.getFileSystem(CONF1); String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String tmpPath1 = UTIL1.getRandomDir().toString(); String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); @@ -421,7 +422,7 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti put = new Put(result.getRow()); Cell firstVal = result.rawCells()[0]; put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal), - Bytes.toBytes("diff data")); + Bytes.toBytes("diff data")); htable3.put(put); } Delete delete = new Delete(put.getRow()); @@ -429,16 +430,16 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - 
Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index fdbf7ac0db04..d78b2f2e2edd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -70,7 +70,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationAdjunct.class); + HBaseClassTestRule.forClass(TestVerifyReplicationAdjunct.class); private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplicationAdjunct.class); @@ -90,9 +90,11 @@ public void setUp() throws Exception { @BeforeClass public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = + TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { admin2.createTable(peerTable, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); @@ -243,21 +245,21 @@ public void testVerifyReplicationPrefixFiltering() throws Exception { loadData("zzz", row); waitForReplication(NB_ROWS_IN_BATCH * 4, NB_RETRIES * 4); String[] args = - new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() }; + new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() }; TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH * 2, 0); } @Test public void testVerifyReplicationSnapshotArguments() { String[] args = - new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() }; + new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() }; assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() }; assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2", - tableName.getNameAsString() }; + tableName.getNameAsString() }; 
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() }; @@ -267,13 +269,13 @@ public void testVerifyReplicationSnapshotArguments() { assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/", - "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", - tableName.getNameAsString() }; + "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", + tableName.getNameAsString() }; assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/", - "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs", - "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() }; + "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs", + "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() }; assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java index 2fe843ba62d1..9edc6245295c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; @@ -64,10 +63,10 @@ public class TestVerifyReplicationCrossDiffHdfs { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class); + HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class); private static final Logger LOG = - LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class); + LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class); private static HBaseTestingUtil util1; private static HBaseTestingUtil util2; @@ -111,16 +110,16 @@ public static void setUpBeforeClass() throws Exception { private static void createTestingTable(Admin admin) throws IOException { TableDescriptor table = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(100) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(100) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build(); admin.createTable(table); } private static void addTestingPeer() throws IOException { ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(util2.getClusterKey()).setReplicateAllUserTables(false) - .setTableCFsMap(ImmutableMap.of(TABLE_NAME, ImmutableList.of())).build(); + .setClusterKey(util2.getClusterKey()).setReplicateAllUserTables(false) + .setTableCFsMap(ImmutableMap.of(TABLE_NAME, ImmutableList.of())).build(); util1.getAdmin().addReplicationPeer(PEER_ID, rpc); } @@ -139,7 +138,7 @@ private static void loadSomeData() throws IOException, InterruptedException { results = rs.next(numOfRows); if (results == null || results.length < numOfRows) { LOG.info("Retrying, wait until the peer received all the rows, currentRows:" - + (results == null ? 0 : results.length)); + + (results == null ? 
0 : results.length)); Thread.sleep(100); } } @@ -168,14 +167,14 @@ public void testVerifyRepBySnapshot() throws Exception { FileSystem fs = rootDir.getFileSystem(conf1); String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME, - Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(conf2); FileSystem peerFs = peerRootDir.getFileSystem(conf2); String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME, - Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String temPath1 = new Path(fs.getUri().toString(), "/tmp1").toString(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java index 0d1cead33016..6c1e77d609e5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; + import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -96,8 +96,8 @@ private static void setupCluster(HBaseTestingUtil util) throws Exception { conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), HTTP_PRINCIPAL + '@' + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), + HTTP_PRINCIPAL + '@' + KDC.getRealm()); util.startMiniCluster(); } @@ -112,13 +112,14 @@ public static void beforeClass() throws Exception { setupCluster(UTIL2); try (Admin admin = UTIL1.getAdmin()) { - admin.addReplicationPeer("1", ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) - .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) - .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) - .build()); + admin.addReplicationPeer("1", + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) + .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) + .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) + .build()); } } @@ -130,10 +131,8 @@ public static void cleanup() throws IOException { @Parameters public static Collection> peer() { - return 
Arrays.asList( - () -> "1", - () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration()) - ); + return Arrays.asList(() -> "1", + () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())); } @Parameter @@ -143,11 +142,7 @@ public static Collection> peer() { @SuppressWarnings("unchecked") public void testJobCredentials() throws Exception { Job job = new VerifyReplication().createSubmittableJob( - new Configuration(UTIL1.getConfiguration()), - new String[] { - peer.get(), - "table" - }); + new Configuration(UTIL1.getConfiguration()), new String[] { peer.get(), "table" }); Credentials credentials = job.getCredentials(); Collection> tokens = credentials.getAllTokens(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 02aae1b341bd..c49bf218743f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -64,20 +64,21 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshot.class); + HBaseClassTestRule.forClass(TestExportSnapshot.class); private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshot.class); @@ -177,9 +178,9 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception { String snapshotName0 = "snaptb0-" + testName.getMethodName() + "-1"; admin.createTable( TableDescriptorBuilder.newBuilder(tableName0) - .setColumnFamilies( - Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())) - .build(), + .setColumnFamilies( + Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())) + .build(), new byte[][] { Bytes.toBytes("2") }); // put some data try (Table table = admin.getConnection().getTable(tableName0)) { @@ -191,8 +192,7 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception { tableNumFiles = regions.size(); // merge region admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(), - regions.get(1).getEncodedNameAsBytes() }, - true).get(); + regions.get(1).getEncodedNameAsBytes() }, true).get(); // take a snapshot admin.snapshot(snapshotName0, tableName0); // export snapshot and verify @@ -231,31 +231,30 @@ public void testExportWithTargetName() throws Exception { } private void testExportFileSystemState(final TableName tableName, final String snapshotName, - final String targetName, int filesExpected) throws Exception { - testExportFileSystemState(tableName, snapshotName, targetName, - filesExpected, getHdfsDestinationDir(), false); + final String targetName, int filesExpected) throws Exception { + testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, + getHdfsDestinationDir(), false); } - protected void 
testExportFileSystemState(final TableName tableName, - final String snapshotName, final String targetName, int filesExpected, - Path copyDir, boolean overwrite) throws Exception { + protected void testExportFileSystemState(final TableName tableName, final String snapshotName, + final String targetName, int filesExpected, Path copyDir, boolean overwrite) throws Exception { testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName, - filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, - overwrite, getBypassRegionPredicate(), true); + filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, + getBypassRegionPredicate(), true); } /** * Creates destination directory, runs ExportSnapshot() tool, and runs some verifications. */ - protected static void testExportFileSystemState(final Configuration conf, final TableName tableName, - final String snapshotName, final String targetName, final int filesExpected, - final Path srcDir, Path rawTgtDir, final boolean overwrite, - final RegionPredicate bypassregionPredicate, boolean success) throws Exception { + protected static void testExportFileSystemState(final Configuration conf, + final TableName tableName, final String snapshotName, final String targetName, + final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite, + final RegionPredicate bypassregionPredicate, boolean success) throws Exception { FileSystem tgtFs = rawTgtDir.getFileSystem(conf); FileSystem srcFs = srcDir.getFileSystem(conf); Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); - LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", - tgtFs.getUri(), tgtDir, rawTgtDir, srcFs.getUri(), srcDir); + LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir, + rawTgtDir, srcFs.getUri(), srcDir); List opts = new ArrayList<>(); opts.add("--snapshot"); opts.add(snapshotName); @@ -283,11 +282,11 @@ protected static void testExportFileSystemState(final Configuration conf, final // Verify File-System state FileStatus[] rootFiles = tgtFs.listStatus(tgtDir); assertEquals(filesExpected > 0 ? 
2 : 1, rootFiles.length); - for (FileStatus fileStatus: rootFiles) { + for (FileStatus fileStatus : rootFiles) { String name = fileStatus.getPath().getName(); assertTrue(fileStatus.toString(), fileStatus.isDirectory()); - assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) || - name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) + || name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); } LOG.info("Verified filesystem state"); @@ -295,8 +294,8 @@ protected static void testExportFileSystemState(final Configuration conf, final final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName); final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName); verifySnapshotDir(srcFs, new Path(srcDir, snapshotDir), tgtFs, new Path(tgtDir, targetDir)); - Set snapshotFiles = verifySnapshot(conf, tgtFs, tgtDir, tableName, - targetName, bypassregionPredicate); + Set snapshotFiles = + verifySnapshot(conf, tgtFs, tgtDir, tableName, targetName, bypassregionPredicate); assertEquals(filesExpected, snapshotFiles.size()); } @@ -304,7 +303,7 @@ protected static void testExportFileSystemState(final Configuration conf, final * verify if the snapshot folder on file-system 1 match the one on file-system 2 */ protected static void verifySnapshotDir(final FileSystem fs1, final Path root1, - final FileSystem fs2, final Path root2) throws IOException { + final FileSystem fs2, final Path root2) throws IOException { assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2)); } @@ -312,17 +311,17 @@ protected static void verifySnapshotDir(final FileSystem fs1, final Path root1, * Verify if the files exists */ protected static Set verifySnapshot(final Configuration conf, final FileSystem fs, - final Path rootDir, final TableName tableName, final String snapshotName, - final RegionPredicate bypassregionPredicate) throws IOException { - final Path exportedSnapshot = new Path(rootDir, - new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); + final Path rootDir, final TableName tableName, final String snapshotName, + final RegionPredicate bypassregionPredicate) throws IOException { + final Path exportedSnapshot = + new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); final Set snapshotFiles = new HashSet<>(); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot, - new SnapshotReferenceUtil.SnapshotVisitor() { + new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { if (bypassregionPredicate != null && bypassregionPredicate.evaluate(regionInfo)) { return; } @@ -332,10 +331,10 @@ public void storeFile(final RegionInfo regionInfo, final String family, snapshotFiles.add(hfile); verifyNonEmptyFile(new Path(exportedArchive, new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), - new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); + new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); } else { Pair referredToRegionAndFile = - StoreFileInfo.getReferredToRegionAndFile(storeFile.getName()); + StoreFileInfo.getReferredToRegionAndFile(storeFile.getName()); String region = referredToRegionAndFile.getFirst(); String hfile = 
referredToRegionAndFile.getSecond(); snapshotFiles.add(hfile); @@ -359,13 +358,13 @@ private void verifyNonEmptyFile(final Path path) throws IOException { } private static Set listFiles(final FileSystem fs, final Path root, final Path dir) - throws IOException { + throws IOException { Set files = new HashSet<>(); LOG.debug("List files in {} in root {} at {}", fs, root, dir); int rootPrefix = root.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString().length(); FileStatus[] list = CommonFSUtils.listStatus(fs, dir); if (list != null) { - for (FileStatus fstat: list) { + for (FileStatus fstat : list) { LOG.debug(Objects.toString(fstat.getPath())); if (fstat.isDirectory()) { files.addAll(listFiles(fs, root, fstat.getPath())); @@ -379,8 +378,8 @@ private static Set listFiles(final FileSystem fs, final Path root, final private Path getHdfsDestinationDir() { Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - Path path = new Path(new Path(rootDir, "export-test"), "export-" + - EnvironmentEdgeManager.currentTime()); + Path path = + new Path(new Path(rootDir, "export-test"), "export-" + EnvironmentEdgeManager.currentTime()); LOG.info("HDFS export destination path: " + path); return path; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java index 6569767ea3fc..e51ba7da5707 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertFalse; + import java.util.Iterator; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -43,19 +44,19 @@ import org.slf4j.LoggerFactory; /** - * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but - * the test suite ran too close to the maximum time limit so we split these out. Uses - * facility from TestExportSnapshot where possible. + * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but the + * test suite ran too close to the maximum time limit so we split these out. Uses facility from + * TestExportSnapshot where possible. * @see TestExportSnapshot */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshotAdjunct { private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotAdjunct.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotAdjunct.class); + HBaseClassTestRule.forClass(TestExportSnapshotAdjunct.class); @Rule public final TestName testName = new TestName(); @@ -75,12 +76,11 @@ public static void setUpBeforeClass() throws Exception { } /** - * Check for references to '/tmp'. We are trying to avoid having references to outside of the - * test data dir when running tests. References outside of the test dir makes it so concurrent - * tests can stamp on each other by mistake. This check is for references to the 'tmp'. - * - * This is a strange place for this test but I want somewhere where the configuration is - * full -- filed w/ hdfs and mapreduce configurations. + * Check for references to '/tmp'. 
We are trying to avoid having references to outside of the test + * data dir when running tests. References outside of the test dir makes it so concurrent tests + * can stamp on each other by mistake. This check is for references to the 'tmp'. This is a + * strange place for this test but I want somewhere where the configuration is full -- filed w/ + * hdfs and mapreduce configurations. */ private void checkForReferencesToTmpDir() { Configuration conf = TEST_UTIL.getConfiguration(); @@ -126,8 +126,7 @@ public void setUp() throws Exception { admin.snapshot(emptySnapshotName, tableName); // Add some rows - SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, - TestExportSnapshot.FAMILY); + SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, TestExportSnapshot.FAMILY); tableNumFiles = admin.getRegions(tableName).size(); // take a snapshot @@ -151,9 +150,8 @@ public void testExportRetry() throws Exception { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - snapshotName, snapshotName, tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), - copyDir, true, null, true); + TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName, + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, true); } /** @@ -168,8 +166,7 @@ public void testExportFailure() throws Exception { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - snapshotName, snapshotName, tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), - copyDir, true, null, false); + TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName, + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java index d104d830985b..71402d0989de 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,57 +34,52 @@ /** * Test Export Snapshot Tool helpers */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestExportSnapshotHelpers { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotHelpers.class); + HBaseClassTestRule.forClass(TestExportSnapshotHelpers.class); /** - * Verfy the result of getBalanceSplits() method. - * The result are groups of files, used as input list for the "export" mappers. - * All the groups should have similar amount of data. - * - * The input list is a pair of file path and length. - * The getBalanceSplits() function sort it by length, - * and assign to each group a file, going back and forth through the groups. + * Verfy the result of getBalanceSplits() method. 
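The checkForReferencesToTmpDir() Javadoc reflowed above states the intent: nothing in the effective test configuration should point at '/tmp', or concurrent test runs can stamp on each other. A minimal sketch of such a scan follows; it is not the method body from this patch, and the real check only inspects the keys it cares about.

import static org.junit.Assert.assertFalse;

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

// Sketch: walk every key/value pair in the Configuration and fail on any value
// that references /tmp. Hadoop's Configuration is Iterable<Map.Entry<String, String>>.
public class TmpDirScanSketch {
  static void checkForReferencesToTmpDir(Configuration conf) {
    for (Map.Entry<String, String> entry : conf) {
      String value = entry.getValue();
      assertFalse("Key " + entry.getKey() + " references /tmp: " + value,
        value != null && value.contains("/tmp"));
    }
  }
}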
The result are groups of files, used as input + * list for the "export" mappers. All the groups should have similar amount of data. The input + * list is a pair of file path and length. The getBalanceSplits() function sort it by length, and + * assign to each group a file, going back and forth through the groups. */ @Test public void testBalanceSplit() throws Exception { // Create a list of files List> files = new ArrayList<>(21); for (long i = 0; i <= 20; i++) { - SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() - .setType(SnapshotFileInfo.Type.HFILE) - .setHfile("file-" + i) - .build(); + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder().setType(SnapshotFileInfo.Type.HFILE) + .setHfile("file-" + i).build(); files.add(new Pair<>(fileInfo, i)); } // Create 5 groups (total size 210) - // group 0: 20, 11, 10, 1 (total size: 42) - // group 1: 19, 12, 9, 2 (total size: 42) - // group 2: 18, 13, 8, 3 (total size: 42) - // group 3: 17, 12, 7, 4 (total size: 42) - // group 4: 16, 11, 6, 5 (total size: 42) + // group 0: 20, 11, 10, 1 (total size: 42) + // group 1: 19, 12, 9, 2 (total size: 42) + // group 2: 18, 13, 8, 3 (total size: 42) + // group 3: 17, 12, 7, 4 (total size: 42) + // group 4: 16, 11, 6, 5 (total size: 42) List>> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); - String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; + String[] split0 = new String[] { "file-20", "file-11", "file-10", "file-1", "file-0" }; verifyBalanceSplit(splits.get(0), split0, 42); - String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; + String[] split1 = new String[] { "file-19", "file-12", "file-9", "file-2" }; verifyBalanceSplit(splits.get(1), split1, 42); - String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; + String[] split2 = new String[] { "file-18", "file-13", "file-8", "file-3" }; verifyBalanceSplit(splits.get(2), split2, 42); - String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; + String[] split3 = new String[] { "file-17", "file-14", "file-7", "file-4" }; verifyBalanceSplit(splits.get(3), split3, 42); - String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; + String[] split4 = new String[] { "file-16", "file-15", "file-6", "file-5" }; verifyBalanceSplit(splits.get(4), split4, 42); } private void verifyBalanceSplit(final List> split, - final String[] expected, final long expectedSize) { + final String[] expected, final long expectedSize) { assertEquals(expected.length, split.size()); long totalSize = 0; for (int i = 0; i < expected.length; ++i) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index dd5ed0cc9655..6ae20bc4f4f6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -44,16 +44,15 @@ import org.slf4j.LoggerFactory; /** - * Test Export Snapshot Tool - * Tests V1 snapshots only. Used to ALSO test v2 but strange failure so separate the tests. - * See companion file for test of v2 snapshot. + * Test Export Snapshot Tool Tests V1 snapshots only. Used to ALSO test v2 but strange failure so + * separate the tests. See companion file for test of v2 snapshot. 
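The testBalanceSplit() hunk above encodes the contract of ExportSnapshot.getBalancedSplits(): sort the (file, length) pairs by length and deal them out back and forth across the groups so every mapper receives a similar number of bytes. Below is a self-contained sketch of that snake-order assignment using plain longs instead of SnapshotFileInfo; it reproduces the per-group total of 42 asserted by the test, but it is not the ExportSnapshot implementation itself.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Sketch of the balancing scheme asserted by testBalanceSplit(): sort descending by size,
// then walk the groups left-to-right, right-to-left, and so on ("snake" order).
public class BalancedSplitsSketch {
  static List<List<Long>> balance(List<Long> sizes, int numGroups) {
    List<Long> sorted = new ArrayList<>(sizes);
    sorted.sort(Comparator.reverseOrder());
    List<List<Long>> groups = new ArrayList<>(numGroups);
    for (int g = 0; g < numGroups; g++) {
      groups.add(new ArrayList<>());
    }
    int g = 0;
    int direction = 1; // +1 walks forward across the groups, -1 walks backward
    for (Long size : sorted) {
      groups.get(g).add(size);
      g += direction;
      if (g == numGroups || g < 0) { // bounce off either end and reverse direction
        direction = -direction;
        g += direction;
      }
    }
    return groups;
  }

  public static void main(String[] args) {
    List<Long> sizes = new ArrayList<>();
    for (long i = 0; i <= 20; i++) {
      sizes.add(i);
    }
    // With 5 groups every group sums to 42: {20,11,10,1,0}, {19,12,9,2}, ...
    for (List<Long> group : balance(sizes, 5)) {
      System.out.println(group + " -> " + group.stream().mapToLong(Long::longValue).sum());
    }
  }
}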
* @see TestExportSnapshotV2NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV1NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotV1NoCluster.class); + HBaseClassTestRule.forClass(TestExportSnapshotV1NoCluster.class); private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotV1NoCluster.class); private HBaseCommonTestingUtil testUtil = new HBaseCommonTestingUtil(); @@ -91,26 +90,26 @@ static Path setup(FileSystem fs, HBaseCommonTestingUtil hctu) throws IOException */ @Test public void testSnapshotWithRefsExportFileSystemState() throws Exception { - final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), - this.fs, testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1("tableWithRefsV1", - "tableWithRefsV1"); + final SnapshotMock snapshotMock = + new SnapshotMock(testUtil.getConfiguration(), this.fs, testDir); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV1("tableWithRefsV1", "tableWithRefsV1"); testSnapshotWithRefsExportFileSystemState(this.fs, builder, testUtil, testDir); } /** - * Generates a couple of regions for the specified SnapshotMock, - * and then it will run the export and verification. + * Generates a couple of regions for the specified SnapshotMock, and then it will run the export + * and verification. */ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, - SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtil testUtil, Path testDir) - throws Exception { + SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtil testUtil, Path testDir) + throws Exception { Path[] r1Files = builder.addRegion(); Path[] r2Files = builder.addRegion(); builder.commit(); // remove references, only keep data files Set dataFiles = new HashSet<>(); - for (Path[] files: new Path[][]{r1Files, r2Files}) { + for (Path[] files : new Path[][] { r1Files, r2Files }) { for (Path file : files) { if (StoreFileInfo.isReference(file.getName())) { Pair referredToRegionAndFile = @@ -124,16 +123,16 @@ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, int snapshotFilesCount = dataFiles.size(); String snapshotName = builder.getSnapshotDescription().getName(); TableName tableName = builder.getTableDescriptor().getTableName(); - TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), - tableName, snapshotName, snapshotName, snapshotFilesCount, - testDir, getDestinationDir(fs, testUtil, testDir), false, null, true); + TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName, + snapshotName, snapshotName, snapshotFilesCount, testDir, + getDestinationDir(fs, testUtil, testDir), false, null, true); } static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtil hctu, Path testDir) - throws IOException { - Path path = new Path(new Path(testDir, "export-test"), - "export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(), - fs.getWorkingDirectory()); + throws IOException { + Path path = + new Path(new Path(testDir, "export-test"), "export-" + EnvironmentEdgeManager.currentTime()) + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(), fs.getWorkingDirectory(), testDir); return path; diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java index f2d3f627bae7..c07a4400c190 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java @@ -16,7 +16,9 @@ * limitations under the License. */ package org.apache.hadoop.hbase.snapshot; + import static org.junit.Assert.assertTrue; + import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; @@ -36,11 +38,11 @@ * Test Export Snapshot Tool; tests v2 snapshots. * @see TestExportSnapshotV1NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV2NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotV2NoCluster.class); + HBaseClassTestRule.forClass(TestExportSnapshotV2NoCluster.class); private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotV2NoCluster.class); @@ -61,8 +63,8 @@ public void before() throws Exception { public void testSnapshotWithRefsExportFileSystemState() throws Exception { final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), testDir.getFileSystem(testUtil.getConfiguration()), testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("tableWithRefsV2", - "tableWithRefsV2"); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2("tableWithRefsV2", "tableWithRefsV2"); TestExportSnapshotV1NoCluster.testSnapshotWithRefsExportFileSystemState(this.fs, builder, this.testUtil, this.testDir); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java index 5560555e9f33..fe380e683db0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -31,12 +31,12 @@ import org.junit.experimental.categories.Category; @Ignore // HBASE-24493 -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExportSnapshotWithTemporaryDirectory.class); + HBaseClassTestRule.forClass(TestExportSnapshotWithTemporaryDirectory.class); @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -54,8 +54,8 @@ public static void setUpBaseConf(Configuration conf) { Path tmpDir = null; try { FileSystem localFs = FileSystem.getLocal(conf); - tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()). - makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); + tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()) + .makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); } catch (IOException ioe) { throw new RuntimeException(ioe); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java index 4f0d3deebe20..4943b40d6a71 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +34,12 @@ * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobExportSnapshot extends TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobExportSnapshot.class); + HBaseClassTestRule.forClass(TestMobExportSnapshot.class); public static void setUpBaseConf(Configuration conf) { TestExportSnapshot.setUpBaseConf(conf); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java index 484f88afecf4..2fa686f768f1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ /** * Reruns TestMobExportSnapshot using MobExportSnapshot in secure mode. 
*/ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobSecureExportSnapshot extends TestMobExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobSecureExportSnapshot.class); + HBaseClassTestRule.forClass(TestMobSecureExportSnapshot.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java index ce1c4cb39a04..a2a588ac5724 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ /** * Reruns TestExportSnapshot using ExportSnapshot in secure mode. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestSecureExportSnapshot extends TestExportSnapshot { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureExportSnapshot.class); + HBaseClassTestRule.forClass(TestSecureExportSnapshot.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 336816e2b49e..72a73eab8311 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -68,8 +69,8 @@ /** * A command-line utility that reads, writes, and verifies data. Unlike - * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, - * and supports simultaneously writing and reading the same set of keys. + * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, and + * supports simultaneously writing and reading the same set of keys. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadTestTool extends AbstractHBaseTool { @@ -94,23 +95,21 @@ public class LoadTestTool extends AbstractHBaseTool { /** Usage string for the load option */ protected static final String OPT_USAGE_LOAD = - ":" + - "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; + ":" + "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the read option */ protected static final String OPT_USAGE_READ = - "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; + "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the update option */ - protected static final String OPT_USAGE_UPDATE = - "[:<#threads=" + DEFAULT_NUM_THREADS - + ">][:<#whether to ignore nonce collisions=0>]"; + protected static final String OPT_USAGE_UPDATE = "[:<#threads=" + + DEFAULT_NUM_THREADS + ">][:<#whether to ignore nonce collisions=0>]"; - protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " + - Arrays.toString(BloomType.values()); + protected static final String OPT_USAGE_BLOOM = + "Bloom filter type, one of " + Arrays.toString(BloomType.values()); - protected static final String OPT_USAGE_COMPRESSION = "Compression type, " + - "one of " + Arrays.toString(Compression.Algorithm.values()); + protected static final String OPT_USAGE_COMPRESSION = + "Compression type, " + "one of " + Arrays.toString(Compression.Algorithm.values()); protected static final String OPT_VERBOSE = "verbose"; @@ -121,12 +120,12 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; public static final String OPT_INMEMORY = "in_memory"; - public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; + public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; public static final String OPT_GENERATOR = "generator"; public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool." 
- + " Any args for this class can be passed as colon separated after class name"; + + " Any args for this class can be passed as colon separated after class name"; public static final String OPT_WRITER = "writer"; public static final String OPT_WRITER_USAGE = "The class for executing the write requests"; @@ -157,25 +156,25 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_ENCRYPTION = "encryption"; protected static final String OPT_ENCRYPTION_USAGE = - "Enables transparent encryption on the test table, one of " + - Arrays.toString(Encryption.getSupportedCiphers()); + "Enables transparent encryption on the test table, one of " + + Arrays.toString(Encryption.getSupportedCiphers()); public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server"; - protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE - = "Desired number of regions per region server. Defaults to 5."; + protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE = + "Desired number of regions per region server. Defaults to 5."; public static int DEFAULT_NUM_REGIONS_PER_SERVER = 5; public static final String OPT_REGION_REPLICATION = "region_replication"; protected static final String OPT_REGION_REPLICATION_USAGE = - "Desired number of replicas per region"; + "Desired number of replicas per region"; public static final String OPT_REGION_REPLICA_ID = "region_replica_id"; protected static final String OPT_REGION_REPLICA_ID_USAGE = - "Region replica id to do the reads from"; + "Region replica id to do the reads from"; public static final String OPT_MOB_THRESHOLD = "mob_threshold"; protected static final String OPT_MOB_THRESHOLD_USAGE = - "Desired cell size to exceed in bytes that will use the MOB write path"; + "Desired cell size to exceed in bytes that will use the MOB write path"; protected static final long DEFAULT_START_KEY = 0; @@ -222,7 +221,7 @@ public class LoadTestTool extends AbstractHBaseTool { private String superUser; private String userNames; - //This file is used to read authentication information in secure clusters. + // This file is used to read authentication information in secure clusters. private String authnFileName; private int numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER; @@ -232,21 +231,19 @@ public class LoadTestTool extends AbstractHBaseTool { private int mobThreshold = -1; // not set // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad, - // console tool itself should only be used from console. + // console tool itself should only be used from console. 
protected boolean isSkipInit = false; protected boolean isInitOnly = false; protected Cipher cipher = null; - protected String[] splitColonSeparated(String option, - int minNumCols, int maxNumCols) { + protected String[] splitColonSeparated(String option, int minNumCols, int maxNumCols) { String optVal = cmd.getOptionValue(option); String[] cols = optVal.split(COLON); if (cols.length < minNumCols || cols.length > maxNumCols) { - throw new IllegalArgumentException("Expected at least " - + minNumCols + " columns but no more than " + maxNumCols + - " in the colon-separated value '" + optVal + "' of the " + - "-" + option + " option"); + throw new IllegalArgumentException( + "Expected at least " + minNumCols + " columns but no more than " + maxNumCols + + " in the colon-separated value '" + optVal + "' of the " + "-" + option + " option"); } return cols; } @@ -260,22 +257,21 @@ public byte[][] getColumnFamilies() { } /** - * Apply column family options such as Bloom filters, compression, and data - * block encoding. + * Apply column family options such as Bloom filters, compression, and data block encoding. */ - protected void applyColumnFamilyOptions(TableName tableName, - byte[][] columnFamilies) throws IOException { + protected void applyColumnFamilyOptions(TableName tableName, byte[][] columnFamilies) + throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { TableDescriptor tableDesc = admin.getDescriptor(tableName); LOG.info("Disabling table " + tableName); admin.disableTable(tableName); for (byte[] cf : columnFamilies) { ColumnFamilyDescriptor columnDesc = tableDesc.getColumnFamily(cf); boolean isNewCf = columnDesc == null; - ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf ? - ColumnFamilyDescriptorBuilder.newBuilder(cf) : - ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); + ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf + ? 
ColumnFamilyDescriptorBuilder.newBuilder(cf) + : ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); if (bloomType != null) { columnDescBuilder.setBloomFilterType(bloomType); } @@ -292,11 +288,8 @@ protected void applyColumnFamilyOptions(TableName tableName, byte[] keyBytes = new byte[cipher.getKeyLength()]; Bytes.secureRandom(keyBytes); columnDescBuilder.setEncryptionType(cipher.getName()); - columnDescBuilder.setEncryptionKey( - EncryptionUtil.wrapKey(conf, - User.getCurrent().getShortName(), - new SecretKeySpec(keyBytes, - cipher.getName()))); + columnDescBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, + User.getCurrent().getShortName(), new SecretKeySpec(keyBytes, cipher.getName()))); } if (mobThreshold >= 0) { columnDescBuilder.setMobEnabled(true); @@ -317,8 +310,8 @@ protected void applyColumnFamilyOptions(TableName tableName, @Override protected void addOptions() { addOptNoArg("v", OPT_VERBOSE, "Will display a full readout of logs, including ZooKeeper"); - addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " + - "without port numbers"); + addOptWithArg(OPT_ZK_QUORUM, + "ZK quorum as comma-separated host names " + "without port numbers"); addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper"); addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write"); addOptWithArg(OPT_COLUMN_FAMILIES, "The name of the column families to use separated by comma"); @@ -329,20 +322,23 @@ protected void addOptions() { addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM); addOptWithArg(OPT_BLOOM_PARAM, "the parameter of bloom filter type"); addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION); - addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); - addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " + - "to tolerate before terminating all reader threads. The default is " + - MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); - addOptWithArg(OPT_MULTIGET, "Whether to use multi-gets as opposed to " + - "separate gets for every column in a row"); - addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " + - "reads and writes for concurrent write/read workload. The default " + - "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); - - addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " + - "separate puts for every column in a row"); - addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " + - "separate updates for every column in a row"); + addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, + HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); + addOptWithArg(OPT_MAX_READ_ERRORS, + "The maximum number of read errors " + + "to tolerate before terminating all reader threads. The default is " + + MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); + addOptWithArg(OPT_MULTIGET, + "Whether to use multi-gets as opposed to " + "separate gets for every column in a row"); + addOptWithArg(OPT_KEY_WINDOW, + "The 'key window' to maintain between " + + "reads and writes for concurrent write/read workload. 
The default " + "is " + + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); + + addOptNoArg(OPT_MULTIPUT, + "Whether to use multi-puts as opposed to " + "separate puts for every column in a row"); + addOptNoArg(OPT_BATCHUPDATE, + "Whether to use batch as opposed to " + "separate updates for every column in a row"); addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY); addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE); addOptWithArg(OPT_WRITER, OPT_WRITER_USAGE); @@ -350,16 +346,14 @@ protected void addOptions() { addOptWithArg(OPT_READER, OPT_READER_USAGE); addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write"); - addOptWithArg(OPT_START_KEY, "The first key to read/write " + - "(a 0-based index). The default value is " + - DEFAULT_START_KEY + "."); - addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " - + "already exists"); + addOptWithArg(OPT_START_KEY, "The first key to read/write " + + "(a 0-based index). The default value is " + DEFAULT_START_KEY + "."); + addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " + "already exists"); addOptWithArg(NUM_TABLES, "A positive integer number. When a number n is specified, load test " - + "tool will load n table parallely. -tn parameter value becomes " - + "table name prefix. Each table name is in format _1..._n"); + + "tool will load n table parallely. -tn parameter value becomes " + + "table name prefix. Each table name is in format _1..._n"); addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE); addOptNoArg(OPT_DEFERRED_LOG_FLUSH, OPT_DEFERRED_LOG_FLUSH_USAGE); @@ -376,22 +370,21 @@ protected CommandLineParser newParser() { return new DefaultParser() { @Override public CommandLine parse(Options opts, String[] args, Properties props, boolean stop) - throws ParseException { + throws ParseException { CommandLine cl = super.parse(opts, args, props, stop); - boolean isReadWriteUpdate = cmd.hasOption(OPT_READ) - || cmd.hasOption(OPT_WRITE) - || cmd.hasOption(OPT_UPDATE); + boolean isReadWriteUpdate = + cmd.hasOption(OPT_READ) || cmd.hasOption(OPT_WRITE) || cmd.hasOption(OPT_UPDATE); boolean isInitOnly = cmd.hasOption(OPT_INIT_ONLY); if (!isInitOnly && !isReadWriteUpdate) { throw new MissingOptionException("Must specify either -" + OPT_INIT_ONLY - + " or at least one of -" + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); + + " or at least one of -" + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); } if (isInitOnly && isReadWriteUpdate) { throw new AlreadySelectedException(OPT_INIT_ONLY + " cannot be specified with any of -" - + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); + + OPT_READ + ", -" + OPT_WRITE + ", -" + OPT_UPDATE); } if (isReadWriteUpdate && !cmd.hasOption(OPT_NUM_KEYS)) { @@ -407,8 +400,7 @@ public CommandLine parse(Options opts, String[] args, Properties props, boolean protected void processOptions(CommandLine cmd) { this.cmd = cmd; - tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, - DEFAULT_TABLE_NAME)); + tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME)); if (cmd.hasOption(OPT_COLUMN_FAMILIES)) { String[] list = cmd.getOptionValue(OPT_COLUMN_FAMILIES).split(","); @@ -428,10 +420,9 @@ protected void processOptions(CommandLine cmd) { deferredLogFlush = cmd.hasOption(OPT_DEFERRED_LOG_FLUSH); if (!isInitOnly) { - startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, - String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE); - long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, - Long.MAX_VALUE - startKey); + startKey = 
parseLong(cmd.getOptionValue(OPT_START_KEY, String.valueOf(DEFAULT_START_KEY)), 0, + Long.MAX_VALUE); + long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, Long.MAX_VALUE - startKey); endKey = startKey + numKeys; isSkipInit = cmd.hasOption(OPT_SKIP_INIT); System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]"); @@ -445,8 +436,7 @@ protected void processOptions(CommandLine cmd) { int colIndex = 0; minColsPerKey = 1; maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]); - int avgColDataSize = - parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); + int avgColDataSize = parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); minColDataSize = avgColDataSize / 2; maxColDataSize = avgColDataSize * 3 / 2; @@ -462,10 +452,8 @@ protected void processOptions(CommandLine cmd) { } System.out.println("Multi-puts: " + isMultiPut); - System.out.println("Columns per key: " + minColsPerKey + ".." - + maxColsPerKey); - System.out.println("Data size per column: " + minColDataSize + ".." - + maxColDataSize); + System.out.println("Columns per key: " + minColsPerKey + ".." + maxColsPerKey); + System.out.println("Data size per column: " + minColDataSize + ".." + maxColDataSize); } if (isUpdate) { @@ -496,18 +484,15 @@ protected void processOptions(CommandLine cmd) { } if (cmd.hasOption(OPT_MAX_READ_ERRORS)) { - maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), - 0, Integer.MAX_VALUE); + maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_KEY_WINDOW)) { - keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), - 0, Integer.MAX_VALUE); + keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_MULTIGET)) { - multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), - 0, Integer.MAX_VALUE); + multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), 0, Integer.MAX_VALUE); } System.out.println("Multi-gets (value of 1 means no multigets): " + multiGetBatchSize); @@ -538,16 +523,15 @@ protected void processOptions(CommandLine cmd) { private void parseColumnFamilyOptions(CommandLine cmd) { String dataBlockEncodingStr = cmd.getOptionValue(HFileTestUtil.OPT_DATA_BLOCK_ENCODING); - dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null : - DataBlockEncoding.valueOf(dataBlockEncodingStr); + dataBlockEncodingAlgo = + dataBlockEncodingStr == null ? null : DataBlockEncoding.valueOf(dataBlockEncodingStr); String compressStr = cmd.getOptionValue(OPT_COMPRESSION); - compressAlgo = compressStr == null ? Compression.Algorithm.NONE : - Compression.Algorithm.valueOf(compressStr); + compressAlgo = + compressStr == null ? Compression.Algorithm.NONE : Compression.Algorithm.valueOf(compressStr); String bloomStr = cmd.getOptionValue(OPT_BLOOM); - bloomType = bloomStr == null ? BloomType.ROW : - BloomType.valueOf(bloomStr); + bloomType = bloomStr == null ? 
BloomType.ROW : BloomType.valueOf(bloomStr); if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) { if (!cmd.hasOption(OPT_BLOOM_PARAM)) { @@ -570,9 +554,8 @@ public void initTestTable() throws IOException { durability = Durability.ASYNC_WAL; } - HBaseTestingUtil.createPreSplitLoadTestTable(conf, tableName, - getColumnFamilies(), compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, - regionReplication, durability); + HBaseTestingUtil.createPreSplitLoadTestTable(conf, tableName, getColumnFamilies(), compressAlgo, + dataBlockEncodingAlgo, numRegionsPerServer, regionReplication, durability); applyColumnFamilyOptions(tableName, getColumnFamilies()); } @@ -634,27 +617,27 @@ protected int loadTable() throws IOException { userOwner = User.createUserForTesting(conf, superUser, new String[0]); } } else { - args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs, 1, - clazzAndArgs.length); + args = clazzAndArgs.length == 1 + ? new String[0] + : Arrays.copyOfRange(clazzAndArgs, 1, clazzAndArgs.length); } dataGen.initialize(args); } else { // Default DataGenerator is MultiThreadedAction.DefaultDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, families); + minColsPerKey, maxColsPerKey, families); } if (userOwner != null) { LOG.info("Granting permissions for user " + userOwner.getShortName()); - Permission.Action[] actions = { - Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action[] actions = { Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE }; try { - AccessControlClient.grant(ConnectionFactory.createConnection(conf), - tableName, userOwner.getShortName(), null, null, actions); + AccessControlClient.grant(ConnectionFactory.createConnection(conf), tableName, + userOwner.getShortName(), null, null, actions); } catch (Throwable e) { - LOG.error(HBaseMarkers.FATAL, "Error in granting permission for the user " + - userOwner.getShortName(), e); + LOG.error(HBaseMarkers.FATAL, + "Error in granting permission for the user " + userOwner.getShortName(), e); return EXIT_FAILURE; } } @@ -691,7 +674,7 @@ protected int loadTable() throws IOException { if (isUpdate) { if (userOwner != null) { updaterThreads = new MultiThreadedUpdaterWithACL(dataGen, conf, tableName, updatePercent, - userOwner, userNames); + userOwner, userNames); } else { String updaterClass = null; if (cmd.hasOption(OPT_UPDATER)) { @@ -707,8 +690,8 @@ protected int loadTable() throws IOException { if (isRead) { if (userOwner != null) { - readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, - userNames); + readerThreads = + new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, userNames); } else { String readerClass = null; if (cmd.hasOption(OPT_READER)) { @@ -725,14 +708,12 @@ protected int loadTable() throws IOException { } if (isUpdate && isWrite) { - LOG.info("Concurrent write/update workload: making updaters aware of the " + - "write point"); + LOG.info("Concurrent write/update workload: making updaters aware of the " + "write point"); updaterThreads.linkToWriter(writerThreads); } if (isRead && (isUpdate || isWrite)) { - LOG.info("Concurrent write/read workload: making readers aware of the " + - "write point"); + LOG.info("Concurrent write/read workload: making readers aware of the " + "write point"); readerThreads.linkToWriter(isUpdate ? 
updaterThreads : writerThreads); } @@ -774,8 +755,8 @@ protected int loadTable() throws IOException { success = success && updaterThreads.getNumWriteFailures() == 0; } if (isRead) { - success = success && readerThreads.getNumReadErrors() == 0 - && readerThreads.getNumReadFailures() == 0; + success = + success && readerThreads.getNumReadErrors() == 0 && readerThreads.getNumReadFailures() == 0; } return success ? EXIT_SUCCESS : EXIT_FAILURE; } @@ -783,46 +764,46 @@ protected int loadTable() throws IOException { private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor(int.class, int.class, int.class, int.class, - byte[][].class); + Constructor constructor = + clazz.getConstructor(int.class, int.class, int.class, int.class, byte[][].class); return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, families); + minColsPerKey, maxColsPerKey, families); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class); + Constructor constructor = + clazz.getConstructor(LoadTestDataGenerator.class, Configuration.class, TableName.class); return (MultiThreadedWriter) constructor.newInstance(dataGen, conf, tableName); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); - return (MultiThreadedUpdater) constructor.newInstance( - dataGen, conf, tableName, updatePercent); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); + return (MultiThreadedUpdater) constructor.newInstance(dataGen, conf, tableName, + updatePercent); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); return (MultiThreadedReader) constructor.newInstance(dataGen, conf, tableName, verifyPercent); } catch (Exception e) { throw new IOException(e); @@ -834,15 +815,12 @@ public static void main(String[] args) { } /** - * When NUM_TABLES is specified, the function starts multiple worker threads - * which individually start a LoadTestTool 
instance to load a table. Each - * table name is in format <tn>_<index>. For example, "-tn test -num_tables 2" - * , table names will be "test_1", "test_2" - * + * When NUM_TABLES is specified, the function starts multiple worker threads which individually + * start a LoadTestTool instance to load a table. Each table name is in format <tn>_<index>. + * For example, "-tn test -num_tables 2" , table names will be "test_1", "test_2" * @throws IOException if one of the load tasks is unable to complete */ - private int parallelLoadTables() - throws IOException { + private int parallelLoadTables() throws IOException { // create new command args String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME); String[] newArgs = null; @@ -869,7 +847,7 @@ private int parallelLoadTables() List workers = new ArrayList<>(); for (int i = 0; i < numTables; i++) { String[] workerArgs = newArgs.clone(); - workerArgs[tableNameValueIndex] = tableName + "_" + (i+1); + workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1); WorkerThread worker = new WorkerThread(i, workerArgs); workers.add(worker); LOG.info(worker + " starting"); @@ -936,7 +914,7 @@ public void run() { } private void addAuthInfoToConf(Properties authConfig, Configuration conf, String owner, - String userList) throws IOException { + String userList) throws IOException { List users = new ArrayList<>(Arrays.asList(userList.split(","))); users.add(owner); for (String user : users) { diff --git a/hbase-mapreduce/src/test/resources/mapred-site.xml b/hbase-mapreduce/src/test/resources/mapred-site.xml index 787ffb75511c..b8949fef6a01 100644 --- a/hbase-mapreduce/src/test/resources/mapred-site.xml +++ b/hbase-mapreduce/src/test/resources/mapred-site.xml @@ -31,4 +31,3 @@ -Djava.awt.headless=true - diff --git a/hbase-metrics-api/README.txt b/hbase-metrics-api/README.txt index dfaa29f2e9f5..ddba89b506d3 100644 --- a/hbase-metrics-api/README.txt +++ b/hbase-metrics-api/README.txt @@ -75,4 +75,4 @@ References 1. https://hbase.apache.org/book.html#hbase.versioning 2. http://metrics.dropwizard.io/ 3. https://hadoop.apache.org/docs/r2.7.2/api/org/apache/hadoop/metrics2/package-summary.html -4. https://issues.apache.org/jira/browse/HBASE-9774 \ No newline at end of file +4. 
https://issues.apache.org/jira/browse/HBASE-9774 diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml index 088a6c010bc0..160588d3131f 100644 --- a/hbase-metrics-api/pom.xml +++ b/hbase-metrics-api/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics API HBase Metrics API descriptions - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +105,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java index 6e041590ee3a..78d9ade04236 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Counter extends Metric { /** * Increment {@code this} by {@code n}. - * * @param n The amount to increment. */ void increment(long n); @@ -47,7 +46,6 @@ public interface Counter extends Metric { /** * Decrement {@code this} by {@code n}. - * * @param n The amount to decrement. */ void decrement(long n); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java index ba171c2cab24..b20da2426296 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ /** * A metrics which measures a discrete value. - * * @param The value of the Gauge. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java index 891bc6df2ea1..da4ff89c59b7 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,14 +30,12 @@ public interface Histogram extends Metric { /** * Adds a new value to the distribution. 
- * * @param value The value to add */ void update(int value); /** * Adds a new value to the distribution. - * * @param value The value to add */ void update(long value); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java index 5f38a005b3e1..9217a2af4a4e 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Meter extends Metric { /** * Records {@code events} occurrences. - * * @param events Number of occurrences to record. */ void mark(long events); @@ -53,14 +52,13 @@ public interface Meter extends Metric { double getMeanRate(); /** - * Returns the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created. + * Returns the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created. *
<p/>
    * This rate has the same exponential decay factor as the one-minute load average in the {@code * top} Unix command. - * - * @return the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created + * @return the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created */ double getOneMinuteRate(); @@ -70,7 +68,6 @@ public interface Meter extends Metric { *
<p/>
    * This rate has the same exponential decay factor as the five-minute load average in the {@code * top} Unix command. - * * @return the five-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ @@ -82,7 +79,6 @@ public interface Meter extends Metric { *
<p/>
    * This rate has the same exponential decay factor as the fifteen-minute load average in the * {@code top} Unix command. - * * @return the fifteen-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java index 2f6d49e01fc9..e79a9f3631ac 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java index 33e989cfe015..9e7b13d89c8b 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Collection; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -53,8 +50,8 @@ public static MetricRegistries global() { public abstract void clear(); /** - * Create or return MetricRegistry with the given info. MetricRegistry will only be created - * if current reference count is 0. Otherwise ref counted is incremented, and an existing instance + * Create or return MetricRegistry with the given info. MetricRegistry will only be created if + * current reference count is 0. Otherwise ref counted is incremented, and an existing instance * will be returned. * @param info the info object for the MetricRegistrytry. * @return created or existing MetricRegistry. diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java index edc813d95b99..88c61079630c 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
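The MetricRegistries.create() Javadoc rewrapped above describes a reference-counted create-or-get contract: the first create() for a given MetricRegistryInfo builds the registry, later calls bump the reference count and return the same instance. A usage sketch against that contract; the MetricRegistryInfo argument values are illustrative only.

import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;

// Usage sketch: obtain (or re-obtain) a ref-counted registry from the global factory.
public class RegistryLifecycleSketch {
  public static void main(String[] args) {
    MetricRegistryInfo info = new MetricRegistryInfo("WAL", // metricsName
      "Metrics about the write-ahead log", // metricsDescription
      "RegionServer,sub=WAL", // metricsJmxContext
      "regionserver", // metricsContext
      false); // existingSource

    MetricRegistry first = MetricRegistries.global().create(info); // ref count 0 -> created
    MetricRegistry second = MetricRegistries.global().create(info); // same instance, count bumped
    System.out.println(first == second); // expected: true

    first.counter("appendCount").increment();
  }
}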
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.ArrayList; import java.util.List; import java.util.ServiceLoader; - import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -32,8 +29,8 @@ public final class MetricRegistriesLoader { private static final Logger LOG = LoggerFactory.getLogger(MetricRegistries.class); - private static final String defaultClass - = "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; + private static final String defaultClass = + "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; private MetricRegistriesLoader() { } @@ -64,7 +61,7 @@ static MetricRegistries load(List availableImplementations) { return impl; } else if (availableImplementations.isEmpty()) { try { - return ReflectionUtils.newInstance((Class)Class.forName(defaultClass)); + return ReflectionUtils.newInstance((Class) Class.forName(defaultClass)); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } @@ -78,7 +75,7 @@ static MetricRegistries load(List availableImplementations) { sb.append(factory.getClass()); } LOG.warn("Found multiple MetricRegistries implementations: " + sb - + ". Using first found implementation: " + availableImplementations.get(0)); + + ". Using first found implementation: " + availableImplementations.get(0)); return availableImplementations.get(0); } } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java index 3bd5f6cd844c..b70526e1c5a9 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.Optional; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Timer} used to measure durations and report rates. - * * @param name the name of the timer. * @return An instance of {@link Timer}. */ @@ -40,7 +38,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Histogram} used to measure a distribution of values. - * * @param name The name of the Histogram. * @return An instance of {@link Histogram}. */ @@ -49,7 +46,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Meter} used to measure durations and report distributions (a * combination of a {@link Timer} and a {@link Histogram}. 
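MetricRegistriesLoader.load(), reworked just above, discovers implementations with java.util.ServiceLoader, reflects the hard-coded default class when nothing is on the classpath, and warns before picking the first hit when several are found. A small sketch of the discovery step; the provider-configuration path in the comment follows the standard ServiceLoader contract rather than anything stated in this patch.

import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
import org.apache.hadoop.hbase.metrics.MetricRegistries;

// Sketch: collect every MetricRegistries implementation registered on the classpath under
// META-INF/services/org.apache.hadoop.hbase.metrics.MetricRegistries.
public class RegistriesDiscoverySketch {
  static List<MetricRegistries> discover() {
    List<MetricRegistries> found = new ArrayList<>();
    for (MetricRegistries registries : ServiceLoader.load(MetricRegistries.class)) {
      found.add(registries);
    }
    return found; // load() uses the first entry and logs a warning if there is more than one
  }
}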
- * * @param name The name of the Meter. * @return An instance of {@link Meter}. */ @@ -57,7 +53,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Counter} used to track a mutable number. - * * @param name The name of the Counter * @return An instance of {@link Counter}. */ @@ -66,7 +61,7 @@ public interface MetricRegistry extends MetricSet { /** * Register a {@link Gauge}. The Gauge will be invoked at a period defined by the implementation * of {@link MetricRegistry}. - * @param name The name of the Gauge. + * @param name The name of the Gauge. * @param gauge A callback to compute the current value. * @return the registered gauge, or the existing gauge */ @@ -75,7 +70,7 @@ public interface MetricRegistry extends MetricSet { /** * Registers the {@link Metric} with the given name if there does not exist one with the same * name. Returns the newly registered or existing Metric. - * @param name The name of the Metric. + * @param name The name of the Metric. * @param metric the metric to register * @return the registered metric, or the existing metrid */ @@ -96,7 +91,6 @@ public interface MetricRegistry extends MetricSet { /** * Removes the metric with the given name. - * * @param name the name of the metric * @return true if the metric is removed. */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java index be77c42985de..9d53a8cbf539 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java index c4396bd24d5a..d83453f6af89 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.yetus.audience.InterfaceAudience; /** * HBase Metrics are grouped in different MetricRegistry'ies. 
All metrics that correspond to a - * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. - * This class holds the name and description and JMX related context names for such group of - * metrics. + * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. This class holds the + * name and description and JMX related context names for such group of metrics. */ @InterfaceAudience.Private public class MetricRegistryInfo { @@ -37,12 +34,8 @@ public class MetricRegistryInfo { protected final String metricsJmxContext; protected final boolean existingSource; - public MetricRegistryInfo( - String metricsName, - String metricsDescription, - String metricsJmxContext, - String metricsContext, - boolean existingSource) { + public MetricRegistryInfo(String metricsName, String metricsDescription, String metricsJmxContext, + String metricsContext, boolean existingSource) { this.metricsName = metricsName; this.metricsDescription = metricsDescription; this.metricsContext = metricsContext; @@ -51,9 +44,8 @@ public MetricRegistryInfo( } /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ public String getMetricsContext() { @@ -68,16 +60,15 @@ public String getMetricsDescription() { } /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ public String getMetricsJmxContext() { return metricsJmxContext; } /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ public String getMetricsName() { return metricsName; @@ -102,11 +93,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return new HashCodeBuilder() - .append(metricsName) - .append(metricsDescription) - .append(metricsContext) - .append(metricsJmxContext) - .toHashCode(); + return new HashCodeBuilder().append(metricsName).append(metricsDescription) + .append(metricsContext).append(metricsJmxContext).toHashCode(); } } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java index 5e1c873ce8bd..60d7e9e39ddb 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A set of named metrics. 
- * * @see MetricRegistry#registerAll(MetricSet) */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -34,7 +31,6 @@ public interface MetricSet extends Metric { /** * A map of metric names to metrics. - * * @return the metrics */ Map getMetrics(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java index ecb01ad57c0e..e38302360696 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,19 +19,16 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; - import org.apache.yetus.audience.InterfaceAudience; /** - * This is a dummy annotation that forces javac to produce output for - * otherwise empty package-info.java. - * - *

The result is maven-compiler-plugin can properly identify the scope of - * changed files - * - * See more details in - * - * maven-compiler-plugin: incremental compilation broken + * This is a dummy annotation that forces javac to produce output for otherwise empty + * package-info.java. + * + * The result is maven-compiler-plugin can properly identify the scope of changed files + *
    + * See more details in + * maven-compiler-plugin: incremental compilation broken */ @Retention(RetentionPolicy.SOURCE) @InterfaceAudience.Private diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java index a7b9869a0d2d..26aee2804eea 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +40,6 @@ public interface Snapshot { /** * Returns the number of values in the snapshot. - * * @return the number of values */ long getCount(); @@ -57,77 +53,66 @@ public interface Snapshot { /** * Returns the value at the 25th percentile in the distribution. - * * @return the value at the 25th percentile */ long get25thPercentile(); /** * Returns the value at the 75th percentile in the distribution. - * * @return the value at the 75th percentile */ long get75thPercentile(); /** * Returns the value at the 90th percentile in the distribution. - * * @return the value at the 90th percentile */ long get90thPercentile(); /** * Returns the value at the 95th percentile in the distribution. - * * @return the value at the 95th percentile */ long get95thPercentile(); /** * Returns the value at the 98th percentile in the distribution. - * * @return the value at the 98th percentile */ long get98thPercentile(); /** * Returns the value at the 99th percentile in the distribution. - * * @return the value at the 99th percentile */ long get99thPercentile(); /** * Returns the value at the 99.9th percentile in the distribution. - * * @return the value at the 99.9th percentile */ long get999thPercentile(); /** * Returns the median value in the distribution. - * * @return the median value */ long getMedian(); /** * Returns the highest value in the snapshot. - * * @return the highest value */ long getMax(); /** * Returns the arithmetic mean of the values in the snapshot. - * * @return the arithmetic mean */ long getMean(); /** * Returns the lowest value in the snapshot. - * * @return the lowest value */ long getMin(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java index 30c64fb5ce4b..d8df01720de0 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,7 @@ public interface Timer extends Metric { /** * Update the timer with the given duration in given time unit. * @param duration the duration of the event - * @param unit the time unit for the duration + * @param unit the time unit for the duration */ void update(long duration, TimeUnit unit); diff --git a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java index 59f26999bd2c..b9df823e7565 100644 --- a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java +++ b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestMetricRegistriesLoader { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricRegistriesLoader.class); + HBaseClassTestRule.forClass(TestMetricRegistriesLoader.class); @Test public void testLoadSinleInstance() { @@ -51,8 +51,8 @@ public void testLoadMultipleInstances() { MetricRegistries loader1 = mock(MetricRegistries.class); MetricRegistries loader2 = mock(MetricRegistries.class); MetricRegistries loader3 = mock(MetricRegistries.class); - MetricRegistries instance = MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, - loader3)); + MetricRegistries instance = + MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, loader3)); // the load() returns the first instance assertEquals(loader1, instance); diff --git a/hbase-metrics/README.txt b/hbase-metrics/README.txt index d80064c2d6f0..6216fc8daebe 100644 --- a/hbase-metrics/README.txt +++ b/hbase-metrics/README.txt @@ -1 +1 @@ -See the documentation at hbase-metrics-api/README. \ No newline at end of file +See the documentation at hbase-metrics-api/README. 
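For readers unfamiliar with the hbase-metrics-api surface that the reformatting above touches, here is a minimal, self-contained sketch of how the interfaces are meant to be used. The MetricRegistries create() method name and the illustrative registry/metric names are assumptions; the other calls (global(), counter(), register() with a lambda, get(), increment(), getCount(), and the MetricRegistryInfo constructor) appear in the hunks above and in the tests.

import java.util.Optional;
import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.Metric;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;

public final class MetricsApiSketch {
  public static void main(String[] args) {
    // Group metrics for one subcomponent; the argument order mirrors the
    // MetricRegistryInfo(metricsName, metricsDescription, metricsJmxContext,
    // metricsContext, existingSource) constructor shown in the MetricRegistryInfo.java hunk.
    MetricRegistryInfo info = new MetricRegistryInfo("Example", "Example metrics",
      "Example,sub=Example", "example", false);
    // Ref-counted creation: builds the registry on first use, returns the existing
    // instance afterwards (the create() name is assumed from its javadoc above).
    MetricRegistry registry = MetricRegistries.global().create(info);

    // Counters, timers, meters and histograms are created on first access by name.
    Counter requests = registry.counter("requests");
    requests.increment();

    // Gauges are registered as callbacks, as in TestMetricRegistryImpl#testRegisterGaugeLambda.
    registry.register("queueLength", () -> 42L);

    // Metrics can be looked up again by name; get() returns an Optional.
    Optional<Metric> metric = registry.get("requests");
    metric.ifPresent(m -> System.out.println("requests = " + ((Counter) m).getCount()));
  }
}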
diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml index 5d7c247b81c0..30d8aea5931d 100644 --- a/hbase-metrics/pom.xml +++ b/hbase-metrics/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics Implementation HBase Metrics Implementation - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -157,6 +113,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java index 8021c0689398..ad30fbe1674d 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java index b5c52cf840cc..d2723a22e942 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,7 @@ package org.apache.hadoop.hbase.metrics.impl; import com.codahale.metrics.Meter; - import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,11 +37,13 @@ public DropwizardMeter(Meter meter) { this.meter = Objects.requireNonNull(meter); } - @Override public void mark() { + @Override + public void mark() { this.meter.mark(); } - @Override public void mark(long count) { + @Override + public void mark(long count) { this.meter.mark(count); } diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java index 81544607f5f7..b1b47e3904e8 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class FastLongHistogram { public static final int DEFAULT_NBINS = 255; public static final double[] DEFAULT_QUANTILES = - new double[]{0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999}; + new double[] { 0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999 }; /** * Bins is a class containing a list of buckets(or bins) for estimation histogram of some data. @@ -105,8 +105,8 @@ private int getIndex(long value) { return this.counts.length - 2; } // compute the position - return 1 + (int) ((value - this.binsMin) * (this.counts.length - 3) / - (this.binsMax - this.binsMin)); + return 1 + + (int) ((value - this.binsMin) * (this.counts.length - 3) / (this.binsMax - this.binsMin)); } @@ -245,7 +245,7 @@ public FastLongHistogram() { /** * Constructor. * @param numOfBins the number of bins for the histogram. A larger value results in more precise - * results but with lower efficiency, and vice versus. + * results but with lower efficiency, and vice versus. */ public FastLongHistogram(int numOfBins) { this.bins = new Bins(numOfBins); @@ -254,9 +254,9 @@ public FastLongHistogram(int numOfBins) { /** * Constructor setting the bins assuming a uniform distribution within a range. * @param numOfBins the number of bins for the histogram. A larger value results in more precise - * results but with lower efficiency, and vice versus. - * @param min lower bound of the region, inclusive. - * @param max higher bound of the region, inclusive. + * results but with lower efficiency, and vice versus. + * @param min lower bound of the region, inclusive. + * @param max higher bound of the region, inclusive. */ public FastLongHistogram(int numOfBins, long min, long max) { this(numOfBins); diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java index 2e0aa55808f6..c29b267e347a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are - * slow compared to this implementation, so we are using our implementation here. - * See HBASE-15222. + * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are slow + * compared to this implementation, so we are using our implementation here. See HBASE-15222. 
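To make the percentile surface concrete, a small illustrative sketch follows. Histogram#update(long), Histogram#snapshot() and the no-argument HistogramImpl constructor are assumptions here; only the Snapshot getters appear verbatim in the hunks above.

import org.apache.hadoop.hbase.metrics.Histogram;
import org.apache.hadoop.hbase.metrics.Snapshot;
import org.apache.hadoop.hbase.metrics.impl.HistogramImpl;

public final class HistogramSketch {
  public static void main(String[] args) {
    // HistogramImpl buckets values with FastLongHistogram rather than a Dropwizard reservoir.
    Histogram latencies = new HistogramImpl();
    for (long ms = 0; ms < 1000; ms++) {
      latencies.update(ms); // record one latency sample per millisecond value
    }
    Snapshot snap = latencies.snapshot();
    System.out.println("p50=" + snap.getMedian() + " p99=" + snap.get99thPercentile()
      + " max=" + snap.getMax());
  }
}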
*/ @InterfaceAudience.Private public class HistogramImpl implements Histogram { diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java index 3826e66093b6..39da41eeec00 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +21,6 @@ import java.util.Collections; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryFactory; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java index 6f9e16366aa7..5ebdf0d479a0 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java index 05e096304cf8..1c8927b15b3a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Gauge; import org.apache.hadoop.hbase.metrics.Histogram; @@ -97,7 +96,7 @@ public Metric register(String name, Metric metric) { @Override public Gauge register(String name, Gauge gauge) { - return (Gauge) register(name, (Metric)gauge); + return (Gauge) register(name, (Metric) gauge); } @Override diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java index 63131a100e92..19ec192211a8 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,22 +22,23 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import java.util.stream.Collectors; - import org.apache.yetus.audience.InterfaceAudience; /** - * A map of K to V, but does ref counting for added and removed values. The values are - * not added directly, but instead requested from the given Supplier if ref count == 0. Each put() - * call will increment the ref count, and each remove() will decrement it. The values are removed - * from the map iff ref count == 0. + * A map of K to V, but does ref counting for added and removed values. The values are not added + * directly, but instead requested from the given Supplier if ref count == 0. Each put() call will + * increment the ref count, and each remove() will decrement it. The values are removed from the map + * iff ref count == 0. */ @InterfaceAudience.Private class RefCountingMap { private ConcurrentHashMap> map = new ConcurrentHashMap<>(); + private static class Payload { V v; int refCount; + Payload(V v) { this.v = v; this.refCount = 1; // create with ref count = 1 @@ -46,7 +46,7 @@ private static class Payload { } V put(K k, Supplier supplier) { - return ((Payload)map.compute(k, (k1, oldValue) -> { + return ((Payload) map.compute(k, (k1, oldValue) -> { if (oldValue != null) { oldValue.refCount++; return oldValue; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java index 03a8c65915e2..3ad560a3d74f 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.Timer; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java index 5b5e26f13a80..0d86e2aeb496 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,17 @@ public class TestCounterImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCounterImpl.class); + HBaseClassTestRule.forClass(TestCounterImpl.class); private Counter counter; - @Before public void setup() { + @Before + public void setup() { this.counter = new CounterImpl(); } - @Test public void testCounting() { + @Test + public void testCounting() { counter.increment(); assertEquals(1L, counter.getCount()); counter.increment(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java index 072f18a3b155..9af666b095f1 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,15 +34,17 @@ public class TestDropwizardMeter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDropwizardMeter.class); + HBaseClassTestRule.forClass(TestDropwizardMeter.class); private Meter meter; - @Before public void setup() { + @Before + public void setup() { this.meter = Mockito.mock(Meter.class); } - @Test public void test() { + @Test + public void test() { DropwizardMeter dwMeter = new DropwizardMeter(this.meter); dwMeter.mark(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java index 120f91169c5a..ca5c6a476685 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -34,12 +33,12 @@ /** * Testcases for FastLongHistogram. 
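As a pointer to how the class under test behaves, a brief sketch. FastLongHistogram#add(long, long) is assumed from the implementation; the constructors and getNumAtOrBelow(long) are visible in the hunks above.

import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;

public final class FastLongHistogramSketch {
  public static void main(String[] args) {
    // 100 bins spread uniformly across the expected value range [0, 50].
    FastLongHistogram hist = new FastLongHistogram(100, 0, 50);
    for (long v : new long[] { 0, 10, 20, 30, 40, 50 }) {
      hist.add(v, 10); // record each value ten times
    }
    // Estimates how many recorded samples fall at or below 20 (bin-based, so approximate).
    System.out.println(hist.getNumAtOrBelow(20));
  }
}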
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestFastLongHistogram { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFastLongHistogram.class); + HBaseClassTestRule.forClass(TestFastLongHistogram.class); private static void doTestUniform(FastLongHistogram hist) { long[] VALUES = { 0, 10, 20, 30, 40, 50 }; @@ -97,7 +96,6 @@ public void testAdaptionOfChange() { } } - @Test public void testGetNumAtOrBelow() { long[] VALUES = { 1, 10, 20, 30, 40, 50 }; @@ -126,7 +124,6 @@ public void testGetNumAtOrBelow() { assertEquals(601, h.getNumAtOrBelow(Long.MAX_VALUE)); } - @Test public void testSameValues() { FastLongHistogram hist = new FastLongHistogram(100); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java index 52d29fc700cc..e1ed9cf6a5be 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,18 +34,17 @@ public class TestGauge { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGauge.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGauge.class); @Test public void testGetValue() { SimpleGauge gauge = new SimpleGauge(); - assertEquals(0, (long)gauge.getValue()); + assertEquals(0, (long) gauge.getValue()); gauge.setValue(1000L); - assertEquals(1000L, (long)gauge.getValue()); + assertEquals(1000L, (long) gauge.getValue()); } /** @@ -55,7 +54,8 @@ private static class SimpleGauge implements Gauge { private final AtomicLong value = new AtomicLong(0L); - @Override public Long getValue() { + @Override + public Long getValue() { return this.value.get(); } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java index 9be3fcee20f4..70d9598570d6 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestHistogramImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHistogramImpl.class); + HBaseClassTestRule.forClass(TestHistogramImpl.class); @Test public void testUpdate() { diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java index 1115529a051c..56b3f0d6a9ee 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestMetricRegistryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricRegistryImpl.class); + HBaseClassTestRule.forClass(TestMetricRegistryImpl.class); private MetricRegistryInfo info; private MetricRegistryImpl registry; @@ -59,7 +59,7 @@ public void testCounter() { counter.increment(42L); Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -72,7 +72,7 @@ public Long getValue() { }); Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -81,7 +81,7 @@ public void testRegisterGaugeLambda() { registry.register("gaugeLambda", () -> 42L); Optional metric = registry.get("gaugeLambda"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -106,7 +106,7 @@ public void testRegister() { Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -119,8 +119,7 @@ public void testDoubleRegister() { Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); - + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); Counter c1 = registry.counter("mycounter"); Counter c2 = registry.counter("mycounter"); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java index c5ed1edb9eb8..2d29ff17943b 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestRefCountingMap { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRefCountingMap.class); + HBaseClassTestRule.forClass(TestRefCountingMap.class); private RefCountingMap map; @@ -59,7 +59,7 @@ public void testPutGet() { @Test public void testPutMulti() { String v1 = map.put("foo", () -> "foovalue"); - String v2 = map.put("foo", () -> "foovalue2"); + String v2 = map.put("foo", () -> "foovalue2"); String v3 = map.put("foo", () -> "foovalue3"); String v = map.get("foo"); @@ -127,7 +127,6 @@ public void testClear() { assertEquals(0, map.size()); } - @Test public void testKeySet() { map.put("foo", () -> "foovalue"); @@ -151,6 +150,6 @@ public void testValues() { assertEquals(3, values.size()); Lists.newArrayList("foovalue", "foovalue3", "foovalue4").stream() - .forEach(v -> assertTrue(values.contains(v))); + .forEach(v -> assertTrue(values.contains(v))); } } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java index d9d3632b7310..1bfa02fc4b64 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestTimerImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimerImpl.class); + HBaseClassTestRule.forClass(TestTimerImpl.class); private Timer timer; diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml index 21e4ff479277..f3f692856679 100644 --- a/hbase-procedure/pom.xml +++ b/hbase-procedure/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -30,35 +30,6 @@ hbase-procedure Apache HBase - Procedure Procedure Framework - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -145,14 +116,43 @@ test - hadoop-hdfs-client org.apache.hadoop + hadoop-hdfs-client + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + - - + + skipProcedureTests @@ -169,7 +169,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 53bfba62daf8..4d1d5c1ccd9a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,13 +68,12 @@ public void signalAll() { } // ========================================================================== - // Add related + // Add related // ========================================================================== /** - * Add the procedure to the queue. - * NOTE: this method is called with the sched lock held. + * Add the procedure to the queue. NOTE: this method is called with the sched lock held. * @param procedure the Procedure to add - * @param addFront true if the item should be added to the front of the queue + * @param addFront true if the item should be added to the front of the queue */ protected abstract void enqueue(Procedure procedure, boolean addFront); @@ -131,11 +129,10 @@ protected void push(final Procedure procedure, final boolean addFront, final boo } // ========================================================================== - // Poll related + // Poll related // ========================================================================== /** - * Fetch one Procedure from the queue - * NOTE: this method is called with the sched lock held. + * Fetch one Procedure from the queue NOTE: this method is called with the sched lock held. * @return the Procedure to execute, or null if nothing is available. */ protected abstract Procedure dequeue(); @@ -187,18 +184,18 @@ public Procedure poll(final long nanos) { } // ========================================================================== - // Utils + // Utils // ========================================================================== /** - * Returns the number of elements in this queue. - * NOTE: this method is called with the sched lock held. + * Returns the number of elements in this queue. NOTE: this method is called with the sched lock + * held. * @return the number of elements in this queue. */ protected abstract int queueSize(); /** - * Returns true if there are procedures available to process. - * NOTE: this method is called with the sched lock held. + * Returns true if there are procedures available to process. NOTE: this method is called with the + * sched lock held. * @return true if there are procedures available to process, otherwise false. */ protected abstract boolean queueHasRunnables(); @@ -224,7 +221,7 @@ public boolean hasRunnables() { } // ============================================================================ - // TODO: Metrics + // TODO: Metrics // ============================================================================ public long getPollCalls() { return pollCalls; @@ -235,13 +232,13 @@ public long getNullPollCalls() { } // ========================================================================== - // Procedure Events + // Procedure Events // ========================================================================== /** - * Wake up all of the given events. - * Note that we first take scheduler lock and then wakeInternal() synchronizes on the event. - * Access should remain package-private. Use ProcedureEvent class to wake/suspend events. + * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal() + * synchronizes on the event. Access should remain package-private. 
Use ProcedureEvent class to + * wake/suspend events. * @param events the list of events to wake */ public void wakeEvents(ProcedureEvent[] events) { @@ -276,7 +273,7 @@ protected void wakeProcedure(final Procedure procedure) { } // ========================================================================== - // Internal helpers + // Internal helpers // ========================================================================== protected void schedLock() { schedulerLock.lock(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java index 796a8e47c918..c16b79bde61b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,8 +66,8 @@ class CompletedProcedureCleaner extends ProcedureInMemoryChore> completedMap, - Map nonceKeysToProcIdsMap) { + IdLock procExecutionLock, Map> completedMap, + Map nonceKeysToProcIdsMap) { // set the timeout interval that triggers the periodic-procedure super(conf.getInt(CLEANER_INTERVAL_CONF_KEY, DEFAULT_CLEANER_INTERVAL)); this.completed = completedMap; @@ -138,4 +138,4 @@ protected void periodicExecute(final TEnvironment env) { // procedure id. store.cleanup(); } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java index d5f1ee7f6c3f..40e3ea074e05 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public void setClientAckTime(long clientAckTime) { } public boolean isExpired(long now, long evictTtl, long evictAckTtl) { - return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) || - (now - procedure.getLastUpdate()) >= evictTtl; + return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) + || (now - procedure.getLastUpdate()) >= evictTtl; } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java index 3fc975078604..a00a710a959b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ */ @InterfaceAudience.Private class DelayedProcedure - extends DelayedUtil.DelayedContainerWithTimestamp> { + extends DelayedUtil.DelayedContainerWithTimestamp> { public DelayedProcedure(Procedure procedure) { super(procedure, procedure.getTimeoutTimestamp()); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java index 40eb22c3b56e..63b8be7a4791 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public FailedProcedure() { } public FailedProcedure(long procId, String procName, User owner, NonceKey nonceKey, - IOException exception) { + IOException exception) { this.procName = procName; setProcId(procId); setState(ProcedureState.ROLLEDBACK); @@ -54,7 +54,7 @@ public String getProcName() { @Override protected Procedure[] execute(TEnvironment env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { throw new UnsupportedOperationException(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java index dfe8e7d3c537..5561661d73b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure - * operation. + * Used internally signaling failed queue of a remote procedure operation. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java index 32b4922a0b17..21350b56c23d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java index bfeb7398fa06..e6b7d7d03976 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.util.function.Function; @@ -142,8 +141,10 @@ public boolean tryExclusiveLock(Procedure proc) { * @return whether we should wake the procedures waiting on the lock here. */ public boolean releaseExclusiveLock(Procedure proc) { - if (exclusiveLockOwnerProcedure == null || - exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()) { + if ( + exclusiveLockOwnerProcedure == null + || exclusiveLockOwnerProcedure.getProcId() != proc.getProcId() + ) { // We are not the lock owner, it is probably inherited from the parent procedures. return false; } @@ -187,7 +188,7 @@ public Stream filterWaitingQueue(Predicate predicate) { @Override public String toString() { - return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") + - ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); + return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") + + ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java index 33d2a38c80aa..d3723e1a35a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java index 8599af90d387..1b23a5c7373d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum LockType { - EXCLUSIVE, SHARED + EXCLUSIVE, + SHARED } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java index 81d1e7212299..1503d8d6710b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -30,9 +29,9 @@ public class LockedResource { private final int sharedLockCount; private final List> waitingProcedures; - public LockedResource(LockedResourceType resourceType, String resourceName, - LockType lockType, Procedure exclusiveLockOwnerProcedure, - int sharedLockCount, List> waitingProcedures) { + public LockedResource(LockedResourceType resourceType, String resourceName, LockType lockType, + Procedure exclusiveLockOwnerProcedure, int sharedLockCount, + List> waitingProcedures) { this.resourceType = resourceType; this.resourceName = resourceName; this.lockType = lockType; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java index 55d195b3920f..12f899d7565b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum LockedResourceType { - SERVER, NAMESPACE, TABLE, REGION, PEER, META + SERVER, + NAMESPACE, + TABLE, + REGION, + PEER, + META } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java index d2e13f135361..a6faf501682b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key AFTER queuing dispatch. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key AFTER queuing dispatch. 
*/ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java index 5cdbcd417dea..95265d00a7ba 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java index 9deac23e1546..502d7ee0b6e1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * The target server passed is null. + * Used internally signaling failed queue of a remote procedure operation. The target server passed + * is null. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java index 2d6e065da675..4f0bc6ce6b29 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 579c60998765..28b2aa87f656 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -113,9 +113,9 @@ public abstract class Procedure implements Comparable

    Bypassing a procedure is not like aborting. Aborting a procedure will trigger - * a rollback. And since the {@link #abort(Object)} method is overrideable - * Some procedures may have chosen to ignore the aborting. + * If bypass is set to true, when executing it will return null when {@link #doExecute(Object)} is + * called to finish the procedure and release any locks it may currently hold. The bypass does + * cleanup around the Procedure as far as the Procedure framework is concerned. It does not clean + * any internal state that the Procedure's themselves may have set. That is for the Procedures to + * do themselves when bypass is called. They should override bypass and do their cleanup in the + * overridden bypass method (be sure to call the parent bypass to ensure proper processing). + *
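For illustration only (this is not part of the patch), the override pattern recommended above might look like the following sketch. The class name, the generic environment parameter and the resourceLeased field are all invented; the only real API used is Procedure.bypass(TEnvironment), which the subclass delegates to.

import org.apache.hadoop.hbase.procedure2.Procedure;

// Hypothetical procedure that leases some external resource during execute().
abstract class ResourceHoldingProcedure<TEnv> extends Procedure<TEnv> {
  private volatile boolean resourceLeased; // invented internal state for this sketch

  @Override
  protected void bypass(TEnv env) {
    // Undo our own state first: the framework only clears its own bookkeeping.
    resourceLeased = false;
    // Then call the parent so the framework-level bypass flag is still set.
    super.bypass(env);
  }
}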

    + *

    + * Bypassing a procedure is not like aborting. Aborting a procedure will trigger a rollback. And + * since the {@link #abort(Object)} method is overrideable Some procedures may have chosen to + * ignore the aborting. */ private volatile boolean bypass = false; @@ -176,13 +176,13 @@ public boolean isBypass() { } /** - * Set the bypass to true. - * Only called in {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. - * DO NOT use this method alone, since we can't just bypass one single procedure. We need to - * bypass its ancestor too. If your Procedure has set state, it needs to undo it in here. - * @param env Current environment. May be null because of context; e.g. pretty-printing - * procedure WALs where there is no 'environment' (and where Procedures that require - * an 'environment' won't be run. + * Set the bypass to true. Only called in + * {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. DO NOT use + * this method alone, since we can't just bypass one single procedure. We need to bypass its + * ancestor too. If your Procedure has set state, it needs to undo it in here. + * @param env Current environment. May be null because of context; e.g. pretty-printing procedure + * WALs where there is no 'environment' (and where Procedures that require an + * 'environment' won't be run. */ protected void bypass(TEnvironment env) { this.bypass = true; @@ -201,60 +201,54 @@ protected final void skipPersistence() { } /** - * The main code of the procedure. It must be idempotent since execute() - * may be called multiple times in case of machine failure in the middle - * of the execution. + * The main code of the procedure. It must be idempotent since execute() may be called multiple + * times in case of machine failure in the middle of the execution. * @param env the environment passed to the ProcedureExecutor * @return a set of sub-procedures to run or ourselves if there is more work to do or null if the * procedure is done. - * @throws ProcedureYieldException the procedure will be added back to the queue and retried - * later. - * @throws InterruptedException the procedure will be added back to the queue and retried later. + * @throws ProcedureYieldException the procedure will be added back to the queue and retried + * later. + * @throws InterruptedException the procedure will be added back to the queue and retried + * later. * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself - * and has set itself up waiting for an external event to wake it back up again. + * and has set itself up waiting for an external event to wake + * it back up again. */ protected abstract Procedure[] execute(TEnvironment env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; /** - * The code to undo what was done by the execute() code. - * It is called when the procedure or one of the sub-procedures failed or an - * abort was requested. It should cleanup all the resources created by - * the execute() call. The implementation must be idempotent since rollback() - * may be called multiple time in case of machine failure in the middle - * of the execution. + * The code to undo what was done by the execute() code. It is called when the procedure or one of + * the sub-procedures failed or an abort was requested. It should cleanup all the resources + * created by the execute() call. 
The implementation must be idempotent since rollback() may be + * called multiple time in case of machine failure in the middle of the execution. * @param env the environment passed to the ProcedureExecutor - * @throws IOException temporary failure, the rollback will retry later + * @throws IOException temporary failure, the rollback will retry later * @throws InterruptedException the procedure will be added back to the queue and retried later */ - protected abstract void rollback(TEnvironment env) - throws IOException, InterruptedException; + protected abstract void rollback(TEnvironment env) throws IOException, InterruptedException; /** - * The abort() call is asynchronous and each procedure must decide how to deal - * with it, if they want to be abortable. The simplest implementation - * is to have an AtomicBoolean set in the abort() method and then the execute() - * will check if the abort flag is set or not. - * abort() may be called multiple times from the client, so the implementation - * must be idempotent. - * - *

    NOTE: abort() is not like Thread.interrupt(). It is just a notification - * that allows the procedure implementor abort. + * The abort() call is asynchronous and each procedure must decide how to deal with it, if they + * want to be abortable. The simplest implementation is to have an AtomicBoolean set in the + * abort() method and then the execute() will check if the abort flag is set or not. abort() may + * be called multiple times from the client, so the implementation must be idempotent. + *
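As a concrete but hypothetical illustration (not part of the patch) of the AtomicBoolean pattern just described, a minimal abortable procedure could look like the sketch below. Object is used as a stand-in environment type and the class name is invented; the overridden signatures are the ones declared in this file.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class AbortableNoopProcedure extends Procedure<Object> {
  private final AtomicBoolean aborted = new AtomicBoolean(false);

  @Override
  protected Procedure<Object>[] execute(Object env)
    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    if (aborted.get()) {
      // The abort request is only honoured here; abort() itself just records the flag.
      return null;
    }
    // ... idempotent work would go here; execute() may be re-invoked after a crash ...
    return null; // no sub-procedures, the procedure is done
  }

  @Override
  protected void rollback(Object env) throws IOException, InterruptedException {
    // Undo whatever execute() did; must be idempotent as well.
  }

  @Override
  protected boolean abort(Object env) {
    aborted.set(true);
    return true; // the notification was accepted
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // nothing to persist in this sketch
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // nothing to restore in this sketch
  }
}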

    + * NOTE: abort() is not like Thread.interrupt(). It is just a notification that allows the + * procedure implementor abort. */ protected abstract boolean abort(TEnvironment env); /** - * The user-level code of the procedure may have some state to - * persist (e.g. input arguments or current position in the processing state) to - * be able to resume on failure. + * The user-level code of the procedure may have some state to persist (e.g. input arguments or + * current position in the processing state) to be able to resume on failure. * @param serializer stores the serializable state */ protected abstract void serializeStateData(ProcedureStateSerializer serializer) throws IOException; /** - * Called on store load to allow the user to decode the previously serialized - * state. + * Called on store load to allow the user to decode the previously serialized state. * @param serializer contains the serialized state */ protected abstract void deserializeStateData(ProcedureStateSerializer serializer) @@ -321,9 +315,9 @@ protected boolean holdLock(TEnvironment env) { /** * This is used in conjunction with {@link #holdLock(Object)}. If {@link #holdLock(Object)} - * returns true, the procedure executor will call acquireLock() once and thereafter - * not call {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls - * release/acquire around each invocation of {@link #execute(Object)}. + * returns true, the procedure executor will call acquireLock() once and thereafter not call + * {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls release/acquire + * around each invocation of {@link #execute(Object)}. * @see #holdLock(Object) * @return true if the procedure has the lock, false otherwise. */ @@ -332,61 +326,57 @@ public final boolean hasLock() { } /** - * Called when the procedure is loaded for replay. - * The procedure implementor may use this method to perform some quick - * operation before replay. - * e.g. failing the procedure if the state on replay may be unknown. + * Called when the procedure is loaded for replay. The procedure implementor may use this method + * to perform some quick operation before replay. e.g. failing the procedure if the state on + * replay may be unknown. */ protected void beforeReplay(TEnvironment env) { // no-op } /** - * Called when the procedure is ready to be added to the queue after - * the loading/replay operation. + * Called when the procedure is ready to be added to the queue after the loading/replay operation. */ protected void afterReplay(TEnvironment env) { // no-op } /** - * Called when the procedure is marked as completed (success or rollback). - * The procedure implementor may use this method to cleanup in-memory states. - * This operation will not be retried on failure. If a procedure took a lock, - * it will have been released when this method runs. + * Called when the procedure is marked as completed (success or rollback). The procedure + * implementor may use this method to cleanup in-memory states. This operation will not be retried + * on failure. If a procedure took a lock, it will have been released when this method runs. */ protected void completionCleanup(TEnvironment env) { // no-op } /** - * By default, the procedure framework/executor will try to run procedures start to finish. - * Return true to make the executor yield between each execution step to - * give other procedures a chance to run. + * By default, the procedure framework/executor will try to run procedures start to finish. 
Return + * true to make the executor yield between each execution step to give other procedures a chance + * to run. * @param env the environment passed to the ProcedureExecutor - * @return Return true if the executor should yield on completion of an execution step. - * Defaults to return false. + * @return Return true if the executor should yield on completion of an execution step. Defaults + * to return false. */ protected boolean isYieldAfterExecutionStep(TEnvironment env) { return false; } /** - * By default, the executor will keep the procedure result around util - * the eviction TTL is expired. The client can cut down the waiting time - * by requesting that the result is removed from the executor. - * In case of system started procedure, we can force the executor to auto-ack. + * By default, the executor will keep the procedure result around util the eviction TTL is + * expired. The client can cut down the waiting time by requesting that the result is removed from + * the executor. In case of system started procedure, we can force the executor to auto-ack. * @param env the environment passed to the ProcedureExecutor - * @return true if the executor should wait the client ack for the result. - * Defaults to return true. + * @return true if the executor should wait the client ack for the result. Defaults to return + * true. */ protected boolean shouldWaitClientAck(TEnvironment env) { return true; } /** - * Override this method to provide procedure specific counters for submitted count, failed - * count and time histogram. + * Override this method to provide procedure specific counters for submitted count, failed count + * and time histogram. * @param env The environment passed to the procedure executor * @return Container object for procedure related metric */ @@ -422,7 +412,7 @@ protected void updateMetricsOnSubmit(TEnvironment env) { * TODO: As any of the sub-procedures on failure rolls back all procedures in the stack, including * successfully finished siblings, this function may get called twice in certain cases for certain * procedures. Explore further if this can be called once. - * @param env The environment passed to the procedure executor + * @param env The environment passed to the procedure executor * @param runtime Runtime of the procedure in milliseconds * @param success true if procedure is completed successfully */ @@ -467,13 +457,9 @@ protected StringBuilder toStringSimpleSB() { } /* - * TODO - * Enable later when this is being used. - * Currently owner not used. - if (hasOwner()) { - sb.append(", owner="); - sb.append(getOwner()); - }*/ + * TODO Enable later when this is being used. Currently owner not used. if (hasOwner()) { + * sb.append(", owner="); sb.append(getOwner()); } + */ sb.append(", state="); // pState for Procedure State as opposed to any other kind. toStringState(sb); @@ -532,8 +518,7 @@ protected void toStringState(StringBuilder builder) { } /** - * Extend the toString() information with the procedure details - * e.g. className and parameters + * Extend the toString() information with the procedure details e.g. className and parameters * @param builder the string builder to use to append the proc specific information */ protected void toStringClassDetails(StringBuilder builder) { @@ -541,11 +526,11 @@ protected void toStringClassDetails(StringBuilder builder) { } // ========================================================================== - // Those fields are unchanged after initialization. + // Those fields are unchanged after initialization. 
// - // Each procedure will get created from the user or during - // ProcedureExecutor.start() during the load() phase and then submitted - // to the executor. these fields will never be changed after initialization + // Each procedure will get created from the user or during + // ProcedureExecutor.start() during the load() phase and then submitted + // to the executor. these fields will never be changed after initialization // ========================================================================== public long getProcId() { return procId; @@ -620,15 +605,14 @@ public void setOwner(User owner) { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected void setSubmittedTime(long submittedTime) { this.submittedTime = submittedTime; } // ========================================================================== - // runtime state - timeout related + // runtime state - timeout related // ========================================================================== /** * @param timeout timeout interval in msec @@ -649,8 +633,7 @@ public int getTimeout() { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected void setLastUpdate(long lastUpdate) { this.lastUpdate = lastUpdate; @@ -668,9 +651,8 @@ public long getLastUpdate() { } /** - * Timeout of the next timeout. - * Called by the ProcedureExecutor if the procedure has timeout set and - * the procedure is in the waiting queue. + * Timeout of the next timeout. Called by the ProcedureExecutor if the procedure has timeout set + * and the procedure is in the waiting queue. * @return the timestamp of the next timeout. */ protected long getTimeoutTimestamp() { @@ -678,7 +660,7 @@ protected long getTimeoutTimestamp() { } // ========================================================================== - // runtime state + // runtime state // ========================================================================== /** * @return the time elapsed between the last update and the start time of the procedure. @@ -704,8 +686,8 @@ protected void setResult(byte[] result) { /** * Will only be called when loading procedures from procedure store, where we need to record - * whether the procedure has already held a lock. Later we will call - * {@link #restoreLock(Object)} to actually acquire the lock. + * whether the procedure has already held a lock. Later we will call {@link #restoreLock(Object)} + * to actually acquire the lock. */ final void lockedWhenLoading() { this.lockedWhenLoading = true; @@ -724,12 +706,12 @@ public boolean isLockedWhenLoading() { } // ============================================================================================== - // Runtime state, updated every operation by the ProcedureExecutor + // Runtime state, updated every operation by the ProcedureExecutor // - // There is always 1 thread at the time operating on the state of the procedure. - // The ProcedureExecutor may check and set states, or some Procecedure may - // update its own state. but no concurrent updates. we use synchronized here - // just because the procedure can get scheduled on different executor threads on each step. + // There is always 1 thread at the time operating on the state of the procedure. 
+ // The ProcedureExecutor may check and set states, or some Procecedure may + // update its own state. but no concurrent updates. we use synchronized here + // just because the procedure can get scheduled on different executor threads on each step. // ============================================================================================== /** @@ -839,8 +821,7 @@ public synchronized RemoteProcedureException getException() { protected synchronized void setChildrenLatch(int numChildren) { this.childrenLatch = numChildren; if (LOG.isTraceEnabled()) { - LOG.trace("CHILD LATCH INCREMENT SET " + - this.childrenLatch, new Throwable(this.toString())); + LOG.trace("CHILD LATCH INCREMENT SET " + this.childrenLatch, new Throwable(this.toString())); } } @@ -859,7 +840,7 @@ protected synchronized void incChildrenLatch() { * Called by the ProcedureExecutor to notify that one of the sub-procedures has completed. */ private synchronized boolean childrenCountDown() { - assert childrenLatch > 0: this; + assert childrenLatch > 0 : this; boolean b = --childrenLatch == 0; if (LOG.isTraceEnabled()) { LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString())); @@ -868,8 +849,7 @@ private synchronized boolean childrenCountDown() { } /** - * Try to set this procedure into RUNNABLE state. - * Succeeds if all subprocedures/children are done. + * Try to set this procedure into RUNNABLE state. Succeeds if all subprocedures/children are done. * @return True if we were able to move procedure to RUNNABLE state. */ synchronized boolean tryRunnable() { @@ -891,8 +871,8 @@ protected synchronized int getChildrenLatch() { } /** - * Called by the RootProcedureState on procedure execution. - * Each procedure store its stack-index positions. + * Called by the RootProcedureState on procedure execution. Each procedure store its stack-index + * positions. */ protected synchronized void addStackIndex(final int index) { if (stackIndexes == null) { @@ -915,8 +895,7 @@ protected synchronized boolean removeStackIndex() { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected synchronized void setStackIndexes(final List stackIndexes) { this.stackIndexes = new int[stackIndexes.size()]; @@ -934,16 +913,17 @@ protected synchronized int[] getStackIndexes() { } // ========================================================================== - // Internal methods - called by the ProcedureExecutor + // Internal methods - called by the ProcedureExecutor // ========================================================================== /** * Internal method called by the ProcedureExecutor that starts the user-level code execute(). * @throws ProcedureSuspendedException This is used when procedure wants to halt processing and - * skip out without changing states or releasing any locks held. + * skip out without changing states or releasing any locks + * held. */ protected Procedure[] doExecute(TEnvironment env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { try { updateTimestamp(); if (bypass) { @@ -959,8 +939,7 @@ protected Procedure[] doExecute(TEnvironment env) /** * Internal method called by the ProcedureExecutor that starts the user-level code rollback(). 
*/ - protected void doRollback(TEnvironment env) - throws IOException, InterruptedException { + protected void doRollback(TEnvironment env) throws IOException, InterruptedException { try { updateTimestamp(); if (bypass) { @@ -1052,7 +1031,7 @@ public int compareTo(final Procedure other) { } // ========================================================================== - // misc utils + // misc utils // ========================================================================== /** @@ -1073,7 +1052,7 @@ public static long getProcIdHashCode(long procId) { * Helper to lookup the root Procedure ID given a specified procedure. */ protected static Long getRootProcedureId(Map> procedures, - Procedure proc) { + Procedure proc) { while (proc.hasParent()) { proc = procedures.get(proc.getParentProcId()); if (proc == null) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java index 1b6b93db70c2..9d6f9a4965c0 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java index c5f02e950bc5..ad42634edb95 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,20 +11,18 @@ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUTKey WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.procedure2; import java.util.ArrayDeque; - import org.apache.yetus.audience.InterfaceAudience; /** - * Type class. - * For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque gives - * more understanding that it's a queue of waiting procedures. + * Type class. For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque + * gives more understanding that it's a queue of waiting procedures. 
*/ @InterfaceAudience.Private public class ProcedureDeque extends ArrayDeque { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index e9bc91986b86..667d045e3ac3 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @@ -61,12 +60,12 @@ public synchronized void suspend() { } /** - * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the - * event as ready. - * See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not synchronized. + * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the event + * as ready. See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not + * synchronized. */ public void wake(AbstractProcedureScheduler procedureScheduler) { - procedureScheduler.wakeEvents(new ProcedureEvent[]{this}); + procedureScheduler.wakeEvents(new ProcedureEvent[] { this }); } /** @@ -77,7 +76,7 @@ public void wake(AbstractProcedureScheduler procedureScheduler) { * event. */ public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedureScheduler, - Procedure proc) { + Procedure proc) { if (suspendedProcedures.stream().anyMatch(p -> p.getProcId() == proc.getProcId())) { wake(procedureScheduler); return true; @@ -89,22 +88,19 @@ public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedure * Wakes up all the given events and puts the procedures waiting on them back into * ProcedureScheduler queues. */ - public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent ... events) { + public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent... events) { scheduler.wakeEvents(events); } /** - * Only to be used by ProcedureScheduler implementations. - * Reason: To wake up multiple events, locking sequence is - * schedLock --> synchronized (event) - * To wake up an event, both schedLock() and synchronized(event) are required. - * The order is schedLock() --> synchronized(event) because when waking up multiple events - * simultaneously, we keep the scheduler locked until all procedures suspended on these events - * have been added back to the queue (Maybe it's not required? Evaluate!) - * To avoid deadlocks, we want to keep the locking order same even when waking up single event. - * That's why, {@link #wake(AbstractProcedureScheduler)} above uses the same code path as used - * when waking up multiple events. - * Access should remain package-private. + * Only to be used by ProcedureScheduler implementations. Reason: To wake up multiple events, + * locking sequence is schedLock --> synchronized (event) To wake up an event, both schedLock() + * and synchronized(event) are required. The order is schedLock() --> synchronized(event) because + * when waking up multiple events simultaneously, we keep the scheduler locked until all + * procedures suspended on these events have been added back to the queue (Maybe it's not + * required? Evaluate!) To avoid deadlocks, we want to keep the locking order same even when + * waking up single event. 
That's why, {@link #wake(AbstractProcedureScheduler)} above uses the + * same code path as used when waking up multiple events. Access should remain package-private. */ public synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) { if (ready && !suspendedProcedures.isEmpty()) { @@ -122,8 +118,8 @@ public synchronized void wakeInternal(AbstractProcedureScheduler procedureSchedu } /** - * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it - * here for tests. + * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here + * for tests. */ public ProcedureDeque getSuspendedProcedures() { return suspendedProcedures; @@ -131,7 +127,7 @@ public ProcedureDeque getSuspendedProcedures() { @Override public synchronized String toString() { - return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + ", " + - suspendedProcedures; + return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + ", " + + suspendedProcedures; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java index 93cd355c4e0d..b52510286d96 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index b4e3d1e03e49..7252edce5426 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,17 +62,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Thread Pool that executes the submitted procedures. - * The executor has a ProcedureStore associated. - * Each operation is logged and on restart the pending procedures are resumed. - * - * Unless the Procedure code throws an error (e.g. invalid user input) - * the procedure will complete (at some point in time), On restart the pending - * procedures are resumed and the once failed will be rolledback. - * - * The user can add procedures to the executor via submitProcedure(proc) - * check for the finished state via isFinished(procId) - * and get the result via getResult(procId) + * Thread Pool that executes the submitted procedures. The executor has a ProcedureStore associated. + * Each operation is logged and on restart the pending procedures are resumed. Unless the Procedure + * code throws an error (e.g. invalid user input) the procedure will complete (at some point in + * time), On restart the pending procedures are resumed and the once failed will be rolledback. 
The + * user can add procedures to the executor via submitProcedure(proc) check for the finished state + * via isFinished(procId) and get the result via getResult(procId) */ @InterfaceAudience.Private public class ProcedureExecutor { @@ -82,19 +77,19 @@ public class ProcedureExecutor { private static final boolean DEFAULT_CHECK_OWNER_SET = false; public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY = - "hbase.procedure.worker.keep.alive.time.msec"; + "hbase.procedure.worker.keep.alive.time.msec"; private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1); public static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl"; static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min - public static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl"; + public static final String EVICT_ACKED_TTL_CONF_KEY = "hbase.procedure.cleaner.acked.evict.ttl"; static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min /** - * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to - * break PE having it fail at various junctures. When non-null, testing is set to an instance of - * the below internal {@link Testing} class with flags set for the particular test. + * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to break PE + * having it fail at various junctures. When non-null, testing is set to an instance of the below + * internal {@link Testing} class with flags set for the particular test. */ volatile Testing testing = null; @@ -114,8 +109,8 @@ public static class Testing { /** * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978 - * is about a case where memory-state was being set after store to WAL where a crash could - * cause us to get stuck. This flag allows killing at what was a vulnerable time. + * is about a case where memory-state was being set after store to WAL where a crash could cause + * us to get stuck. This flag allows killing at what was a vulnerable time. */ protected volatile boolean killAfterStoreUpdate = false; protected volatile boolean toggleKillAfterStoreUpdate = false; @@ -155,29 +150,31 @@ protected boolean shouldKillAfterStoreUpdate(final boolean isSuspended) { public interface ProcedureExecutorListener { void procedureLoaded(long procId); + void procedureAdded(long procId); + void procedureFinished(long procId); } /** - * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. - * Once a Root-Procedure completes (success or failure), the result will be added to this map. - * The user of ProcedureExecutor should call getResult(procId) to get the result. + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. Once a + * Root-Procedure completes (success or failure), the result will be added to this map. The user + * of ProcedureExecutor should call getResult(procId) to get the result. */ private final ConcurrentHashMap> completed = new ConcurrentHashMap<>(); /** * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. - * The RootProcedureState contains the execution stack of the Root-Procedure, - * It is added to the map by submitProcedure() and removed on procedure completion. + * The RootProcedureState contains the execution stack of the Root-Procedure, It is added to the + * map by submitProcedure() and removed on procedure completion. 
*/ private final ConcurrentHashMap> rollbackStack = new ConcurrentHashMap<>(); /** - * Helper map to lookup the live procedures by ID. - * This map contains every procedure. root-procedures and subprocedures. + * Helper map to lookup the live procedures by ID. This map contains every procedure. + * root-procedures and subprocedures. */ private final ConcurrentHashMap> procedures = new ConcurrentHashMap<>(); @@ -195,34 +192,31 @@ public interface ProcedureExecutorListener { /** * Created in the {@link #init(int, boolean)} method. Destroyed in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private ThreadGroup threadGroup; /** - * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private CopyOnWriteArrayList workerThreads; /** * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private TimeoutExecutorThread timeoutExecutor; /** * WorkerMonitor check for stuck workers and new worker thread when necessary, for example if * there is no worker to assign meta, it will new worker thread for it, so it is very important. - * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore - * and so on, some tasks may execute for a long time so will block other tasks like - * WorkerMonitor, so use a dedicated thread for executing WorkerMonitor. + * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore and + * so on, some tasks may execute for a long time so will block other tasks like WorkerMonitor, so + * use a dedicated thread for executing WorkerMonitor. 
*/ private TimeoutExecutorThread workerMonitorExecutor; @@ -257,7 +251,7 @@ public interface ProcedureExecutorListener { private final IdLock procExecutionLock = new IdLock(); public ProcedureExecutor(final Configuration conf, final TEnvironment environment, - final ProcedureStore store) { + final ProcedureStore store) { this(conf, environment, store, new SimpleProcedureScheduler()); } @@ -272,8 +266,8 @@ private void forceUpdateProcedure(long procId) throws IOException { Procedure proc = procedures.get(procId); if (proc != null) { if (proc.isFinished() && proc.hasParent() && isRootFinished(proc)) { - LOG.debug("Procedure {} has already been finished and parent is succeeded," + - " skip force updating", proc); + LOG.debug("Procedure {} has already been finished and parent is succeeded," + + " skip force updating", proc); return; } } else { @@ -299,7 +293,7 @@ private void forceUpdateProcedure(long procId) throws IOException { } public ProcedureExecutor(final Configuration conf, final TEnvironment environment, - final ProcedureStore store, final ProcedureScheduler scheduler) { + final ProcedureStore store, final ProcedureScheduler scheduler) { this.environment = environment; this.scheduler = scheduler; this.store = store; @@ -399,7 +393,7 @@ private void restoreLocks() { } private void loadProcedures(ProcedureIterator procIter, boolean abortOnCorruption) - throws IOException { + throws IOException { // 1. Build the rollback stack int runnableCount = 0; int failedCount = 0; @@ -556,9 +550,9 @@ private void loadProcedures(ProcedureIterator procIter, boolean abortOnCorruptio * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and * ensure a single executor, and start the procedure replay to resume and recover the previous * pending and in-progress procedures. - * @param numThreads number of threads available for procedure execution. + * @param numThreads number of threads available for procedure execution. * @param abortOnCorruption true if you want to abort your service in case a corrupted procedure - * is found on replay. otherwise false. + * is found on replay. otherwise false. 
*/ public void init(int numThreads, boolean abortOnCorruption) throws IOException { // We have numThreads executor + one timer thread used for timing out @@ -566,7 +560,7 @@ public void init(int numThreads, boolean abortOnCorruption) throws IOException { this.corePoolSize = numThreads; this.maxPoolSize = 10 * numThreads; LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}", - corePoolSize, maxPoolSize); + corePoolSize, maxPoolSize); this.threadGroup = new ThreadGroup("PEWorkerGroup"); this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout"); @@ -615,7 +609,7 @@ public void startWorkers() throws IOException { LOG.trace("Start workers {}", workerThreads.size()); timeoutExecutor.start(); workerMonitorExecutor.start(); - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.start(); } @@ -647,7 +641,7 @@ public void join() { workerMonitorExecutor.awaitTermination(); // stop the worker threads - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.awaitTermination(); } @@ -656,8 +650,8 @@ public void join() { try { threadGroup.destroy(); } catch (IllegalThreadStateException e) { - LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", - this.threadGroup, e.getMessage()); + LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, + e.getMessage()); // This dumps list of threads on STDOUT. this.threadGroup.list(); } @@ -673,12 +667,12 @@ public void join() { public void refreshConfiguration(final Configuration conf) { this.conf = conf; - setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, - DEFAULT_WORKER_KEEP_ALIVE_TIME), TimeUnit.MILLISECONDS); + setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, DEFAULT_WORKER_KEEP_ALIVE_TIME), + TimeUnit.MILLISECONDS); } // ========================================================================== - // Accessors + // Accessors // ========================================================================== public boolean isRunning() { return running.get(); @@ -724,7 +718,7 @@ public long getKeepAliveTime(final TimeUnit timeUnit) { } // ========================================================================== - // Submit/Remove Chores + // Submit/Remove Chores // ========================================================================== /** @@ -753,12 +747,12 @@ public boolean removeChore(@Nullable ProcedureInMemoryChore chore) } // ========================================================================== - // Nonce Procedure helpers + // Nonce Procedure helpers // ========================================================================== /** * Create a NonceKey from the specified nonceGroup and nonce. * @param nonceGroup the group to use for the {@link NonceKey} - * @param nonce the nonce to use in the {@link NonceKey} + * @param nonce the nonce to use in the {@link NonceKey} * @return the generated NonceKey */ public NonceKey createNonceKey(final long nonceGroup, final long nonce) { @@ -766,13 +760,10 @@ public NonceKey createNonceKey(final long nonceGroup, final long nonce) { } /** - * Register a nonce for a procedure that is going to be submitted. - * A procId will be reserved and on submitProcedure(), - * the procedure with the specified nonce will take the reserved ProcId. - * If someone already reserved the nonce, this method will return the procId reserved, - * otherwise an invalid procId will be returned. 
and the caller should procede - * and submit the procedure. - * + * Register a nonce for a procedure that is going to be submitted. A procId will be reserved and + * on submitProcedure(), the procedure with the specified nonce will take the reserved ProcId. If + * someone already reserved the nonce, this method will return the procId reserved, otherwise an + * invalid procId will be returned. and the caller should procede and submit the procedure. * @param nonceKey A unique identifier for this operation from the client or process. * @return the procId associated with the nonce, if any otherwise an invalid procId. */ @@ -796,9 +787,10 @@ public long registerNonce(final NonceKey nonceKey) { // we found a registered nonce, but the procedure may not have been submitted yet. // since the client expect the procedure to be submitted, spin here until it is. final boolean traceEnabled = LOG.isTraceEnabled(); - while (isRunning() && - !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) && - nonceKeysToProcIdsMap.containsKey(nonceKey)) { + while ( + isRunning() && !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) + && nonceKeysToProcIdsMap.containsKey(nonceKey) + ) { if (traceEnabled) { LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be submitted"); } @@ -828,16 +820,15 @@ public void unregisterNonceIfProcedureWasNotSubmitted(final NonceKey nonceKey) { } /** - * If the failure failed before submitting it, we may want to give back the - * same error to the requests with the same nonceKey. - * - * @param nonceKey A unique identifier for this operation from the client or process - * @param procName name of the procedure, used to inform the user + * If the failure failed before submitting it, we may want to give back the same error to the + * requests with the same nonceKey. + * @param nonceKey A unique identifier for this operation from the client or process + * @param procName name of the procedure, used to inform the user * @param procOwner name of the owner of the procedure, used to inform the user * @param exception the failure to report to the user */ public void setFailureResultForNonce(NonceKey nonceKey, String procName, User procOwner, - IOException exception) { + IOException exception) { if (nonceKey == null) { return; } @@ -848,15 +839,15 @@ public void setFailureResultForNonce(NonceKey nonceKey, String procName, User pr } completed.computeIfAbsent(procId, (key) -> { - Procedure proc = new FailedProcedure<>(procId.longValue(), - procName, procOwner, nonceKey, exception); + Procedure proc = + new FailedProcedure<>(procId.longValue(), procName, procOwner, nonceKey, exception); return new CompletedProcedureRetainer<>(proc); }); } // ========================================================================== - // Submit/Abort Procedure + // Submit/Abort Procedure // ========================================================================== /** * Add a new root-procedure to the executor. @@ -868,52 +859,46 @@ public long submitProcedure(Procedure proc) { } /** - * Bypass a procedure. If the procedure is set to bypass, all the logic in - * execute/rollback will be ignored and it will return success, whatever. - * It is used to recover buggy stuck procedures, releasing the lock resources - * and letting other procedures run. Bypassing one procedure (and its ancestors will - * be bypassed automatically) may leave the cluster in a middle state, e.g. region - * not assigned, or some hdfs files left behind. 
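The nonce flow described just above (createNonceKey / registerNonce / submitProcedure / unregisterNonceIfProcedureWasNotSubmitted) might be wired up roughly as in the sketch below. The helper class is invented, and it assumes that an "invalid" procId from registerNonce is reported as a negative value.

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.util.NonceKey;

public final class NonceSubmitSketch {
  /** Submit proc at most once for the given (nonceGroup, nonce) pair. */
  static long submitOnce(ProcedureExecutor<Object> executor, Procedure<Object> proc,
    long nonceGroup, long nonce) {
    NonceKey nonceKey = executor.createNonceKey(nonceGroup, nonce);
    long oldProcId = executor.registerNonce(nonceKey);
    if (oldProcId >= 0) {
      // Someone already reserved this nonce; reuse the procedure id they got.
      return oldProcId;
    }
    try {
      return executor.submitProcedure(proc, nonceKey);
    } catch (RuntimeException e) {
      // We reserved the nonce but never submitted: release it so later retries can proceed.
      executor.unregisterNonceIfProcedureWasNotSubmitted(nonceKey);
      throw e;
    }
  }
}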
After getting rid of those stuck procedures, - * the operators may have to do some clean up on hdfs or schedule some assign procedures - * to let region online. DO AT YOUR OWN RISK. + * Bypass a procedure. If the procedure is set to bypass, all the logic in execute/rollback will + * be ignored and it will return success, whatever. It is used to recover buggy stuck procedures, + * releasing the lock resources and letting other procedures run. Bypassing one procedure (and its + * ancestors will be bypassed automatically) may leave the cluster in a middle state, e.g. region + * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures, the + * operators may have to do some clean up on hdfs or schedule some assign procedures to let region + * online. DO AT YOUR OWN RISK. *
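A hypothetical operator-side use of bypassProcedure(), matching the warning above, could look like this; the 30 second lock wait is an arbitrary choice and the helper is not part of the patch.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public final class BypassSketch {
  /** Last-resort recovery for a single stuck procedure and (recursively) its children. */
  static boolean bypassStuckProcedure(ProcedureExecutor<Object> executor, long stuckPid)
    throws IOException {
    // lockWait=30s, force=false, recursive=true; a false result means the procedure was still
    // running or had live children, and the operator may retry with force=true plus a master
    // restart, as the javadoc above describes.
    return executor.bypassProcedure(Arrays.asList(stuckPid), 30000, false, true).get(0);
  }
}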

    - * A procedure can be bypassed only if - * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT - * or it is a root procedure without any child. - * 2. No other worker thread is executing it - * 3. No child procedure has been submitted - * + * A procedure can be bypassed only if 1. The procedure is in state of RUNNABLE, WAITING, + * WAITING_TIMEOUT or it is a root procedure without any child. 2. No other worker thread is + * executing it 3. No child procedure has been submitted *

    - * If all the requirements are meet, the procedure and its ancestors will be - * bypassed and persisted to WAL. - * + * If all the requirements are meet, the procedure and its ancestors will be bypassed and + * persisted to WAL. *

    - * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. - * TODO: What about WAITING_TIMEOUT? - * @param pids the procedure id - * @param lockWait time to wait lock - * @param force if force set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during executing(due to bug, mostly) - * In this case, bypassing the procedure is not enough, since it is already stuck - * there. We need to restart the master after bypassing, and letting the problematic - * procedure to execute wth bypass=true, so in that condition, the procedure can be - * successfully bypassed. + * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. TODO: What + * about WAITING_TIMEOUT? + * @param pids the procedure id + * @param lockWait time to wait lock + * @param force if force set to true, we will bypass the procedure even if it is executing. + * This is for procedures which can't break out during executing(due to bug, + * mostly) In this case, bypassing the procedure is not enough, since it is + * already stuck there. We need to restart the master after bypassing, and + * letting the problematic procedure to execute wth bypass=true, so in that + * condition, the procedure can be successfully bypassed. * @param recursive We will do an expensive search for children of each pid. EXPENSIVE! * @return true if bypass success * @throws IOException IOException */ public List bypassProcedure(List pids, long lockWait, boolean force, - boolean recursive) - throws IOException { + boolean recursive) throws IOException { List result = new ArrayList(pids.size()); - for(long pid: pids) { + for (long pid : pids) { result.add(bypassProcedure(pid, lockWait, force, recursive)); } return result; } boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recursive) - throws IOException { + throws IOException { Preconditions.checkArgument(lockWait > 0, "lockWait should be positive"); final Procedure procedure = getProcedure(pid); if (procedure == null) { @@ -921,16 +906,16 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur return false; } - LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", - procedure, lockWait, override, recursive); + LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", procedure, lockWait, + override, recursive); IdLock.Entry lockEntry = procExecutionLock.tryLockEntry(procedure.getProcId(), lockWait); if (lockEntry == null && !override) { - LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", lockWait, + procedure, override); return false; } else if (lockEntry == null) { - LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", lockWait, + procedure, override); } try { // check whether the procedure is already finished @@ -944,9 +929,9 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur // EXPENSIVE. Checks each live procedure of which there could be many!!! // Is there another way to get children of a procedure? 
LOG.info("Recursive bypass on children of pid={}", procedure.getProcId()); - this.procedures.forEachValue(1 /*Single-threaded*/, + this.procedures.forEachValue(1 /* Single-threaded */, // Transformer - v -> v.getParentProcId() == procedure.getProcId()? v: null, + v -> v.getParentProcId() == procedure.getProcId() ? v : null, // Consumer v -> { try { @@ -962,12 +947,13 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur } // If the procedure has no parent or no child, we are safe to bypass it in whatever state - if (procedure.hasParent() && procedure.getState() != ProcedureState.RUNNABLE + if ( + procedure.hasParent() && procedure.getState() != ProcedureState.RUNNABLE && procedure.getState() != ProcedureState.WAITING - && procedure.getState() != ProcedureState.WAITING_TIMEOUT) { + && procedure.getState() != ProcedureState.WAITING_TIMEOUT + ) { LOG.debug("Bypassing procedures in RUNNABLE, WAITING and WAITING_TIMEOUT states " - + "(with no parent), {}", - procedure); + + "(with no parent), {}", procedure); // Question: how is the bypass done here? return false; } @@ -984,7 +970,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur current = getProcedure(parentID); } - //wake up waiting procedure, already checked there is no child + // wake up waiting procedure, already checked there is no child if (procedure.getState() == ProcedureState.WAITING) { procedure.setState(ProcedureState.RUNNABLE); store.update(procedure); @@ -1007,8 +993,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur // need to restart the master. With the procedure set to bypass, the procedureExecutor // will bypass it and won't get stuck again. LOG.debug("Bypassing {} and its ancestors successfully, but since it is already running, " - + "skipping add to queue", - procedure); + + "skipping add to queue", procedure); } return true; @@ -1021,11 +1006,11 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur /** * Add a new root-procedure to the executor. - * @param proc the new procedure to execute. + * @param proc the new procedure to execute. * @param nonceKey the registered unique identifier for this operation from the client or process. * @return the procedure id, that can be used to monitor the operation */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "FindBugs is blind to the check-for-null") public long submitProcedure(Procedure proc, NonceKey nonceKey) { Preconditions.checkArgument(lastProcId.get() >= 0); @@ -1109,8 +1094,8 @@ private long pushProcedure(Procedure proc) { } /** - * Send an abort notification the specified procedure. - * Depending on the procedure implementation the abort can be considered or ignored. + * Send an abort notification the specified procedure. Depending on the procedure implementation + * the abort can be considered or ignored. * @param procId the procedure to abort * @return true if the procedure exists and has received the abort, otherwise false. */ @@ -1119,9 +1104,9 @@ public boolean abort(long procId) { } /** - * Send an abort notification to the specified procedure. - * Depending on the procedure implementation, the abort can be considered or ignored. - * @param procId the procedure to abort + * Send an abort notification to the specified procedure. 
Depending on the procedure + * implementation, the abort can be considered or ignored. + * @param procId the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if the procedure exists and has received the abort, otherwise false. */ @@ -1137,7 +1122,7 @@ public boolean abort(long procId, boolean mayInterruptIfRunning) { } // ========================================================================== - // Executor query helpers + // Executor query helpers // ========================================================================== public Procedure getProcedure(final long procId) { return procedures.get(procId); @@ -1161,9 +1146,8 @@ public Procedure getResult(long procId) { } /** - * Return true if the procedure is finished. - * The state may be "completed successfully" or "failed and rolledback". - * Use getResult() to check the state or get the result data. + * Return true if the procedure is finished. The state may be "completed successfully" or "failed + * and rolledback". Use getResult() to check the state or get the result data. * @param procId the ID of the procedure to check * @return true if the procedure execution is finished, otherwise false. */ @@ -1212,9 +1196,9 @@ public Procedure getResultOrProcedure(long procId) { /** * Check if the user is this procedure's owner * @param procId the target procedure - * @param user the user - * @return true if the user is the owner of the procedure, - * false otherwise or the owner is unknown. + * @param user the user + * @return true if the user is the owner of the procedure, false otherwise or the owner is + * unknown. */ public boolean isProcedureOwner(long procId, User user) { if (user == null) { @@ -1264,7 +1248,7 @@ public List> getProcedures() { } // ========================================================================== - // Listeners helpers + // Listeners helpers // ========================================================================== public void registerListener(ProcedureExecutorListener listener) { this.listeners.add(listener); @@ -1276,7 +1260,7 @@ public boolean unregisterListener(ProcedureExecutorListener listener) { private void sendProcedureLoadedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureLoaded(procId); } catch (Throwable e) { @@ -1288,7 +1272,7 @@ private void sendProcedureLoadedNotification(final long procId) { private void sendProcedureAddedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureAdded(procId); } catch (Throwable e) { @@ -1300,7 +1284,7 @@ private void sendProcedureAddedNotification(final long procId) { private void sendProcedureFinishedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureFinished(procId); } catch (Throwable e) { @@ -1311,7 +1295,7 @@ private void sendProcedureFinishedNotification(final long procId) { } // ========================================================================== - // Procedure IDs helpers + // Procedure IDs helpers // ========================================================================== private long 
nextProcId() { long procId = lastProcId.incrementAndGet(); @@ -1343,7 +1327,7 @@ Long getRootProcedureId(Procedure proc) { } // ========================================================================== - // Executions + // Executions // ========================================================================== private void executeProcedure(Procedure proc) { if (proc.isFinished()) { @@ -1579,9 +1563,8 @@ private void cleanupAfterRollbackOneStep(Procedure proc) { } /** - * Execute the rollback of the procedure step. - * It updates the store with the new state (stack index) - * or will remove completly the procedure in case it is a child. + * Execute the rollback of the procedure step. It updates the store with the new state (stack + * index) or will remove completly the procedure in case it is a child. */ private LockState executeRollback(Procedure proc) { try { @@ -1619,36 +1602,38 @@ private void yieldProcedure(Procedure proc) { /** * Executes procedure *

 * <ul>
- *     <li>Calls the doExecute() of the procedure</li>
- *     <li>If the procedure execution didn't fail (i.e. valid user input)
- *       <ul>
- *         <li>...and returned subprocedures
- *           <ul>
- *             <li>The subprocedures are initialized.</li>
- *             <li>The subprocedures are added to the store</li>
- *             <li>The subprocedures are added to the runnable queue</li>
- *             <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete</li>
- *           </ul>
- *         </li>
- *         <li>...if there are no subprocedure
- *           <ul>
- *             <li>the procedure completed successfully</li>
- *             <li>if there is a parent (WAITING)</li>
- *             <li>the parent state will be set to RUNNABLE</li>
- *           </ul>
- *         </li>
- *       </ul>
- *     </li>
- *     <li>In case of failure
- *       <ul>
- *         <li>The store is updated with the new state</li>
- *         <li>The executor (caller of this method) will start the rollback of the procedure</li>
- *       </ul>
- *     </li>
+ *   <li>Calls the doExecute() of the procedure</li>
+ *   <li>If the procedure execution didn't fail (i.e. valid user input)
+ *     <ul>
+ *       <li>...and returned subprocedures
+ *         <ul>
+ *           <li>The subprocedures are initialized.</li>
+ *           <li>The subprocedures are added to the store</li>
+ *           <li>The subprocedures are added to the runnable queue</li>
+ *           <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete</li>
+ *         </ul>
+ *       </li>
+ *       <li>...if there are no subprocedure
+ *         <ul>
+ *           <li>the procedure completed successfully</li>
+ *           <li>if there is a parent (WAITING)</li>
+ *           <li>the parent state will be set to RUNNABLE</li>
+ *         </ul>
+ *       </li>
+ *     </ul>
+ *   </li>
+ *   <li>In case of failure
+ *     <ul>
+ *       <li>The store is updated with the new state</li>
+ *       <li>The executor (caller of this method) will start the rollback of the procedure</li>
+ *     </ul>
+ *   </li>
  • + * */ private void execProcedure(RootProcedureState procStack, - Procedure procedure) { + Procedure procedure) { Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE, - "NOT RUNNABLE! " + procedure.toString()); + "NOT RUNNABLE! " + procedure.toString()); // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException. // The exception is caught below and then we hurry to the exit without disturbing state. The @@ -1699,10 +1684,10 @@ private void execProcedure(RootProcedureState procStack, // Yield the current procedure, and make the subprocedure runnable // subprocs may come back 'null'. subprocs = initializeChildren(procStack, procedure, subprocs); - LOG.info("Initialized subprocedures=" + - (subprocs == null? null: - Stream.of(subprocs).map(e -> "{" + e.toString() + "}"). - collect(Collectors.toList()).toString())); + LOG.info("Initialized subprocedures=" + (subprocs == null + ? null + : Stream.of(subprocs).map(e -> "{" + e.toString() + "}").collect(Collectors.toList()) + .toString())); } } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { LOG.trace("Added to timeoutExecutor {}", procedure); @@ -1718,8 +1703,9 @@ private void execProcedure(RootProcedureState procStack, // allows to kill the executor before something is stored to the wal. // useful to test the procedure recovery. - if (testing != null && - testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())) { + if ( + testing != null && testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent()) + ) { kill("TESTING: Kill BEFORE store update: " + procedure); } @@ -1740,8 +1726,10 @@ private void execProcedure(RootProcedureState procStack, return; } // if the procedure is kind enough to pass the slot to someone else, yield - if (procedure.isRunnable() && !suspended && - procedure.isYieldAfterExecutionStep(getEnvironment())) { + if ( + procedure.isRunnable() && !suspended + && procedure.isYieldAfterExecutionStep(getEnvironment()) + ) { yieldProcedure(procedure); return; } @@ -1780,15 +1768,15 @@ private void kill(String msg) { } private Procedure[] initializeChildren(RootProcedureState procStack, - Procedure procedure, Procedure[] subprocs) { + Procedure procedure, Procedure[] subprocs) { assert subprocs != null : "expected subprocedures"; final long rootProcId = getRootProcedureId(procedure); for (int i = 0; i < subprocs.length; ++i) { Procedure subproc = subprocs[i]; if (subproc == null) { String msg = "subproc[" + i + "] is null, aborting the procedure"; - procedure.setFailure(new RemoteProcedureException(msg, - new IllegalArgumentIOException(msg))); + procedure + .setFailure(new RemoteProcedureException(msg, new IllegalArgumentIOException(msg))); return null; } @@ -1826,7 +1814,7 @@ private void submitChildrenProcedures(Procedure[] subprocs) { } private void countDownChildren(RootProcedureState procStack, - Procedure procedure) { + Procedure procedure) { Procedure parent = procedures.get(procedure.getParentProcId()); if (parent == null) { assert procStack.isRollingback(); @@ -1839,14 +1827,14 @@ private void countDownChildren(RootProcedureState procStack, // children have completed, move parent to front of the queue. 
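The execution flow described in the javadoc above (doExecute() returning subprocedures, the parent parking in WAITING, countDownChildren() re-queueing it once the last child finishes) is easiest to see from a user-level procedure. A minimal sketch under stated assumptions: ParentProcedure, ChildProcedure and the TEnv environment type are hypothetical names, and only the Procedure callbacks visible in these hunks are overridden.

    import java.io.IOException;
    import org.apache.hadoop.hbase.procedure2.Procedure;
    import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;

    // Hypothetical environment type; a real deployment would pass the executor's TEnvironment.
    class TEnv {
    }

    // Leaf procedure: one unit of work, no children, so execute() returns null and it completes.
    class ChildProcedure extends Procedure<TEnv> {
      @Override
      protected Procedure<TEnv>[] execute(TEnv env) {
        return null; // no subprocedures -> the framework marks this procedure finished
      }

      @Override
      protected void rollback(TEnv env) throws IOException, InterruptedException {
      }

      @Override
      protected boolean abort(TEnv env) {
        return false;
      }

      @Override
      protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
      }

      @Override
      protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
      }
    }

    // Parent procedure: the first execute() returns children; the executor persists and queues
    // them and moves the parent to WAITING. When countDownChildren() sees the last child
    // finish, the parent is put back at the front of the queue and execute() runs again.
    class ParentProcedure extends ChildProcedure {
      // Kept in memory only to keep the sketch short; real procedures persist such flags.
      private boolean childrenSpawned = false;

      @Override
      @SuppressWarnings("unchecked")
      protected Procedure<TEnv>[] execute(TEnv env) {
        if (!childrenSpawned) {
          childrenSpawned = true;
          return new Procedure[] { new ChildProcedure(), new ChildProcedure() };
        }
        return null; // children already ran; nothing left to do
      }
    }

The WAITING bookkeeping and the store updates shown in the surrounding hunks are handled entirely by the executor; the procedure only decides whether to return children.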
store.update(parent); scheduler.addFront(parent); - LOG.info("Finished subprocedure pid={}, resume processing ppid={}", - procedure.getProcId(), parent.getProcId()); + LOG.info("Finished subprocedure pid={}, resume processing ppid={}", procedure.getProcId(), + parent.getProcId()); return; } } private void updateStoreOnExec(RootProcedureState procStack, - Procedure procedure, Procedure[] subprocs) { + Procedure procedure, Procedure[] subprocs) { if (subprocs != null && !procedure.isFailed()) { if (LOG.isTraceEnabled()) { LOG.trace("Stored " + procedure + ", children " + Arrays.toString(subprocs)); @@ -1883,10 +1871,10 @@ private void handleInterruptedException(Procedure proc, Interrupte private void execCompletionCleanup(Procedure proc) { final TEnvironment env = getEnvironment(); if (proc.hasLock()) { - LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + - " is finished, even if the holdLock is true, arrive here means we have some holes where" + - " we do not release the lock. And the releaseLock below may fail since the procedure may" + - " have already been deleted from the procedure store."); + LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + + " is finished, even if the holdLock is true, arrive here means we have some holes where" + + " we do not release the lock. And the releaseLock below may fail since the procedure may" + + " have already been deleted from the procedure store."); releaseLock(proc, true); } try { @@ -1941,7 +1929,7 @@ public IdLock getProcExecutionLock() { } // ========================================================================== - // Worker Thread + // Worker Thread // ========================================================================== private class WorkerThread extends StoppableThread { private final AtomicLong executionStartTime = new AtomicLong(Long.MAX_VALUE); @@ -1960,6 +1948,7 @@ protected WorkerThread(ThreadGroup group, String prefix) { public void sendStopSignal() { scheduler.signalAll(); } + @Override public void run() { long lastUpdate = EnvironmentEdgeManager.currentTime(); @@ -1986,8 +1975,8 @@ public void run() { procExecutionLock.releaseLockEntry(lockEntry); activeCount = activeExecutorCount.decrementAndGet(); runningCount = store.setRunningProcedureCount(activeCount); - LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), - runningCount, activeCount); + LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), runningCount, + activeCount); this.activeProcedure = null; lastUpdate = EnvironmentEdgeManager.currentTime(); executionStartTime.set(Long.MAX_VALUE); @@ -2004,7 +1993,7 @@ public void run() { @Override public String toString() { Procedure p = this.activeProcedure; - return getName() + "(pid=" + (p == null? Procedure.NO_PROC_ID: p.getProcId() + ")"); + return getName() + "(pid=" + (p == null ? 
Procedure.NO_PROC_ID : p.getProcId() + ")"); } /** @@ -2043,15 +2032,15 @@ protected boolean keepAlive(long lastUpdate) { private final class WorkerMonitor extends InlineChore { public static final String WORKER_MONITOR_INTERVAL_CONF_KEY = - "hbase.procedure.worker.monitor.interval.msec"; + "hbase.procedure.worker.monitor.interval.msec"; private static final int DEFAULT_WORKER_MONITOR_INTERVAL = 5000; // 5sec public static final String WORKER_STUCK_THRESHOLD_CONF_KEY = - "hbase.procedure.worker.stuck.threshold.msec"; + "hbase.procedure.worker.stuck.threshold.msec"; private static final int DEFAULT_WORKER_STUCK_THRESHOLD = 10000; // 10sec public static final String WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY = - "hbase.procedure.worker.add.stuck.percentage"; + "hbase.procedure.worker.add.stuck.percentage"; private static final float DEFAULT_WORKER_ADD_STUCK_PERCENTAGE = 0.5f; // 50% stuck private float addWorkerStuckPercentage = DEFAULT_WORKER_ADD_STUCK_PERCENTAGE; @@ -2107,12 +2096,11 @@ private void checkThreadCount(final int stuckCount) { } private void refreshConfig() { - addWorkerStuckPercentage = conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, - DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); - timeoutInterval = conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, - DEFAULT_WORKER_MONITOR_INTERVAL); - stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, - DEFAULT_WORKER_STUCK_THRESHOLD); + addWorkerStuckPercentage = + conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); + timeoutInterval = + conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, DEFAULT_WORKER_MONITOR_INTERVAL); + stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, DEFAULT_WORKER_STUCK_THRESHOLD); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java index cd65c1f74aed..f8232cce950e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -23,13 +22,10 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Special procedure used as a chore. - * Instead of bringing the Chore class in (dependencies reason), - * we reuse the executor timeout thread for this special case. - * - * The assumption is that procedure is used as hook to dispatch other procedures - * or trigger some cleanups. It does not store state in the ProcedureStore. - * this is just for in-memory chore executions. + * Special procedure used as a chore. Instead of bringing the Chore class in (dependencies reason), + * we reuse the executor timeout thread for this special case. The assumption is that procedure is + * used as hook to dispatch other procedures or trigger some cleanups. It does not store state in + * the ProcedureStore. this is just for in-memory chore executions. 
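The WorkerMonitor chore reformatted in the hunk above polls three configuration keys whose names and defaults appear verbatim in the patch. A minimal sketch, assuming only the standard Hadoop Configuration API, of overriding them programmatically; the same keys can equally be set in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;

    public class WorkerMonitorTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // How often the monitor runs (default 5000 ms in the patch).
        conf.setInt("hbase.procedure.worker.monitor.interval.msec", 5000);
        // How long a worker may sit on one procedure before it counts as stuck (default 10000 ms).
        conf.setInt("hbase.procedure.worker.stuck.threshold.msec", 10000);
        // Fraction of stuck workers that triggers adding a new worker thread (default 0.5).
        conf.setFloat("hbase.procedure.worker.add.stuck.percentage", 0.5f);
        System.out.println("stuck threshold = "
          + conf.getInt("hbase.procedure.worker.stuck.threshold.msec", 10000) + " ms");
      }
    }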
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -56,12 +52,10 @@ protected boolean abort(final TEnvironment env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java index 48413928e5b7..f86a2b2d00a5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.hadoop.hbase.metrics.Counter; @@ -26,12 +25,11 @@ * With this interface, the procedure framework provides means to collect following set of metrics * per procedure type for all procedures: *
 * <ul>
- *   <li>Count of submitted procedure instances</li>
- *   <li>Time histogram for successfully completed procedure instances</li>
- *   <li>Count of failed procedure instances</li>
- * </ul>
- *
- * Please implement this interface to return appropriate metrics.
+ *   <li>Count of submitted procedure instances</li>
+ *   <li>Time histogram for successfully completed procedure instances</li>
+ *   <li>Count of failed procedure instances</li>
  • + * + * Please implement this interface to return appropriate metrics. */ @InterfaceAudience.Private public interface ProcedureMetrics { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java index 72b2b284ca19..f89cac5137c9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ public interface ProcedureScheduler { void stop(); /** - * In case the class is blocking on poll() waiting for items to be added, - * this method should awake poll() and poll() should return. + * In case the class is blocking on poll() waiting for items to be added, this method should awake + * poll() and poll() should return. */ void signalAll(); @@ -51,7 +51,7 @@ public interface ProcedureScheduler { /** * Inserts the specified element at the front of this queue. - * @param proc the Procedure to add + * @param proc the Procedure to add * @param notify whether need to notify worker */ void addFront(Procedure proc, boolean notify); @@ -69,21 +69,20 @@ public interface ProcedureScheduler { /** * Inserts the specified element at the end of this queue. - * @param proc the Procedure to add + * @param proc the Procedure to add * @param notify whether need to notify worker */ void addBack(Procedure proc, boolean notify); /** - * The procedure can't run at the moment. - * add it back to the queue, giving priority to someone else. + * The procedure can't run at the moment. add it back to the queue, giving priority to someone + * else. * @param proc the Procedure to add back to the list */ void yield(Procedure proc); /** - * The procedure in execution completed. - * This can be implemented to perform cleanups. + * The procedure in execution completed. This can be implemented to perform cleanups. * @param proc the Procedure that completed the execution. */ void completionCleanup(Procedure proc); @@ -102,7 +101,7 @@ public interface ProcedureScheduler { /** * Fetch one Procedure from the queue * @param timeout how long to wait before giving up, in units of unit - * @param unit a TimeUnit determining how to interpret the timeout parameter + * @param unit a TimeUnit determining how to interpret the timeout parameter * @return the Procedure to execute, or null if nothing present. */ Procedure poll(long timeout, TimeUnit unit); @@ -126,9 +125,9 @@ public interface ProcedureScheduler { int size(); /** - * Clear current state of scheduler such that it is equivalent to newly created scheduler. - * Used for testing failure and recovery. To emulate server crash/restart, - * {@link ProcedureExecutor} resets its own state and calls clear() on scheduler. + * Clear current state of scheduler such that it is equivalent to newly created scheduler. Used + * for testing failure and recovery. To emulate server crash/restart, {@link ProcedureExecutor} + * resets its own state and calls clear() on scheduler. 
*/ void clear(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java index 216022f1c798..fc4eb1532ee4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.Message; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java index 9f521214f075..95fafae72665 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java index c557c2021b40..4a225161dbf9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,10 +46,11 @@ */ @InterfaceAudience.Private public final class ProcedureUtil { - private ProcedureUtil() { } + private ProcedureUtil() { + } // ========================================================================== - // Reflection helpers to create/validate a Procedure object + // Reflection helpers to create/validate a Procedure object // ========================================================================== private static Procedure newProcedure(String className) throws BadProcedureException { try { @@ -85,18 +86,18 @@ static void validateClass(Procedure proc) throws BadProcedureException { throw new Exception("the " + clazz + " constructor is not public"); } } catch (Exception e) { - throw new BadProcedureException("The procedure class " + proc.getClass().getName() + - " must be accessible and have an empty constructor", e); + throw new BadProcedureException("The procedure class " + proc.getClass().getName() + + " must be accessible and have an empty constructor", e); } } // ========================================================================== - // convert to and from Procedure object + // convert to and from Procedure object // ========================================================================== /** - * A serializer for our Procedures. Instead of the previous serializer, it - * uses the stateMessage list to store the internal state of the Procedures. + * A serializer for our Procedures. Instead of the previous serializer, it uses the stateMessage + * list to store the internal state of the Procedures. 
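The StateSerializer described above backs the serializeStateData()/deserializeStateData() calls that every procedure implements. A minimal sketch of that contract; MyStateData is a hypothetical generated protobuf message (not part of this patch) with a single counter field, and only the two ProcedureStateSerializer methods shown in this hunk are used.

    import java.io.IOException;
    import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;

    // Sketch of a procedure's state round-trip. MyStateData is hypothetical; substitute any
    // generated protobuf message shaded under org.apache.hbase.thirdparty.
    public class CounterStateSketch {
      private long counter;

      protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
        // Each serialize() call appends one message to the procedure's stateMessage list.
        serializer.serialize(MyStateData.newBuilder().setCounter(counter).build());
      }

      protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
        // Messages come back in the order they were written.
        counter = serializer.deserialize(MyStateData.class).getCounter();
      }
    }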
*/ private static class StateSerializer implements ProcedureStateSerializer { private final ProcedureProtos.Procedure.Builder builder; @@ -113,8 +114,7 @@ public void serialize(Message message) throws IOException { } @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { if (deserializeIndex >= builder.getStateMessageCount()) { throw new IOException("Invalid state message index: " + deserializeIndex); } @@ -129,8 +129,8 @@ public M deserialize(Class clazz) } /** - * A serializer (deserializer) for those Procedures which were serialized - * before this patch. It deserializes the old, binary stateData field. + * A serializer (deserializer) for those Procedures which were serialized before this patch. It + * deserializes the old, binary stateData field. */ private static class CompatStateSerializer implements ProcedureStateSerializer { private InputStream inputStream; @@ -146,8 +146,7 @@ public void serialize(Message message) throws IOException { @SuppressWarnings("unchecked") @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { Parser parser = (Parser) Internal.getDefaultInstance(clazz).getParserForType(); try { return parser.parseDelimitedFrom(inputStream); @@ -163,16 +162,13 @@ public M deserialize(Class clazz) * Used by ProcedureStore implementations. */ public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure proc) - throws IOException { + throws IOException { Preconditions.checkArgument(proc != null); validateClass(proc); final ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder() - .setClassName(proc.getClass().getName()) - .setProcId(proc.getProcId()) - .setState(proc.getState()) - .setSubmittedTime(proc.getSubmittedTime()) - .setLastUpdate(proc.getLastUpdate()); + .setClassName(proc.getClass().getName()).setProcId(proc.getProcId()).setState(proc.getState()) + .setSubmittedTime(proc.getSubmittedTime()).setLastUpdate(proc.getLastUpdate()); if (proc.hasParent()) { builder.setParentId(proc.getParentProcId()); @@ -232,7 +228,7 @@ public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure pro * it by storing the data only on insert(). 
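convertToProtoProcedure() and its inverse below are what ProcedureStore implementations use to persist and reload procedures. A minimal round-trip sketch, assuming 'proc' is an instance of some public Procedure subclass with a no-arg constructor, the requirement validateClass() enforces in this hunk.

    import java.io.IOException;
    import org.apache.hadoop.hbase.procedure2.Procedure;
    import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;

    public class ProcedureRoundTrip {
      // 'proc' stands in for any concrete procedure; see the executor sketch earlier in this patch.
      static Procedure<?> roundTrip(Procedure<?> proc) throws IOException {
        // Serialize to the protobuf form written by ProcedureStore implementations ...
        ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(proc);
        // ... and rebuild a Procedure instance from it, as done when the store is loaded.
        return ProcedureUtil.convertToProcedure(proto);
      }
    }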
*/ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) - throws IOException { + throws IOException { // Procedure from class name Procedure proc = newProcedure(proto.getClassName()); @@ -259,9 +255,9 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) } if (proto.hasException()) { - assert proc.getState() == ProcedureProtos.ProcedureState.FAILED || - proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK : - "The procedure must be failed (waiting to rollback) or rolledback"; + assert proc.getState() == ProcedureProtos.ProcedureState.FAILED + || proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK + : "The procedure must be failed (waiting to rollback) or rolledback"; proc.setFailure(RemoteProcedureException.fromProto(proto.getException())); } @@ -298,11 +294,11 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) } // ========================================================================== - // convert from LockedResource object + // convert from LockedResource object // ========================================================================== - public static LockServiceProtos.LockedResourceType convertToProtoResourceType( - LockedResourceType resourceType) { + public static LockServiceProtos.LockedResourceType + convertToProtoResourceType(LockedResourceType resourceType) { return LockServiceProtos.LockedResourceType.valueOf(resourceType.name()); } @@ -310,29 +306,27 @@ public static LockServiceProtos.LockType convertToProtoLockType(LockType lockTyp return LockServiceProtos.LockType.valueOf(lockType.name()); } - public static LockServiceProtos.LockedResource convertToProtoLockedResource( - LockedResource lockedResource) throws IOException { + public static LockServiceProtos.LockedResource + convertToProtoLockedResource(LockedResource lockedResource) throws IOException { LockServiceProtos.LockedResource.Builder builder = - LockServiceProtos.LockedResource.newBuilder(); + LockServiceProtos.LockedResource.newBuilder(); - builder - .setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) - .setResourceName(lockedResource.getResourceName()) - .setLockType(convertToProtoLockType(lockedResource.getLockType())); + builder.setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) + .setResourceName(lockedResource.getResourceName()) + .setLockType(convertToProtoLockType(lockedResource.getLockType())); Procedure exclusiveLockOwnerProcedure = lockedResource.getExclusiveLockOwnerProcedure(); if (exclusiveLockOwnerProcedure != null) { ProcedureProtos.Procedure exclusiveLockOwnerProcedureProto = - convertToProtoProcedure(exclusiveLockOwnerProcedure); + convertToProtoProcedure(exclusiveLockOwnerProcedure); builder.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto); } builder.setSharedLockCount(lockedResource.getSharedLockCount()); for (Procedure waitingProcedure : lockedResource.getWaitingProcedures()) { - ProcedureProtos.Procedure waitingProcedureProto = - convertToProtoProcedure(waitingProcedure); + ProcedureProtos.Procedure waitingProcedureProto = convertToProtoProcedure(waitingProcedure); builder.addWaitingProcedures(waitingProcedureProto); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java index dbb998132be5..fc564711e883 100644 --- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 03702e6f64bb..ebcce07742dd 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -36,42 +35,43 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A procedure dispatcher that aggregates and sends after elapsed time or after we hit - * count threshold. Creates its own threadpool to run RPCs with timeout. + * A procedure dispatcher that aggregates and sends after elapsed time or after we hit count + * threshold. Creates its own threadpool to run RPCs with timeout. *
 * <ul>
 * <li>Each server queue has a dispatch buffer</li>
- * <li>Once the dispatch buffer reaches a threshold-size/time we send</li>
+ * <li>Once the dispatch buffer reaches a threshold-size/time we send
+ * </li>
 * </ul>
- * <p>Call {@link #start()} and then {@link #submitTask(Runnable)}. When done,
- * call {@link #stop()}.
+ * <p>
    + * Call {@link #start()} and then {@link #submitTask(Runnable)}. When done, call {@link #stop()}. */ @InterfaceAudience.Private public abstract class RemoteProcedureDispatcher> { private static final Logger LOG = LoggerFactory.getLogger(RemoteProcedureDispatcher.class); public static final String THREAD_POOL_SIZE_CONF_KEY = - "hbase.procedure.remote.dispatcher.threadpool.size"; + "hbase.procedure.remote.dispatcher.threadpool.size"; private static final int DEFAULT_THREAD_POOL_SIZE = 128; public static final String DISPATCH_DELAY_CONF_KEY = - "hbase.procedure.remote.dispatcher.delay.msec"; + "hbase.procedure.remote.dispatcher.delay.msec"; private static final int DEFAULT_DISPATCH_DELAY = 150; public static final String DISPATCH_MAX_QUEUE_SIZE_CONF_KEY = - "hbase.procedure.remote.dispatcher.max.queue.size"; + "hbase.procedure.remote.dispatcher.max.queue.size"; private static final int DEFAULT_MAX_QUEUE_SIZE = 32; private final AtomicBoolean running = new AtomicBoolean(false); private final ConcurrentHashMap nodeMap = - new ConcurrentHashMap(); + new ConcurrentHashMap(); private final int operationDelay; private final int queueMaxSize; @@ -92,8 +92,8 @@ public boolean start() { return false; } - LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " + - "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay); + LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " + + "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay); // Create the timeout executor timeoutExecutor = new TimeoutExecutorThread(); @@ -144,14 +144,14 @@ public void join() { protected abstract UncaughtExceptionHandler getUncaughtExceptionHandler(); // ============================================================================================ - // Node Helpers + // Node Helpers // ============================================================================================ /** * Add a node that will be able to execute remote procedures * @param key the node identifier */ public void addNode(final TRemote key) { - assert key != null: "Tried to add a node with a null key"; + assert key != null : "Tried to add a node with a null key"; nodeMap.computeIfAbsent(key, k -> new BufferNode(k)); } @@ -160,8 +160,7 @@ public void addNode(final TRemote key) { * @param key the node identifier */ public void addOperationToNode(final TRemote key, RemoteProcedure rp) - throws NullTargetServerDispatchException, NoServerDispatchException, - NoNodeDispatchException { + throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException { if (key == null) { throw new NullTargetServerDispatchException(rp.toString()); } @@ -203,7 +202,7 @@ public boolean removeNode(final TRemote key) { } // ============================================================================================ - // Task Helpers + // Task Helpers // ============================================================================================ protected final void submitTask(Runnable task) { threadPool.execute(task); @@ -214,6 +213,7 @@ protected final void submitTask(Runnable task, long delay, TimeUnit unit) { } protected abstract void remoteDispatch(TRemote key, Set operations); + protected abstract void abortPendingOperations(TRemote key, Set operations); /** @@ -236,11 +236,11 @@ public RemoteProcedure getRemoteProcedure() { */ public interface RemoteProcedure { /** - * For building the remote operation. 
- * May be empty if no need to send remote call. Usually, this means the RemoteProcedure has been - * finished already. This is possible, as we may have already sent the procedure to RS but then - * the rpc connection is broken so the executeProcedures call fails, but the RS does receive the - * procedure and execute it and then report back, before we retry again. + * For building the remote operation. May be empty if no need to send remote call. Usually, this + * means the RemoteProcedure has been finished already. This is possible, as we may have already + * sent the procedure to RS but then the rpc connection is broken so the executeProcedures call + * fails, but the RS does receive the procedure and execute it and then report back, before we + * retry again. */ Optional remoteCallBuild(TEnv env, TRemote remote); @@ -262,9 +262,8 @@ public interface RemoteProcedure { void remoteOperationFailed(TEnv env, RemoteProcedureException error); /** - * Whether store this remote procedure in dispatched queue - * only OpenRegionProcedure and CloseRegionProcedure return false since they are - * not fully controlled by dispatcher + * Whether store this remote procedure in dispatched queue only OpenRegionProcedure and + * CloseRegionProcedure return false since they are not fully controlled by dispatcher */ default boolean storeInDispatchedQueue() { return true; @@ -283,7 +282,7 @@ public interface RemoteNode { } protected ArrayListMultimap, RemoteOperation> buildAndGroupRequestByType(final TEnv env, - final TRemote remote, final Set remoteProcedures) { + final TRemote remote, final Set remoteProcedures) { final ArrayListMultimap, RemoteOperation> requestByType = ArrayListMultimap.create(); for (RemoteProcedure proc : remoteProcedures) { Optional operation = proc.remoteCallBuild(env, remote); @@ -293,12 +292,12 @@ protected ArrayListMultimap, RemoteOperation> buildAndGroupRequestByTyp } protected List fetchType( - final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { - return (List)requestByType.removeAll(type); + final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { + return (List) requestByType.removeAll(type); } // ============================================================================================ - // Timeout Helpers + // Timeout Helpers // ============================================================================================ private final class TimeoutExecutorThread extends Thread { private final DelayQueue queue = new DelayQueue(); @@ -310,8 +309,8 @@ public TimeoutExecutorThread() { @Override public void run() { while (running.get()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, - 20, TimeUnit.SECONDS); + final DelayedWithTimeout task = + DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { if (task == null && queue.size() > 0) { LOG.error("DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting" @@ -348,8 +347,8 @@ public void awaitTermination() { sendStopSignal(); join(250); if (i > 0 && (i % 8) == 0) { - LOG.warn("Waiting termination of thread " + getName() + ", " + - StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); + LOG.warn("Waiting termination of thread " + getName() + ", " + + StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); } } } catch (InterruptedException e) { @@ -359,14 +358,14 @@ public void awaitTermination() { } // 
============================================================================================ - // Internals Helpers + // Internals Helpers // ============================================================================================ /** * Node that contains a set of RemoteProcedures */ protected final class BufferNode extends DelayedContainerWithTimestamp - implements RemoteNode { + implements RemoteNode { private Set operations; private final Set dispatchedOperations = new HashSet<>(); @@ -398,7 +397,7 @@ public synchronized void dispatch() { if (operations != null) { remoteDispatch(getKey(), operations); operations.stream().filter(operation -> operation.storeInDispatchedQueue()) - .forEach(operation -> dispatchedOperations.add(operation)); + .forEach(operation -> dispatchedOperations.add(operation)); this.operations = null; } } @@ -412,7 +411,7 @@ public synchronized void abortOperationsInQueue() { this.dispatchedOperations.clear(); } - public synchronized void operationCompleted(final RemoteProcedure remoteProcedure){ + public synchronized void operationCompleted(final RemoteProcedure remoteProcedure) { this.dispatchedOperations.remove(remoteProcedure); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java index 91ad920f27f8..29cdd49c6426 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; @@ -32,25 +31,25 @@ * RemoteProcedureExceptions are sent to 'remote' peers to signal an abort in the face of failures. * When serialized for transmission we encode using Protobufs to ensure version compatibility. *

    - * RemoteProcedureException exceptions contain a Throwable as its cause. - * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation - * of the original exception created on original 'remote' source. These ProxyThrowables have their - * their stacks traces and messages overridden to reflect the original 'remote' exception. + * RemoteProcedureException exceptions contain a Throwable as its cause. This can be a "regular" + * exception generated locally or a ProxyThrowable that is a representation of the original + * exception created on original 'remote' source. These ProxyThrowables have their their stacks + * traces and messages overridden to reflect the original 'remote' exception. */ @InterfaceAudience.Private @InterfaceStability.Evolving @SuppressWarnings("serial") public class RemoteProcedureException extends ProcedureException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new RemoteProcedureException that can be serialized. - * It is assumed that this came form a local source. + * Create a new RemoteProcedureException that can be serialized. It is assumed that this came form + * a local source. * @param source the host or thread name of the source - * @param cause the actual cause of the exception + * @param cause the actual cause of the exception */ public RemoteProcedureException(String source, Throwable cause) { super(cause); @@ -66,10 +65,10 @@ public String getSource() { public Exception unwrapRemoteException() { final Throwable cause = getCause(); if (cause instanceof RemoteException) { - return ((RemoteException)cause).unwrapRemoteException(); + return ((RemoteException) cause).unwrapRemoteException(); } if (cause instanceof Exception) { - return (Exception)cause; + return (Exception) cause; } return new Exception(cause); } @@ -81,7 +80,7 @@ public Exception unwrapRemoteException() { public IOException unwrapRemoteIOException() { final Exception cause = unwrapRemoteException(); if (cause instanceof IOException) { - return (IOException)cause; + return (IOException) cause; } return new IOException(cause); } @@ -95,7 +94,7 @@ public String toString() { /** * Converts a RemoteProcedureException to an array of bytes. * @param source the name of the external exception source - * @param t the "local" external exception (local) + * @param t the "local" external exception (local) * @return protobuf serialized version of RemoteProcedureException */ public static byte[] serialize(String source, Throwable t) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java index 440f9e7d6ec1..9990bdeb4306 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". 
- * A "Root Procedure" is a Procedure without parent, each subprocedure will be - * added to the "Root Procedure" stack (or rollback-stack). - * - * RootProcedureState is used and managed only by the ProcedureExecutor. - * Long rootProcId = getRootProcedureId(proc); - * rollbackStack.get(rootProcId).acquire(proc) - * rollbackStack.get(rootProcId).release(proc) - * ... + * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". A "Root + * Procedure" is a Procedure without parent, each subprocedure will be added to the "Root Procedure" + * stack (or rollback-stack). RootProcedureState is used and managed only by the ProcedureExecutor. + * Long rootProcId = getRootProcedureId(proc); rollbackStack.get(rootProcId).acquire(proc) + * rollbackStack.get(rootProcId).release(proc) ... */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -45,9 +41,9 @@ class RootProcedureState { private static final Logger LOG = LoggerFactory.getLogger(RootProcedureState.class); private enum State { - RUNNING, // The Procedure is running or ready to run - FAILED, // The Procedure failed, waiting for the rollback executing - ROLLINGBACK, // The Procedure failed and the execution was rolledback + RUNNING, // The Procedure is running or ready to run + FAILED, // The Procedure failed, waiting for the rollback executing + ROLLINGBACK, // The Procedure failed and the execution was rolledback } private Set> subprocs = null; @@ -102,7 +98,7 @@ protected synchronized List> getSubproceduresStack() { protected synchronized RemoteProcedureException getException() { if (subprocStack != null) { - for (Procedure proc: subprocStack) { + for (Procedure proc : subprocStack) { if (proc.hasException()) { return proc.getException(); } @@ -137,8 +133,8 @@ protected synchronized void abort() { } /** - * Called by the ProcedureExecutor after the procedure step is completed, - * to add the step to the rollback list (or procedure stack) + * Called by the ProcedureExecutor after the procedure step is completed, to add the step to the + * rollback list (or procedure stack) */ protected synchronized void addRollbackStep(Procedure proc) { if (proc.isFailed()) { @@ -163,11 +159,10 @@ protected synchronized void addSubProcedure(Procedure proc) { } /** - * Called on store load by the ProcedureExecutor to load part of the stack. - * - * Each procedure has its own stack-positions. Which means we have to write - * to the store only the Procedure we executed, and nothing else. - * on load we recreate the full stack by aggregating each procedure stack-positions. + * Called on store load by the ProcedureExecutor to load part of the stack. Each procedure has its + * own stack-positions. Which means we have to write to the store only the Procedure we executed, + * and nothing else. on load we recreate the full stack by aggregating each procedure + * stack-positions. */ protected synchronized void loadStack(Procedure proc) { addSubProcedure(proc); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java index 20abf651e306..131128cf04fb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.SequentialProcedureData; /** * A SequentialProcedure describes one step in a procedure chain: + * *

 * <pre>
 *   -> Step 1 -> Step 2 -> Step 3
 * </pre>
    - * The main difference from a base Procedure is that the execute() of a - * SequentialProcedure will be called only once; there will be no second - * execute() call once the children are finished. which means once the child - * of a SequentialProcedure are completed the SequentialProcedure is completed too. + * + * The main difference from a base Procedure is that the execute() of a SequentialProcedure will be + * called only once; there will be no second execute() call once the children are finished. which + * means once the child of a SequentialProcedure are completed the SequentialProcedure is completed + * too. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -41,7 +42,7 @@ public abstract class SequentialProcedure extends Procedure getLocks() { } @Override - public LockedResource getLockResource(LockedResourceType resourceType, - String resourceName) { + public LockedResource getLockResource(LockedResourceType resourceType, String resourceName) { return null; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index d1af4969141a..d7ab269cb557 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,19 +31,17 @@ /** * Procedure described by a series of steps. - * - *

- * The procedure implementor must have an enum of 'states', describing
- * the various step of the procedure.
- * Once the procedure is running, the procedure-framework will call executeFromState()
- * using the 'state' provided by the user. The first call to executeFromState()
- * will be performed with 'state = null'. The implementor can jump between
- * states using setNextState(MyStateEnum.ordinal()).
- * The rollback will call rollbackState() for each state that was executed, in reverse order.

    + * The procedure implementor must have an enum of 'states', describing the various step of the + * procedure. Once the procedure is running, the procedure-framework will call executeFromState() + * using the 'state' provided by the user. The first call to executeFromState() will be performed + * with 'state = null'. The implementor can jump between states using + * setNextState(MyStateEnum.ordinal()). The rollback will call rollbackState() for each state that + * was executed, in reverse order. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class StateMachineProcedure - extends Procedure { +public abstract class StateMachineProcedure extends Procedure { private static final Logger LOG = LoggerFactory.getLogger(StateMachineProcedure.class); private static final int EOF_STATE = Integer.MIN_VALUE; @@ -78,11 +76,11 @@ public enum Flow { /** * called to perform a single step of the specified 'state' of the procedure * @param state state to execute - * @return Flow.NO_MORE_STATE if the procedure is completed, - * Flow.HAS_MORE_STATE if there is another step. + * @return Flow.NO_MORE_STATE if the procedure is completed, Flow.HAS_MORE_STATE if there is + * another step. */ protected abstract Flow executeFromState(TEnvironment env, TState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; /** * called to perform the rollback of the specified state @@ -122,9 +120,9 @@ protected void setNextState(final TState state) { } /** - * By default, the executor will try ro run all the steps of the procedure start to finish. - * Return true to make the executor yield between execution steps to - * give other procedures time to run their steps. + * By default, the executor will try ro run all the steps of the procedure start to finish. Return + * true to make the executor yield between execution steps to give other procedures time to run + * their steps. * @param state the state we are going to execute next. * @return Return true if the executor should yield before the execution of the specified step. * Defaults to return false. @@ -137,8 +135,8 @@ protected boolean isYieldBeforeExecuteFromState(TEnvironment env, TState state) * Add a child procedure to execute * @param subProcedure the child procedure */ - protected > void addChildProcedure( - @SuppressWarnings("unchecked") T... subProcedure) { + protected > void + addChildProcedure(@SuppressWarnings("unchecked") T... subProcedure) { if (subProcedure == null) { return; } @@ -161,7 +159,7 @@ protected > void addChildProcedure( @Override protected Procedure[] execute(final TEnvironment env) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { updateTimestamp(); try { failIfAborted(); @@ -176,7 +174,7 @@ protected Procedure[] execute(final TEnvironment env) } if (LOG.isTraceEnabled()) { - LOG.trace(state + " " + this + "; cycles=" + this.cycles); + LOG.trace(state + " " + this + "; cycles=" + this.cycles); } // Keep running count of cycles if (getStateId(state) != this.previousState) { @@ -197,15 +195,14 @@ protected Procedure[] execute(final TEnvironment env) subProcList = null; return subProcedures; } - return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] {this}; + return (isWaiting() || isFailed() || !hasMoreState()) ? 
null : new Procedure[] { this }; } finally { updateTimestamp(); } } @Override - protected void rollback(final TEnvironment env) - throws IOException, InterruptedException { + protected void rollback(final TEnvironment env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -220,7 +217,7 @@ protected void rollback(final TEnvironment env) } protected boolean isEofState() { - return stateCount > 0 && states[stateCount-1] == EOF_STATE; + return stateCount > 0 && states[stateCount - 1] == EOF_STATE; } @Override @@ -253,8 +250,8 @@ protected final void failIfAborted() { } /** - * Used by the default implementation of abort() to know if the current state can be aborted - * and rollback can be triggered. + * Used by the default implementation of abort() to know if the current state can be aborted and + * rollback can be triggered. */ protected boolean isRollbackSupported(final TState state) { return false; @@ -270,7 +267,7 @@ private boolean hasMoreState() { } protected TState getCurrentState() { - return stateCount > 0 ? getState(states[stateCount-1]) : getInitialState(); + return stateCount > 0 ? getState(states[stateCount - 1]) : getInitialState(); } /** @@ -307,8 +304,7 @@ protected void toStringState(StringBuilder builder) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder(); for (int i = 0; i < stateCount; ++i) { data.addState(states[i]); @@ -317,8 +313,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData data = serializer.deserialize(StateMachineProcedureData.class); stateCount = data.getStateCount(); if (stateCount > 0) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java index b58b571a9345..4d0d8941dedf 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java index fc917b6f36ed..3b99781a5585 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,8 +53,7 @@ public void sendStopSignal() { @Override public void run() { while (executor.isRunning()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, - TimeUnit.SECONDS); + final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { // the executor may be shutting down, // and the task is just the shutdown request diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java index b8ddad21866e..7a15ebfc494c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java index aba71b95d6da..de44ad5b3df8 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -91,4 +91,4 @@ public Procedure next() throws IOException { moveToNext(); return proc; } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java index 7a9ea1b0d314..fb2a725177db 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,4 +26,4 @@ public interface LeaseRecovery { void recoverFileLease(FileSystem fs, Path path) throws IOException; -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java index 8fbc1473ed7e..8a4dd403cd20 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java index c1eaa73230fc..7e54cfa9a125 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -85,9 +84,8 @@ public interface ProcedureIterator { void reset(); /** - * Returns true if the iterator has more elements. - * (In other words, returns true if next() would return a Procedure - * rather than throwing an exception.) + * Returns true if the iterator has more elements. (In other words, returns true if next() would + * return a Procedure rather than throwing an exception.) * @return true if the iterator has more procedures */ boolean hasNext(); @@ -135,8 +133,8 @@ public interface ProcedureLoader { void load(ProcedureIterator procIter) throws IOException; /** - * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to - * the executor, which probably means they are corrupted since some information/link is missing. + * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to the + * executor, which probably means they are corrupted since some information/link is missing. * @param procIter iterator over the procedures not ready to be added to the executor, corrupted */ void handleCorrupted(ProcedureIterator procIter) throws IOException; @@ -178,8 +176,8 @@ public interface ProcedureLoader { int getNumThreads(); /** - * Set the number of procedure running. - * This can be used, for example, by the store to know how long to wait before a sync. + * Set the number of procedure running. This can be used, for example, by the store to know how + * long to wait before a sync. * @return how many procedures are running (may not be same as count). */ int setRunningProcedureCount(int count); @@ -201,57 +199,48 @@ public interface ProcedureLoader { void load(ProcedureLoader loader) throws IOException; /** - * When a procedure is submitted to the executor insert(proc, null) will be called. - * 'proc' has a 'RUNNABLE' state and the initial information required to start up. - * - * When a procedure is executed and it returns children insert(proc, subprocs) will be called. - * 'proc' has a 'WAITING' state and an update state. - * 'subprocs' are the children in 'RUNNABLE' state with the initial information. - * - * @param proc the procedure to serialize and write to the store. + * When a procedure is submitted to the executor insert(proc, null) will be called. 'proc' has a + * 'RUNNABLE' state and the initial information required to start up. 
When a procedure is executed + * and it returns children insert(proc, subprocs) will be called. 'proc' has a 'WAITING' state and + * an update state. 'subprocs' are the children in 'RUNNABLE' state with the initial information. + * @param proc the procedure to serialize and write to the store. * @param subprocs the newly created child of the proc. */ void insert(Procedure proc, Procedure[] subprocs); /** - * Serialize a set of new procedures. - * These procedures are freshly submitted to the executor and each procedure - * has a 'RUNNABLE' state and the initial information required to start up. - * + * Serialize a set of new procedures. These procedures are freshly submitted to the executor and + * each procedure has a 'RUNNABLE' state and the initial information required to start up. * @param procs the procedures to serialize and write to the store. */ void insert(Procedure[] procs); /** - * The specified procedure was executed, - * and the new state should be written to the store. + * The specified procedure was executed, and the new state should be written to the store. * @param proc the procedure to serialize and write to the store. */ void update(Procedure proc); /** - * The specified procId was removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procId. + * The specified procId was removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procId. * @param procId the ID of the procedure to remove. */ void delete(long procId); /** - * The parent procedure completed. - * Update the state and mark all the child deleted. + * The parent procedure completed. Update the state and mark all the child deleted. * @param parentProc the parent procedure to serialize and write to the store. * @param subProcIds the IDs of the sub-procedure to remove. */ void delete(Procedure parentProc, long[] subProcIds); /** - * The specified procIds were removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procIds. + * The specified procIds were removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procIds. * @param procIds the IDs of the procedures to remove. - * @param offset the array offset from where to start to delete - * @param count the number of IDs to delete + * @param offset the array offset from where to start to delete + * @param count the number of IDs to delete */ void delete(long[] procIds, int offset, int count); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java index a5c04fab200c..4efb6d34b5aa 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,14 +27,13 @@ @InterfaceAudience.Private public abstract class ProcedureStoreBase implements ProcedureStore { private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList<>(); + new CopyOnWriteArrayList<>(); private final AtomicBoolean running = new AtomicBoolean(false); /** - * Change the state to 'isRunning', - * returns true if the store state was changed, - * false if the store was already in that state. + * Change the state to 'isRunning', returns true if the store state was changed, false if the + * store was already in that state. * @param isRunning the state to set. * @return true if the store state was changed, otherwise false. */ diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java index 4e615b971d8a..fc61d41c7f01 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -167,8 +167,8 @@ private void checkReady(Entry rootEntry, Map remainingProcMap) { rootEntry); valid = false; } else if (entries.size() > 1) { - LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + - " root procedure is {}", entries, i, maxStackId, rootEntry); + LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + + " root procedure is {}", entries, i, maxStackId, rootEntry); valid = false; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java index 0cdc48041003..89d8cabf9d62 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,4 +48,4 @@ public Procedure getProcedure() throws IOException { public ProcedureProtos.Procedure getProto() { return proto; } -} \ No newline at end of file +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java index 98416a527b8e..fdbf183b19f4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -415,7 +415,7 @@ void updateState(long procId, boolean isDeleted) { } catch (ArrayIndexOutOfBoundsException aioobe) { // We've gotten a AIOOBE in here; add detail to help debug. 
ArrayIndexOutOfBoundsException aioobe2 = - new ArrayIndexOutOfBoundsException("pid=" + procId + ", deleted=" + isDeleted); + new ArrayIndexOutOfBoundsException("pid=" + procId + ", deleted=" + isDeleted); aioobe2.initCause(aioobe); throw aioobe2; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java index dc9d16c41f8e..a47b2664a9e7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java index 3436e8b76697..eeb5a3a827ba 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,10 +32,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** - * Keeps track of live procedures. - * - * It can be used by the ProcedureStore to identify which procedures are already - * deleted/completed to avoid the deserialization step on restart + * Keeps track of live procedures. It can be used by the ProcedureStore to identify which procedures + * are already deleted/completed to avoid the deserialization step on restart * @deprecated Since 2.3.0, will be removed in 4.0.0. Keep here only for rolling upgrading, now we * use the new region based procedure store. */ @@ -48,29 +46,32 @@ class ProcedureStoreTracker { private final TreeMap map = new TreeMap<>(); /** - * If true, do not remove bits corresponding to deleted procedures. Note that this can result - * in huge bitmaps overtime. - * Currently, it's set to true only when building tracker state from logs during recovery. During - * recovery, if we are sure that a procedure has been deleted, reading its old update entries - * can be skipped. + * If true, do not remove bits corresponding to deleted procedures. Note that this can result in + * huge bitmaps overtime. Currently, it's set to true only when building tracker state from logs + * during recovery. During recovery, if we are sure that a procedure has been deleted, reading its + * old update entries can be skipped. */ private boolean keepDeletes = false; /** - * If true, it means tracker has incomplete information about the active/deleted procedures. - * It's set to true only when recovering from old logs. See {@link #isDeleted(long)} docs to - * understand it's real use. + * If true, it means tracker has incomplete information about the active/deleted procedures. It's + * set to true only when recovering from old logs. See {@link #isDeleted(long)} docs to understand + * it's real use. 
*/ boolean partial = false; private long minModifiedProcId = Long.MAX_VALUE; private long maxModifiedProcId = Long.MIN_VALUE; - public enum DeleteState { YES, NO, MAYBE } + public enum DeleteState { + YES, + NO, + MAYBE + } public void resetToProto(ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) { reset(); - for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : - trackerProtoBuf.getNodeList()) { + for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : trackerProtoBuf + .getNodeList()) { final BitSetNode node = new BitSetNode(protoNode); map.put(node.getStart(), node); } @@ -182,6 +183,7 @@ public void setMinMaxModifiedProcIds(long min, long max) { this.minModifiedProcId = min; this.maxModifiedProcId = max; } + /** * This method is used when restarting where we need to rebuild the ProcedureStoreTracker. The * {@link #delete(long)} method above assume that the {@link BitSetNode} exists, but when restart @@ -212,7 +214,7 @@ public void setDeletedIfModified(long... procId) { } private void setDeleteIf(ProcedureStoreTracker tracker, - BiFunction func) { + BiFunction func) { BitSetNode trackerNode = null; for (BitSetNode node : map.values()) { long minProcId = node.getStart(); @@ -236,8 +238,8 @@ private void setDeleteIf(ProcedureStoreTracker tracker, * @see #setDeletedIfModifiedInBoth(ProcedureStoreTracker) */ public void setDeletedIfDeletedByThem(ProcedureStoreTracker tracker) { - setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) || - node.isDeleted(procId) == DeleteState.YES); + setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) + || node.isDeleted(procId) == DeleteState.YES); } /** @@ -252,7 +254,7 @@ public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) { /** * lookup the node containing the specified procId. - * @param node cached node to check before doing a lookup + * @param node cached node to check before doing a lookup * @param procId the procId to lookup * @return the node that may contains the procId or null */ @@ -288,16 +290,15 @@ public void reset() { public boolean isModified(long procId) { final Map.Entry entry = map.floorEntry(procId); - return entry != null && entry.getValue().contains(procId) && - entry.getValue().isModified(procId); + return entry != null && entry.getValue().contains(procId) + && entry.getValue().isModified(procId); } /** * If {@link #partial} is false, returns state from the bitmap. If no state is found for - * {@code procId}, returns YES. - * If partial is true, tracker doesn't have complete view of system state, so it returns MAYBE - * if there is no update for the procedure or if it doesn't have a state in bitmap. Otherwise, - * returns state from the bitmap. + * {@code procId}, returns YES. If partial is true, tracker doesn't have complete view of system + * state, so it returns MAYBE if there is no update for the procedure or if it doesn't have a + * state in bitmap. Otherwise, returns state from the bitmap. */ public DeleteState isDeleted(long procId) { Map.Entry entry = map.floorEntry(procId); @@ -378,8 +379,8 @@ public long[] getAllActiveProcIds() { } /** - * Clears the list of updated procedure ids. This doesn't affect global list of active - * procedure ids. + * Clears the list of updated procedure ids. This doesn't affect global list of active procedure + * ids. 
*/ public void resetModified() { for (Map.Entry entry : map.entrySet()) { @@ -472,17 +473,16 @@ public void dump() { } // ======================================================================== - // Convert to/from Protocol Buffer. + // Convert to/from Protocol Buffer. // ======================================================================== /** - * Builds - * org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker + * Builds org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker * protocol buffer from current state. */ public ProcedureProtos.ProcedureStoreTracker toProto() throws IOException { ProcedureProtos.ProcedureStoreTracker.Builder builder = - ProcedureProtos.ProcedureStoreTracker.newBuilder(); + ProcedureProtos.ProcedureStoreTracker.newBuilder(); for (Map.Entry entry : map.entrySet()) { builder.addNode(entry.getValue().convert()); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java index 947d5bd9d650..74f3e7781070 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) { tracker.setPartialFlag(true); } - public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, - long startPos, long timestamp) { + public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos, + long timestamp) { this.fs = fs; this.header = header; this.logFile = logFile; @@ -101,7 +101,7 @@ public void readTracker() throws IOException { try { stream.seek(trailer.getTrackerPos()); final ProcedureProtos.ProcedureStoreTracker trackerProtoBuf = - ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream); + ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream); tracker.resetToProto(trackerProtoBuf); } finally { stream.seek(startPos); @@ -205,7 +205,7 @@ public boolean equals(Object o) { return false; } - return compareTo((ProcedureWALFile)o) == 0; + return compareTo((ProcedureWALFile) o) == 0; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java index bc60584126fb..e480f7200b74 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2.store.wal; import java.io.IOException; @@ -73,7 +72,8 @@ interface Loader extends ProcedureLoader { void markCorruptedWAL(ProcedureWALFile log, IOException e); } - private ProcedureWALFormat() {} + private ProcedureWALFormat() { + } /** * Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if @@ -87,7 +87,7 @@ private ProcedureWALFormat() {} * procedures. */ public static void load(Iterator logs, ProcedureStoreTracker tracker, - Loader loader) throws IOException { + Loader loader) throws IOException { ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader); tracker.setKeepDeletes(true); // Ignore the last log which is current active log. @@ -111,33 +111,22 @@ public static void load(Iterator logs, ProcedureStoreTracker t } public static void writeHeader(OutputStream stream, ProcedureWALHeader header) - throws IOException { + throws IOException { header.writeDelimitedTo(stream); } /* - * +-----------------+ - * | END OF WAL DATA | <---+ - * +-----------------+ | - * | | | - * | Tracker | | - * | | | - * +-----------------+ | - * | version | | - * +-----------------+ | - * | TRAILER_MAGIC | | - * +-----------------+ | - * | offset |-----+ - * +-----------------+ + * +-----------------+ | END OF WAL DATA | <---+ +-----------------+ | | | | | Tracker | | | | | + * +-----------------+ | | version | | +-----------------+ | | TRAILER_MAGIC | | + * +-----------------+ | | offset |-----+ +-----------------+ */ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker) - throws IOException { + throws IOException { long offset = stream.getPos(); // Write EOF Entry - ProcedureWALEntry.newBuilder() - .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF) - .build().writeDelimitedTo(stream); + ProcedureWALEntry.newBuilder().setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF).build() + .writeDelimitedTo(stream); // Write Tracker tracker.toProto().writeDelimitedTo(stream); @@ -148,8 +137,7 @@ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker return stream.getPos() - offset; } - public static ProcedureWALHeader readHeader(InputStream stream) - throws IOException { + public static ProcedureWALHeader readHeader(InputStream stream) throws IOException { ProcedureWALHeader header; try { header = ProcedureWALHeader.parseDelimitedFrom(stream); @@ -162,8 +150,8 @@ public static ProcedureWALHeader readHeader(InputStream stream) } if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) { - throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() + - " expected " + HEADER_VERSION); + throw new InvalidWALDataException( + "Invalid Header version. got " + header.getVersion() + " expected " + HEADER_VERSION); } if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) { @@ -174,7 +162,7 @@ public static ProcedureWALHeader readHeader(InputStream stream) } public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long startPos, long size) - throws IOException { + throws IOException { // Beginning of the Trailer Jump. 17 = 1 byte version + 8 byte magic + 8 byte offset long trailerPos = size - 17; @@ -185,14 +173,14 @@ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long sta stream.seek(trailerPos); int version = stream.read(); if (version != TRAILER_VERSION) { - throw new InvalidWALDataException("Invalid Trailer version. 
got " + version + - " expected " + TRAILER_VERSION); + throw new InvalidWALDataException( + "Invalid Trailer version. got " + version + " expected " + TRAILER_VERSION); } long magic = StreamUtils.readLong(stream); if (magic != TRAILER_MAGIC) { - throw new InvalidWALDataException("Invalid Trailer magic. got " + magic + - " expected " + TRAILER_MAGIC); + throw new InvalidWALDataException( + "Invalid Trailer magic. got " + magic + " expected " + TRAILER_MAGIC); } long trailerOffset = StreamUtils.readLong(stream); @@ -203,10 +191,8 @@ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long sta throw new InvalidWALDataException("Invalid Trailer begin"); } - ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder() - .setVersion(version) - .setTrackerPos(stream.getPos()) - .build(); + ProcedureWALTrailer trailer = + ProcedureWALTrailer.newBuilder().setVersion(version).setTrackerPos(stream.getPos()).build(); return trailer; } @@ -214,8 +200,8 @@ public static ProcedureWALEntry readEntry(InputStream stream) throws IOException return ProcedureWALEntry.parseDelimitedFrom(stream); } - public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, - Procedure proc, Procedure[] subprocs) throws IOException { + public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, Procedure proc, + Procedure[] subprocs) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(type); builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc)); @@ -227,23 +213,20 @@ public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, builder.build().writeDelimitedTo(slot); } - public static void writeInsert(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeInsert(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INIT, proc, null); } public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] subprocs) - throws IOException { + throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INSERT, proc, subprocs); } - public static void writeUpdate(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeUpdate(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_UPDATE, proc, null); } - public static void writeDelete(ByteSlot slot, long procId) - throws IOException { + public static void writeDelete(ByteSlot slot, long procId) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_DELETE); builder.setProcId(procId); @@ -251,7 +234,7 @@ public static void writeDelete(ByteSlot slot, long procId) } public static void writeDelete(ByteSlot slot, Procedure proc, long[] subprocs) - throws IOException { + throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_DELETE); builder.setProcId(proc.getProcId()); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java index 31150cad8fb2..c7647c72f7de 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java +++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,11 +56,10 @@ class ProcedureWALFormatReader { private final ProcedureWALFormat.Loader loader; /** - * Global tracker that will be used by the WALProcedureStore after load. - * If the last WAL was closed cleanly we already have a full tracker ready to be used. - * If the last WAL was truncated (e.g. master killed) the tracker will be empty - * and the 'partial' flag will be set. In this case, on WAL replay we are going - * to rebuild the tracker. + * Global tracker that will be used by the WALProcedureStore after load. If the last WAL was + * closed cleanly we already have a full tracker ready to be used. If the last WAL was truncated + * (e.g. master killed) the tracker will be empty and the 'partial' flag will be set. In this + * case, on WAL replay we are going to rebuild the tracker. */ private final ProcedureStoreTracker tracker; @@ -79,7 +78,7 @@ class ProcedureWALFormatReader { private long maxProcId = 0; public ProcedureWALFormatReader(final ProcedureStoreTracker tracker, - ProcedureWALFormat.Loader loader) { + ProcedureWALFormat.Loader loader) { this.tracker = tracker; this.loader = loader; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java index 41fcc186ad34..7fda7422023f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,13 +67,11 @@ public ProcedureWALPrettyPrinter() { /** * Reads a log file and outputs its contents. - * - * @param conf HBase configuration relevant to this log file - * @param p path of the log file to be read - * @throws IOException IOException + * @param conf HBase configuration relevant to this log file + * @param p path of the log file to be read + * @throws IOException IOException */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { @@ -121,8 +119,7 @@ public void processProcedureWALFile(ProcedureWALFile log) throws IOException { } } catch (IOException e) { out.println("got an exception while reading the procedure WAL " + e.getMessage()); - } - finally { + } finally { log.close(); } } @@ -150,13 +147,9 @@ private void printHeader(ProcedureWALHeader header) { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. n * Command line arguments n * Thrown upon file system + * errors etc. 
*/ @Override public int run(final String[] args) throws IOException { @@ -176,19 +169,19 @@ public int run(final String[] args) throws IOException { if (files.isEmpty() || cmd.hasOption("h")) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } } catch (ParseException e) { LOG.error("Failed to parse commandLine arguments", e); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } // get configuration, file system, and process the given files for (Path file : files) { processFile(getConf(), file); } - return(0); + return (0); } public static void main(String[] args) throws Exception { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java index 5e1983f46968..6251f4a4a5fd 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,7 @@ private void trackProcId(long procId) { * See HBASE-18152. */ private static boolean isIncreasing(ProcedureProtos.Procedure current, - ProcedureProtos.Procedure candidate) { + ProcedureProtos.Procedure candidate) { // Check that the procedures we see are 'increasing'. We used to compare // procedure id first and then update time but it can legitimately go backwards if the // procedure is failed or rolled back so that was unreliable. Was going to compare diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index 29bda4732d0f..2e7a8bfc6c97 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,8 +101,7 @@ * will first be initialized to the oldest file's tracker(which is stored in the trailer), using the * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it * with the tracker of every newer wal files, using the - * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. - * If we find out + * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out * that all the modified procedures for the oldest wal file are modified or deleted in newer wal * files, then we can delete it. 
This is because that, every time we call * {@link ProcedureStore#insert(Procedure[])} or {@link ProcedureStore#update(Procedure)}, we will @@ -121,7 +120,6 @@ public class WALProcedureStore extends ProcedureStoreBase { /** Used to construct the name of the log directory for master procedures */ public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs"; - public static final String WAL_COUNT_WARN_THRESHOLD_CONF_KEY = "hbase.procedure.store.wal.warn.threshold"; private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10; @@ -138,8 +136,7 @@ public class WALProcedureStore extends ProcedureStoreBase { "hbase.procedure.store.wal.wait.before.roll"; private static final int DEFAULT_WAIT_BEFORE_ROLL = 500; - public static final String ROLL_RETRIES_CONF_KEY = - "hbase.procedure.store.wal.max.roll.retries"; + public static final String ROLL_RETRIES_CONF_KEY = "hbase.procedure.store.wal.max.roll.retries"; private static final int DEFAULT_ROLL_RETRIES = 3; public static final String MAX_SYNC_FAILURE_ROLL_CONF_KEY = @@ -160,7 +157,7 @@ public class WALProcedureStore extends ProcedureStoreBase { private static final long DEFAULT_ROLL_THRESHOLD = 32 * 1024 * 1024; // 32M public static final String STORE_WAL_SYNC_STATS_COUNT = - "hbase.procedure.store.wal.sync.stats.count"; + "hbase.procedure.store.wal.sync.stats.count"; private static final int DEFAULT_SYNC_STATS_COUNT = 10; private final LinkedList logs = new LinkedList<>(); @@ -243,14 +240,14 @@ public WALProcedureStore(Configuration conf, LeaseRecovery leaseRecovery) throws } public WALProcedureStore(final Configuration conf, final Path walDir, final Path walArchiveDir, - final LeaseRecovery leaseRecovery) throws IOException { + final LeaseRecovery leaseRecovery) throws IOException { this.conf = conf; this.leaseRecovery = leaseRecovery; this.walDir = walDir; this.walArchiveDir = walArchiveDir; this.fs = CommonFSUtils.getWALFileSystem(conf); - this.enforceStreamCapability = conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, - true); + this.enforceStreamCapability = + conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true); // Create the log directory for the procedure store if (!fs.exists(walDir)) { @@ -260,7 +257,7 @@ public WALProcedureStore(final Configuration conf, final Path walDir, final Path } // Now that it exists, set the log policy String storagePolicy = - conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); + conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy); // Create archive dir up front. Rename won't work w/o it up on HDFS. @@ -303,8 +300,8 @@ public void start(int numSlots) throws IOException { useHsync = conf.getBoolean(USE_HSYNC_CONF_KEY, DEFAULT_USE_HSYNC); // WebUI - syncMetricsQueue = new CircularFifoQueue<>( - conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); + syncMetricsQueue = + new CircularFifoQueue<>(conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); // Init sync thread syncThread = new Thread("WALProcedureStoreSyncThread") { @@ -329,8 +326,8 @@ public void stop(final boolean abort) { return; } - LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + - (isSyncAborted() ? " (self aborting)" : "")); + LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + + (isSyncAborted() ? 
" (self aborting)" : "")); sendStopSignal(); if (!isSyncAborted()) { try { @@ -350,7 +347,7 @@ public void stop(final boolean abort) { // Close the old logs // they should be already closed, this is just in case the load fails // and we call start() and then stop() - for (ProcedureWALFile log: logs) { + for (ProcedureWALFile log : logs) { log.close(); } logs.clear(); @@ -405,8 +402,7 @@ public void recoverLease() throws IOException { while (isRunning()) { // Don't sleep before first attempt if (afterFirstAttempt) { - LOG.trace("Sleep {} ms after first lease recovery attempt.", - waitBeforeRoll); + LOG.trace("Sleep {} ms after first lease recovery attempt.", waitBeforeRoll); Threads.sleepWithoutInterrupt(waitBeforeRoll); } else { afterFirstAttempt = true; @@ -513,8 +509,9 @@ private void tryCleanupLogsOnLoad() { } // the config says to not cleanup wals on load. - if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, - DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) { + if ( + !conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY) + ) { LOG.debug("WALs cleanup on load is not enabled: " + getActiveLogs()); return; } @@ -552,8 +549,8 @@ public void insert(Procedure proc, Procedure[] subprocs) { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. - LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + - proc + ", subprocs=" + Arrays.toString(subprocs), e); + LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + proc + + ", subprocs=" + Arrays.toString(subprocs), e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -581,8 +578,8 @@ public void insert(Procedure[] procs) { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. - LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: " + - Arrays.toString(procs), e); + LOG.error(HBaseMarkers.FATAL, + "Unable to serialize one of the procedure: " + Arrays.toString(procs), e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -706,10 +703,14 @@ private void releaseSlot(final ByteSlot slot) { slotsCache.offer(slot); } - private enum PushType { INSERT, UPDATE, DELETE } + private enum PushType { + INSERT, + UPDATE, + DELETE + } - private long pushData(final PushType type, final ByteSlot slot, - final long procId, final long[] subProcIds) { + private long pushData(final PushType type, final ByteSlot slot, final long procId, + final long[] subProcIds) { if (!isRunning()) { throw new RuntimeException("the store must be running before inserting data"); } @@ -768,8 +769,7 @@ private long pushData(final PushType type, final ByteSlot slot, return logId; } - private void updateStoreTracker(final PushType type, - final long procId, final long[] subProcIds) { + private void updateStoreTracker(final PushType type, final long procId, final long[] subProcIds) { switch (type) { case INSERT: if (subProcIds == null) { @@ -819,8 +819,8 @@ private void syncLoop() throws Throwable { if (LOG.isTraceEnabled()) { float rollTsSec = getMillisFromLastRoll() / 1000.0f; LOG.trace(String.format("Waiting for data. 
flushed=%s (%s/sec)", - StringUtils.humanSize(totalSynced.get()), - StringUtils.humanSize(totalSynced.get() / rollTsSec))); + StringUtils.humanSize(totalSynced.get()), + StringUtils.humanSize(totalSynced.get() / rollTsSec))); } waitCond.await(getMillisToNextPeriodicRoll(), TimeUnit.MILLISECONDS); @@ -843,9 +843,8 @@ private void syncLoop() throws Throwable { final float syncedPerSec = totalSyncedToStore / rollSec; if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < syncMaxSlot)) { LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)", - StringUtils.humanTimeDiff(syncWaitMs), slotIndex, - StringUtils.humanSize(totalSyncedToStore), - StringUtils.humanSize(syncedPerSec))); + StringUtils.humanTimeDiff(syncWaitMs), slotIndex, + StringUtils.humanSize(totalSyncedToStore), StringUtils.humanSize(syncedPerSec))); } // update webui circular buffers (TODO: get rid of allocations) @@ -921,7 +920,7 @@ private long syncSlots() throws Throwable { } protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots, - final int offset, final int count) throws IOException { + final int offset, final int count) throws IOException { long totalSynced = 0; for (int i = 0; i < count; ++i) { final ByteSlot data = slots[offset + i]; @@ -933,8 +932,8 @@ protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots sendPostSyncSignal(); if (LOG.isTraceEnabled()) { - LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + - ", flushed=" + StringUtils.humanSize(totalSynced)); + LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" + + StringUtils.humanSize(totalSynced)); } return totalSynced; } @@ -1007,7 +1006,7 @@ void removeInactiveLogsForTesting() throws Exception { lock.lock(); try { removeInactiveLogs(); - } finally { + } finally { lock.unlock(); } } @@ -1061,11 +1060,8 @@ boolean rollWriter(long logId) throws IOException { assert lock.isHeldByCurrentThread() : "expected to be the lock owner. " + lock.isLocked(); ProcedureWALHeader header = ProcedureWALHeader.newBuilder() - .setVersion(ProcedureWALFormat.HEADER_VERSION) - .setType(ProcedureWALFormat.LOG_TYPE_STREAM) - .setMinProcId(storeTracker.getActiveMinProcId()) - .setLogId(logId) - .build(); + .setVersion(ProcedureWALFormat.HEADER_VERSION).setType(ProcedureWALFormat.LOG_TYPE_STREAM) + .setMinProcId(storeTracker.getActiveMinProcId()).setLogId(logId).build(); FSDataOutputStream newStream = null; Path newLogFile = null; @@ -1074,8 +1070,8 @@ boolean rollWriter(long logId) throws IOException { try { FSDataOutputStreamBuilder builder = fs.createFile(newLogFile).overwrite(false); if (builder instanceof DistributedFileSystem.HdfsDataOutputStreamBuilder) { - newStream = ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder) - .replicate().build(); + newStream = + ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder).replicate().build(); } else { newStream = builder.build(); } @@ -1091,11 +1087,11 @@ boolean rollWriter(long logId) throws IOException { // to provide. final String durability = useHsync ? StreamCapabilities.HSYNC : StreamCapabilities.HFLUSH; if (enforceStreamCapability && !newStream.hasCapability(durability)) { - throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + - " for proper operation during component failures, but the underlying filesystem does " + - "not support doing so. 
Please check the config value of '" + USE_HSYNC_CONF_KEY + - "' to set the desired level of robustness and ensure the config value of '" + - CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); + throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + + " for proper operation during component failures, but the underlying filesystem does " + + "not support doing so. Please check the config value of '" + USE_HSYNC_CONF_KEY + + "' to set the desired level of robustness and ensure the config value of '" + + CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); } try { ProcedureWALFormat.writeHeader(newStream, header); @@ -1120,8 +1116,8 @@ boolean rollWriter(long logId) throws IOException { if (logs.size() == 2) { buildHoldingCleanupTracker(); } else if (logs.size() > walCountWarnThreshold) { - LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + - " to see if something is stuck.", logs.size(), walCountWarnThreshold); + LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + + " to see if something is stuck.", logs.size(), walCountWarnThreshold); // This is just like what we have done at RS side when there are too many wal files. For RS, // if there are too many wal files, we will find out the wal entries in the oldest file, and // tell the upper layer to flush these regions so the wal entries will be useless and then we @@ -1168,7 +1164,7 @@ private void closeCurrentLogStream(boolean abort) { } // ========================================================================== - // Log Files cleaner helpers + // Log Files cleaner helpers // ========================================================================== private void removeInactiveLogs() throws IOException { // We keep track of which procedures are holding the oldest WAL in 'holdingCleanupTracker'. @@ -1254,7 +1250,7 @@ private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveD } // ========================================================================== - // FileSystem Log Files helpers + // FileSystem Log Files helpers // ========================================================================== public Path getWALDir() { return this.walDir; @@ -1287,14 +1283,14 @@ public boolean accept(Path path) { }; private static final Comparator FILE_STATUS_ID_COMPARATOR = - new Comparator() { - @Override - public int compare(FileStatus a, FileStatus b) { - final long aId = getLogIdFromName(a.getPath().getName()); - final long bId = getLogIdFromName(b.getPath().getName()); - return Long.compare(aId, bId); - } - }; + new Comparator() { + @Override + public int compare(FileStatus a, FileStatus b) { + final long aId = getLogIdFromName(a.getPath().getName()); + final long bId = getLogIdFromName(b.getPath().getName()); + return Long.compare(aId, bId); + } + }; private FileStatus[] getLogFiles() throws IOException { try { @@ -1367,7 +1363,7 @@ private void initTrackerFromOldLogs() { * Loads given log file and it's tracker. 
*/ private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArchiveDir) - throws IOException { + throws IOException { final ProcedureWALFile log = new ProcedureWALFile(fs, logFile); if (logFile.getLen() == 0) { LOG.warn("Remove uninitialized log: {}", logFile); @@ -1400,19 +1396,18 @@ private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArch } /** - * Parses a directory of WALs building up ProcedureState. - * For testing parse and profiling. + * Parses a directory of WALs building up ProcedureState. For testing parse and profiling. * @param args Include pointer to directory of WAL files for a store instance to parse & load. */ - public static void main(String [] args) throws IOException { + public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); if (args == null || args.length != 1) { System.out.println("ERROR: Empty arguments list; pass path to MASTERPROCWALS_DIR."); System.out.println("Usage: WALProcedureStore MASTERPROCWALS_DIR"); System.exit(-1); } - WALProcedureStore store = new WALProcedureStore(conf, new Path(args[0]), null, - new LeaseRecovery() { + WALProcedureStore store = + new WALProcedureStore(conf, new Path(args[0]), null, new LeaseRecovery() { @Override public void recoverFileLease(FileSystem fs, Path path) throws IOException { // no-op @@ -1420,7 +1415,8 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { }); try { store.start(16); - ProcedureExecutor pe = new ProcedureExecutor<>(conf, new Object()/*Pass anything*/, store); + ProcedureExecutor pe = + new ProcedureExecutor<>(conf, new Object()/* Pass anything */, store); pe.init(1, true); } finally { store.stop(true); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java index 3e95de56f255..0a88b3fc2066 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2.util; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. - * e.g. you write some data and you want to prepend an header that contains the data len or cksum. - * + * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. e.g. you + * write some data and you want to prepend an header that contains the data len or cksum. 
* ByteSlot slot = new ByteSlot(); * // write data * slot.write(...); @@ -78,13 +75,13 @@ public byte[] getBuffer() { public void writeAt(int offset, int b) { head = Math.min(head, offset); - buf[offset] = (byte)b; + buf[offset] = (byte) b; } @Override public void write(int b) { ensureCapacity(size + 1); - buf[size++] = (byte)b; + buf[size++] = (byte) b; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java index fa796ae97426..19c4f8bcb589 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.DelayQueue; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -30,7 +29,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public final class DelayedUtil { - private DelayedUtil() { } + private DelayedUtil() { + } /** * Add a timeout to a Delay @@ -78,7 +78,7 @@ public String toString() { * @return null (if an interrupt) or an instance of E; resets interrupt on calling thread. */ public static E takeWithoutInterrupt(final DelayQueue queue, - final long timeout, final TimeUnit timeUnit) { + final long timeout, final TimeUnit timeUnit) { try { return queue.poll(timeout, timeUnit); } catch (InterruptedException e) { @@ -104,7 +104,7 @@ public static int compareDelayed(final Delayed o1, final Delayed o2) { private static long getTimeout(final Delayed o) { assert o instanceof DelayedWithTimeout : "expected DelayedWithTimeout instance, got " + o; - return ((DelayedWithTimeout)o).getTimeout(); + return ((DelayedWithTimeout) o).getTimeout(); } public static abstract class DelayedObject implements DelayedWithTimeout { @@ -146,7 +146,7 @@ public boolean equals(final Object other) { return false; } - return Objects.equals(getObject(), ((DelayedContainer)other).getObject()); + return Objects.equals(getObject(), ((DelayedContainer) other).getObject()); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java index fddc999bec3c..cddfd94d3da9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public final class StringUtils { - private StringUtils() {} + private StringUtils() { + } public static String humanTimeDiff(long timeDiff) { if (timeDiff < 1000) { @@ -31,17 +32,17 @@ public static String humanTimeDiff(long timeDiff) { } StringBuilder buf = new StringBuilder(); - long hours = timeDiff / (60*60*1000); - long rem = (timeDiff % (60*60*1000)); - long minutes = rem / (60*1000); - rem = rem % (60*1000); + long hours = timeDiff / (60 * 60 * 1000); + long rem = (timeDiff % (60 * 60 * 1000)); + long minutes = rem / (60 * 1000); + rem = rem % (60 * 1000); float seconds = rem / 1000.0f; - if (hours != 0){ + if (hours != 0) { buf.append(hours); buf.append(" hrs, "); } - if (minutes != 0){ + if (minutes != 0) { buf.append(minutes); buf.append(" mins, "); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index 6c66a49c2018..926a46e9c56c 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -228,8 +228,9 @@ public static void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor { - public TestRootProcedure() {} + public TestRootProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { @@ -194,7 +193,8 @@ public boolean abort(TestProcEnv env) { } public static class TestChildProcedure extends SequentialProcedure { - public TestChildProcedure() {} + public TestChildProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java index d0d6864ab6cf..50f2b1ba0338 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -103,7 +103,7 @@ public static final class WaitingProcedure extends NoopProcedure { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { EXCHANGER.exchange(Boolean.TRUE); setState(ProcedureState.WAITING_TIMEOUT); setTimeout(Integer.MAX_VALUE); @@ -116,7 +116,7 @@ public static final class ParentProcedure extends NoopProcedure { @SuppressWarnings("unchecked") @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return new Procedure[] { new NoopProcedure<>(), new WaitingProcedure() }; } } @@ -126,7 +126,7 @@ public static final class ExchangeProcedure extends NoopProcedure { @SuppressWarnings("unchecked") @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (EXCHANGER.exchange(Boolean.TRUE)) { return new Procedure[] { this }; } else { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java index 9f24403dc7d4..58c3bb3277ae 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java index fa8db418aece..6bc3b4a84f66 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -40,12 +39,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; - -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureBypass { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule - .forClass(TestProcedureBypass.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestProcedureBypass.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureBypass.class); @@ -77,11 +76,9 @@ public static void setUp() throws Exception { logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); - procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, - procStore); + procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, procStore); procStore.start(PROCEDURE_EXECUTOR_SLOTS); - ProcedureTestingUtility - .initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); + ProcedureTestingUtility.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); } @Test @@ -89,7 +86,7 @@ public void testBypassSuspendProcedure() throws Exception { final SuspendProcedure proc = new SuspendProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 30000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -100,9 +97,9 @@ public void testStuckProcedure() throws Exception { final StuckProcedure proc = new StuckProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 1000, true, false)); - //Since the procedure is stuck there, we need to restart the executor to recovery. + // Since the procedure is stuck there, we need to restart the executor to recovery. 
ProcedureTestingUtility.restart(procExecutor); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -113,10 +110,9 @@ public void testBypassingProcedureWithParent() throws Exception { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(suspendProcedure.getProcId(), 1000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -125,7 +121,7 @@ public void testBypassingProcedureWithParent() throws Exception { @Test public void testBypassingStuckStateMachineProcedure() throws Exception { final StuckStateMachineProcedure proc = - new StuckStateMachineProcedure(procEnv, StuckStateMachineState.START); + new StuckStateMachineProcedure(procEnv, StuckStateMachineState.START); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); // bypass the procedure @@ -141,10 +137,9 @@ public void testBypassingProcedureWithParentRecursive() throws Exception { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(rootId, 1000, false, true)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -176,8 +171,7 @@ public SuspendProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure throw new ProcedureSuspendedException(); } @@ -201,7 +195,6 @@ protected Procedure[] execute(final TestProcEnv env) { } - public static class RootProcedure extends ProcedureTestingUtility.NoopProcedure { private boolean childSpwaned = false; @@ -210,11 +203,10 @@ public RootProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { if (!childSpwaned) { childSpwaned = true; - return new Procedure[] {new SuspendProcedure()}; + return new Procedure[] { new SuspendProcedure() }; } else { return null; } @@ -222,14 +214,13 @@ protected Procedure[] execute(final TestProcEnv env) } public static class WaitingTimeoutProcedure 
- extends ProcedureTestingUtility.NoopProcedure { + extends ProcedureTestingUtility.NoopProcedure { public WaitingTimeoutProcedure() { super(); } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure setTimeout(50000); setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); @@ -246,11 +237,13 @@ protected synchronized boolean setTimeoutFailure(TestProcEnv env) { } public enum StuckStateMachineState { - START, THEN, END + START, + THEN, + END } - public static class StuckStateMachineProcedure extends - ProcedureTestingUtility.NoopStateMachineProcedure { + public static class StuckStateMachineProcedure + extends ProcedureTestingUtility.NoopStateMachineProcedure { private AtomicBoolean stop = new AtomicBoolean(false); public StuckStateMachineProcedure() { @@ -263,7 +256,7 @@ public StuckStateMachineProcedure(TestProcEnv env, StuckStateMachineState initia @Override protected Flow executeFromState(TestProcEnv env, StuckStateMachineState tState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (tState) { case START: LOG.info("PHASE 1: START"); @@ -292,5 +285,4 @@ protected int getStateId(StuckStateMachineState tState) { } } - } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java index 07dd8d8ae907..0c1c979927a7 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,6 @@ public class TestProcedureCleanup { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProcedureCleanup.class); - private static final Logger LOG = LoggerFactory.getLogger(TestProcedureCleanup.class); private static final int PROCEDURE_EXECUTOR_SLOTS = 2; @@ -95,8 +94,7 @@ public void testProcedureShouldNotCleanOnLoad() throws Exception { LOG.info("Begin to execute " + rootProc); // wait until the child procedure arrival htu.waitFor(10000, () -> procExecutor.getProcedures().size() >= 2); - SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor - .getProcedures().get(1); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().get(1); // wait until the suspendProcedure executed suspendProcedure.latch.countDown(); Thread.sleep(100); @@ -188,7 +186,6 @@ private void corrupt(FileStatus file) throws IOException { fs.rename(tmpFile, file.getPath()); } - public static final class ExchangeProcedure extends ProcedureTestingUtility.NoopProcedure { private final Exchanger exchanger = new Exchanger<>(); @@ -196,7 +193,7 @@ public static final class ExchangeProcedure extends ProcedureTestingUtility.Noop @SuppressWarnings("unchecked") @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (exchanger.exchange(Boolean.TRUE)) { return new Procedure[] { this }; } else { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java index f8cd787e98f5..b48d7f6877e8 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,12 +41,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureEvents { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureEvents.class); + HBaseClassTestRule.forClass(TestProcedureEvents.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureEvents.class); @@ -81,11 +81,10 @@ public void tearDown() throws IOException { } /** - * Tests being able to suspend a Procedure for N timeouts and then failing.s - * Resets the timeout after each elapses. See {@link TestTimeoutEventProcedure} for example - * of how to do this sort of trickery with the ProcedureExecutor; i.e. suspend for a while, - * check for a condition and if not set, suspend again, etc., ultimately failing or succeeding - * eventually. + * Tests being able to suspend a Procedure for N timeouts and then failing.s Resets the timeout + * after each elapses. See {@link TestTimeoutEventProcedure} for example of how to do this sort of + * trickery with the ProcedureExecutor; i.e. 
suspend for a while, check for a condition and if not + * set, suspend again, etc., ultimately failing or succeeding eventually. */ @Test public void testTimeoutEventProcedure() throws Exception { @@ -110,7 +109,7 @@ public void testTimeoutEventProcedureDoubleExecutionKillIfSuspended() throws Exc } private void testTimeoutEventProcedureDoubleExecution(final boolean killIfSuspended) - throws Exception { + throws Exception { TestTimeoutEventProcedure proc = new TestTimeoutEventProcedure(1000, 3); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true); ProcedureTestingUtility.setKillIfSuspended(procExecutor, killIfSuspended); @@ -122,20 +121,19 @@ private void testTimeoutEventProcedureDoubleExecution(final boolean killIfSuspen /** * This Event+Procedure exhibits following behavior: *

   * <ul>
-   *   <li>On procedure execute()
-   *     <ul>
-   *       <li>If had enough timeouts, abort the procedure. Else....</li>
-   *       <li>Suspend the event and add self to its suspend queue</li>
-   *       <li>Go into waiting state</li>
-   *     </ul>
-   *   </li>
-   *   <li>
-   * On waiting timeout
-   *     <ul>
-   *       <li>Wake the event (which adds this procedure back into scheduler queue), and set own's
-   * state to RUNNABLE (so can be executed again).</li>
-   *     </ul>
-   *   </li>
+   * <li>On procedure execute()
+   * <ul>
+   * <li>If had enough timeouts, abort the procedure. Else....</li>
+   * <li>Suspend the event and add self to its suspend queue</li>
+   * <li>Go into waiting state</li>
+   * </ul>
+   * </li>
+   * <li>On waiting timeout
+   * <ul>
+   * <li>Wake the event (which adds this procedure back into scheduler queue), and set own's state
+   * to RUNNABLE (so can be executed again).</li>
+   * </ul>
+   * </li>
   * </ul>
    */ public static class TestTimeoutEventProcedure extends NoopProcedure { @@ -144,7 +142,8 @@ public static class TestTimeoutEventProcedure extends NoopProcedure private final AtomicInteger ntimeouts = new AtomicInteger(0); private int maxTimeouts = 1; - public TestTimeoutEventProcedure() {} + public TestTimeoutEventProcedure() { + } public TestTimeoutEventProcedure(final int timeoutMsec, final int maxTimeouts) { this.maxTimeouts = maxTimeouts; @@ -190,8 +189,7 @@ protected void afterReplay(final TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value.Builder ntimeoutsBuilder = Int32Value.newBuilder().setValue(ntimeouts.get()); serializer.serialize(ntimeoutsBuilder.build()); @@ -200,8 +198,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value ntimeoutsValue = serializer.deserialize(Int32Value.class); ntimeouts.set(ntimeoutsValue.getValue()); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java index ba52975cd6f5..c9a1d49b2ba5 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,11 +42,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecution { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureExecution.class); + HBaseClassTestRule.forClass(TestProcedureExecution.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureExecution.class); @@ -204,7 +204,8 @@ public void testSingleSequentialProcRollback() { public static class TestFaultyRollback extends SequentialProcedure { private int retries = 0; - public TestFaultyRollback() { } + public TestFaultyRollback() { + } @Override protected Procedure[] execute(Void env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java index 01d25acc1793..923039f794c0 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,12 +37,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecutor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureExecutor.class); + HBaseClassTestRule.forClass(TestProcedureExecutor.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureExecutor.class); @@ -155,8 +155,8 @@ private int waitThreadCount(final int expectedThreads) { if (procExecutor.getWorkerThreadCount() == expectedThreads) { break; } - LOG.debug("waiting for thread count=" + expectedThreads + - " current=" + procExecutor.getWorkerThreadCount()); + LOG.debug("waiting for thread count=" + expectedThreads + " current=" + + procExecutor.getWorkerThreadCount()); Threads.sleepWithoutInterrupt(250); } return procExecutor.getWorkerThreadCount(); @@ -189,5 +189,6 @@ protected Procedure[] execute(final TestProcEnv env) { } } - private static class TestProcEnv { } + private static class TestProcEnv { + } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java index 454b188280d4..7ff4b3d5d41d 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureInMemoryChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureInMemoryChore.class); + HBaseClassTestRule.forClass(TestProcedureInMemoryChore.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureInMemoryChore.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java index a2f833d62d90..287c5d5431b4 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,11 +37,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureMetrics { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureMetrics.class); + HBaseClassTestRule.forClass(TestProcedureMetrics.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureMetrics.class); @@ -204,7 +204,7 @@ public ProcedureMetrics(boolean success, ProcedureMetrics[] subprocs) { } public ProcedureMetrics(boolean success, boolean yield, int yieldCount, - ProcedureMetrics[] subprocs) { + ProcedureMetrics[] subprocs) { this.success = success; this.yield = yield; this.yieldCount = yieldCount; @@ -218,8 +218,8 @@ protected void updateMetricsOnSubmit(TestProcEnv env) { } @Override - protected Procedure[] execute(TestProcEnv env) throws ProcedureYieldException, - ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (this.yield) { if (yieldNum < yieldCount) { yieldNum++; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java index 2a5c5ade1e83..911d7ac53679 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureNonce { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureNonce.class); + HBaseClassTestRule.forClass(TestProcedureNonce.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureNonce.class); @@ -174,8 +174,8 @@ public void testConcurrentNonceRegistrationWithRollback() throws IOException { testConcurrentNonceRegistration(false, 890, 55555); } - private void testConcurrentNonceRegistration(final boolean submitProcedure, - final long nonceGroup, final long nonce) throws IOException { + private void testConcurrentNonceRegistration(final boolean submitProcedure, final long nonceGroup, + final long nonce) throws IOException { // register the nonce final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce); @@ -229,8 +229,7 @@ public void run() { // register the nonce t2BeforeNonceRegisteredLatch.countDown(); - assertFalse("unexpected non registered nonce", - procExecutor.registerNonce(nonceKey) < 0); + assertFalse("unexpected non registered nonce", procExecutor.registerNonce(nonceKey) < 0); } catch (Throwable e) { t2Exception.set(e); } finally { @@ -256,7 +255,8 @@ public void run() { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -269,7 +269,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index 686b10dbf42b..706803958fc0 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,11 +45,11 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int32Value; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureRecovery { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureRecovery.class); + HBaseClassTestRule.forClass(TestProcedureRecovery.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureRecovery.class); @@ -98,7 +98,8 @@ private void restart() throws Exception { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -110,7 +111,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { @@ -130,9 +132,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { step++; Threads.sleepWithoutInterrupt(procSleepInterval); if (isAborted()) { - setFailure(new RemoteProcedureException(getClass().getName(), - new ProcedureAbortedException( - "got an abort at " + getClass().getName() + " step=" + step))); + setFailure(new RemoteProcedureException(getClass().getName(), new ProcedureAbortedException( + "got an abort at " + getClass().getName() + " step=" + step))); return null; } return null; @@ -155,7 +156,7 @@ private boolean isAborted() { boolean aborted = abort.get(); BaseTestStepProcedure proc = this; while (proc.hasParent() && !aborted) { - proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId()); + proc = (BaseTestStepProcedure) procExecutor.getProcedure(proc.getParentProcId()); aborted = proc.isAborted(); } return aborted; @@ -163,7 +164,8 @@ private boolean isAborted() { } public static class TestMultiStepProcedure extends BaseTestStepProcedure { - public TestMultiStepProcedure() { } + public TestMultiStepProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -172,7 +174,8 @@ public Procedure[] execute(TestProcEnv env) throws InterruptedException { } public static class Step1Procedure extends BaseTestStepProcedure { - public Step1Procedure() { } + public Step1Procedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -182,7 +185,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } public static class Step2Procedure extends BaseTestStepProcedure { - public Step2Procedure() { } + public Step2Procedure() { + } } } @@ -294,10 +298,16 @@ public void testMultiStepRollbackRecovery() throws Exception { } public static class TestStateMachineProcedure - extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3, DONE } + extends StateMachineProcedure { + enum State { + STATE_1, + STATE_2, + STATE_3, + DONE + } - public TestStateMachineProcedure() {} + public TestStateMachineProcedure() { + } public TestStateMachineProcedure(final boolean testSubmitChildProc) { this.submitChildProc = testSubmitChildProc; @@ -388,16 +398,14 @@ protected boolean abort(TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws 
IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { super.serializeStateData(serializer); Int32Value.Builder builder = Int32Value.newBuilder().setValue(iResult); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { super.deserializeStateData(serializer); Int32Value value = serializer.deserialize(Int32Value.class); iResult = value.getValue(); @@ -515,7 +523,7 @@ private void dumpLogDirState() { try { FileStatus[] files = fs.listStatus(logDir); if (files != null && files.length > 0) { - for (FileStatus file: files) { + for (FileStatus file : files) { assertTrue(file.toString(), file.isFile()); LOG.debug("log file " + file.getPath() + " size=" + file.getLen()); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java index 4d7d45de3ac7..2735cf2b319c 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; @@ -51,7 +52,7 @@ public class TestProcedureReplayOrder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureReplayOrder.class); + HBaseClassTestRule.forClass(TestProcedureReplayOrder.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureReplayOrder.class); @@ -133,7 +134,7 @@ public void testMultiStepReplayOrder() throws Exception { } private void submitProcedures(final int nthreads, final int nprocPerThread, - final Class procClazz) throws Exception { + final Class procClazz) throws Exception { Thread[] submitThreads = new Thread[nthreads]; for (int i = 0; i < submitThreads.length; ++i) { submitThreads[i] = new Thread() { @@ -141,8 +142,8 @@ private void submitProcedures(final int nthreads, final int nprocPerThread, public void run() { for (int i = 0; i < nprocPerThread; ++i) { try { - procExecutor.submitProcedure((Procedure) - procClazz.getDeclaredConstructor().newInstance()); + procExecutor + .submitProcedure((Procedure) procClazz.getDeclaredConstructor().newInstance()); } catch (Exception e) { LOG.error("unable to instantiate the procedure", e); fail("failure during the proc.newInstance(): " + e.getMessage()); @@ -197,7 +198,8 @@ public long getExecId() { } @Override - protected void rollback(TestProcedureEnv env) { } + protected void rollback(TestProcedureEnv env) { + } @Override protected boolean abort(TestProcedureEnv env) { @@ -205,15 +207,13 @@ protected boolean abort(TestProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value.Builder builder = 
Int64Value.newBuilder().setValue(execId); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value value = serializer.deserialize(Int64Value.class); execId = value.getValue(); step = 2; @@ -221,7 +221,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) } public static class TestSingleStepProcedure extends TestProcedure { - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { @@ -244,7 +245,8 @@ public String toString() { } public static class TestTwoStepProcedure extends TestProcedure { - public TestTwoStepProcedure() { } + public TestTwoStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java index 098c53fff28d..c5554da0f1a8 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestProcedureRollbackAIOOB { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureRollbackAIOOB.class); + HBaseClassTestRule.forClass(TestProcedureRollbackAIOOB.class); private static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil(); @@ -53,7 +53,7 @@ public static final class ParentProcedure extends NoopProcedure { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { latch.await(); if (scheduled) { return null; @@ -67,7 +67,7 @@ public static final class SubProcedure extends NoopProcedure { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { setFailure("Inject error", new RuntimeException("Inject error")); return null; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java index f56cdb31b6b8..186d38595344 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,11 +34,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSchedulerConcurrency { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureSchedulerConcurrency.class); + HBaseClassTestRule.forClass(TestProcedureSchedulerConcurrency.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureEvents.class); @@ -105,8 +105,10 @@ public void run() { } if (wakeCount.get() != oldWakeCount) { lastUpdate = EnvironmentEdgeManager.currentTime(); - } else if (wakeCount.get() >= NRUNS && - (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) { + } else if ( + wakeCount.get() >= NRUNS + && (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD + ) { break; } Threads.sleepWithoutInterrupt(25); @@ -119,7 +121,7 @@ public void run() { @Override public void run() { while (true) { - TestProcedureWithEvent proc = (TestProcedureWithEvent)sched.poll(); + TestProcedureWithEvent proc = (TestProcedureWithEvent) sched.poll(); if (proc == null) { continue; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java index 266082e04487..452b6f7e8078 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,7 +70,7 @@ protected boolean holdLock(ProcEnv env) { @Override protected Procedure[] execute(ProcEnv env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (STEP == 0) { STEP = 1; setTimeout(60 * 60 * 1000); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java index 38aef16cffea..d34a08d6dd53 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSuspended { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureSuspended.class); + HBaseClassTestRule.forClass(TestProcedureSuspended.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureSuspended.class); @@ -181,8 +181,8 @@ public static class TestLockProcedure extends Procedure { private AtomicBoolean lock = null; private boolean hasLock = false; - public TestLockProcedure(final AtomicBoolean lock, final String key, - final boolean throwYield, final boolean throwSuspend) { + public TestLockProcedure(final AtomicBoolean lock, final String key, final boolean throwYield, + final boolean throwSuspend) { this.lock = lock; this.key = key; this.throwYield = throwYield; @@ -203,7 +203,7 @@ public void setTriggerRollback(final boolean triggerRollback) { @Override protected Procedure[] execute(final TestProcEnv env) - throws ProcedureYieldException, ProcedureSuspendedException { + throws ProcedureYieldException, ProcedureSuspendedException { LOG.info("EXECUTE " + this + " suspend " + (lock != null)); timestamps.add(env.nextTimestamp()); if (triggerRollback) { @@ -259,13 +259,11 @@ protected boolean abort(TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java index 07eacfeb7c7f..ff4de723b119 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,17 +30,18 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureToString { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureToString.class); + HBaseClassTestRule.forClass(TestProcedureToString.class); /** * A do-nothing environment for BasicProcedure. */ - static class BasicProcedureEnv {} + static class BasicProcedureEnv { + } /** * A do-nothing basic procedure just for testing toString. 
@@ -48,8 +49,8 @@ static class BasicProcedureEnv {} static class BasicProcedure extends Procedure { @Override protected Procedure[] execute(BasicProcedureEnv env) - throws ProcedureYieldException, InterruptedException { - return new Procedure [] {this}; + throws ProcedureYieldException, InterruptedException { + return new Procedure[] { this }; } @Override @@ -62,13 +63,11 @@ protected boolean abort(BasicProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -106,17 +105,17 @@ public void testBasicToString() { * Do-nothing SimpleMachineProcedure for checking its toString. */ static class SimpleStateMachineProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { @Override - protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow executeFromState( - BasicProcedureEnv env, ServerCrashState state) - throws ProcedureYieldException, InterruptedException { + protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow + executeFromState(BasicProcedureEnv env, ServerCrashState state) + throws ProcedureYieldException, InterruptedException { return null; } @Override - protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) throws IOException, - InterruptedException { + protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) + throws IOException, InterruptedException { } @Override diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java index 4d57c37ac619..885ba88a8327 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,13 +51,14 @@ public void testConvert() throws Exception { // check Procedure to protobuf conversion final TestProcedure proc1 = new TestProcedure(10, 1, new byte[] { 65 }); final ProcedureProtos.Procedure proto1 = ProcedureUtil.convertToProtoProcedure(proc1); - final TestProcedure proc2 = (TestProcedure)ProcedureUtil.convertToProcedure(proto1); + final TestProcedure proc2 = (TestProcedure) ProcedureUtil.convertToProcedure(proto1); final ProcedureProtos.Procedure proto2 = ProcedureUtil.convertToProtoProcedure(proc2); assertEquals(false, proto2.hasResult()); assertEquals("Procedure protobuf does not match", proto1, proto2); } public static class TestProcedureNoDefaultConstructor extends TestProcedure { - public TestProcedureNoDefaultConstructor(int x) {} + public TestProcedureNoDefaultConstructor(int x) { + } } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java index ec001d1e3373..07ca0221b772 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java index cc3c765885d2..61bc42d75ac4 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStateMachineProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStateMachineProcedure.class); + HBaseClassTestRule.forClass(TestStateMachineProcedure.class); private static final Logger LOG = LoggerFactory.getLogger(TestStateMachineProcedure.class); @@ -62,7 +62,7 @@ public boolean equals(final Object other) { // we are going to serialize the exception in the test, // so the instance comparison will not match - return getMessage().equals(((Exception)other).getMessage()); + return getMessage().equals(((Exception) other).getMessage()); } @Override @@ -179,10 +179,13 @@ public void testChildOnLastStepWithRollbackDoubleExecution() throws Exception { assertEquals(TEST_FAILURE_EXCEPTION, cause); } - public enum TestSMProcedureState { STEP_1, STEP_2 } + public enum TestSMProcedureState { + STEP_1, + STEP_2 + } public static class TestSMProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { @Override protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { LOG.info("EXEC " + state + " " + this); @@ -228,7 +231,7 @@ protected TestSMProcedureState getInitialState() { } public static class TestSMProcedureBadRollback - extends StateMachineProcedure { + extends StateMachineProcedure { @Override protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { LOG.info("EXEC " + state + " " + this); @@ -245,6 +248,7 @@ protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { } return Flow.HAS_MORE_STATE; } + @Override protected void rollbackState(TestProcEnv env, TestSMProcedureState state) { LOG.info("ROLLBACK " + state + " " + this); @@ -267,8 +271,7 @@ protected TestSMProcedureState getInitialState() { } @Override - protected void rollback(final TestProcEnv env) - throws IOException, InterruptedException { + protected void rollback(final TestProcEnv env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -276,8 +279,8 @@ protected void rollback(final TestProcEnv env) updateTimestamp(); rollbackState(env, getCurrentState()); throw new IOException(); - } catch(IOException e) { - //do nothing for now + } catch (IOException e) { + // do nothing for now } finally { stateCount--; updateTimestamp(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java index 890bbd1871cf..b15d8b38d4da 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,11 +40,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestYieldProcedures { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestYieldProcedures.class); + HBaseClassTestRule.forClass(TestYieldProcedures.class); private static final Logger LOG = LoggerFactory.getLogger(TestYieldProcedures.class); @@ -188,8 +188,12 @@ public long nextTimestamp() { } public static class TestStateMachineProcedure - extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3 } + extends StateMachineProcedure { + enum State { + STATE_1, + STATE_2, + STATE_3 + } public static class ExecutionInfo { private final boolean rollback; @@ -225,7 +229,7 @@ public TestStateMachineProcedure() { } public TestStateMachineProcedure(boolean abortOnFinalStep, - boolean throwInterruptOnceOnEachStep) { + boolean throwInterruptOnceOnEachStep) { this.abortOnFinalStep = abortOnFinalStep; this.throwInterruptOnceOnEachStep = throwInterruptOnceOnEachStep; } @@ -236,7 +240,7 @@ public ArrayList getExecutionInfo() { @Override protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State state) - throws InterruptedException { + throws InterruptedException { final long ts = env.nextTimestamp(); LOG.info(getProcId() + " execute step " + state + " ts=" + ts); executionInfo.add(new ExecutionInfo(ts, state, false)); @@ -266,8 +270,7 @@ protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State sta } @Override - protected void rollbackState(TestProcEnv env, final State state) - throws InterruptedException { + protected void rollbackState(TestProcEnv env, final State state) throws InterruptedException { final long ts = env.nextTimestamp(); LOG.debug(getProcId() + " rollback state " + state + " ts=" + ts); executionInfo.add(new ExecutionInfo(ts, state, true)); @@ -347,13 +350,11 @@ protected boolean isYieldAfterExecutionStep(final TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -364,7 +365,8 @@ private static class TestScheduler extends SimpleProcedureScheduler { private int yieldCalls; private int pollCalls; - public TestScheduler() {} + public TestScheduler() { + } @Override public void addFront(final Procedure proc) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java index d88d93e571f2..d937ba6ace12 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -50,24 +51,24 @@ public abstract class ProcedureStorePerformanceEvaluation future : futures) { - long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - - EnvironmentEdgeManager.currentTime(); + long timeout = + start + WORKER_THREADS_TIMEOUT_SEC * 1000 - EnvironmentEdgeManager.currentTime(); failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE)); } } catch (Exception e) { @@ -219,8 +220,8 @@ public Integer call() throws IOException { } if (procId != 0 && procId % 10000 == 0) { long ns = System.nanoTime() - start; - System.out.println("Wrote " + procId + " procedures in " + - StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(ns))); + System.out.println("Wrote " + procId + " procedures in " + + StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(ns))); } try { preWrite(procId); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java index 29d114af7212..73acdf633ce6 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ public synchronized void addStackIndex(int index) { @Override protected Procedure[] execute(Void env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return null; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java index 2866b21518ec..4331a681a2cb 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -42,24 +42,24 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { protected static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil(); // Command line options and defaults. - public static int DEFAULT_NUM_PROCS = 1000000; // 1M - public static Option NUM_PROCS_OPTION = new Option("procs", true, - "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); + public static int DEFAULT_NUM_PROCS = 1000000; // 1M + public static Option NUM_PROCS_OPTION = + new Option("procs", true, "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); - public static int DEFAULT_STATE_SIZE = 1024; // 1KB - public static Option STATE_SIZE_OPTION = new Option("state_size", true, - "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE - + " bytes"); + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + public static int DEFAULT_STATE_SIZE = 1024; // 1KB + public static Option STATE_SIZE_OPTION = + new Option("state_size", true, "Size of serialized state in bytes to write on update. Default: " + + DEFAULT_STATE_SIZE + " bytes"); public static int DEFAULT_UPDATES_PER_PROC = 5; public static Option UPDATES_PER_PROC_OPTION = new Option("updates_per_proc", true, - "Number of update states to write for each proc. Default: " + DEFAULT_UPDATES_PER_PROC); + "Number of update states to write for each proc. Default: " + DEFAULT_UPDATES_PER_PROC); public static double DEFAULT_DELETE_PROCS_FRACTION = 0.50; public static Option DELETE_PROCS_FRACTION_OPTION = new Option("delete_procs_fraction", true, - "Fraction of procs for which to write delete state. Distribution of procs chosen for " - + "delete is uniform across all procs. Default: " + DEFAULT_DELETE_PROCS_FRACTION); + "Fraction of procs for which to write delete state. Distribution of procs chosen for " + + "delete is uniform across all procs. 
Default: " + DEFAULT_DELETE_PROCS_FRACTION); public int numProcs; public int updatesPerProc; @@ -69,7 +69,8 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { static byte[] serializedState; private static class LoadCounter implements ProcedureStore.ProcedureLoader { - public LoadCounter() {} + public LoadCounter() { + } @Override public void setMaxProcId(long maxProcId) { @@ -105,10 +106,10 @@ protected void processOptions(CommandLine cmd) { numWals = getOptionAsInt(cmd, NUM_WALS_OPTION.getOpt(), DEFAULT_NUM_WALS); int stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); serializedState = new byte[stateSize]; - updatesPerProc = getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), - DEFAULT_UPDATES_PER_PROC); - deleteProcsFraction = getOptionAsDouble(cmd, DELETE_PROCS_FRACTION_OPTION.getOpt(), - DEFAULT_DELETE_PROCS_FRACTION); + updatesPerProc = + getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), DEFAULT_UPDATES_PER_PROC); + deleteProcsFraction = + getOptionAsDouble(cmd, DELETE_PROCS_FRACTION_OPTION.getOpt(), DEFAULT_DELETE_PROCS_FRACTION); setupConf(); } @@ -140,7 +141,7 @@ private List shuffleProcWriteSequence() { Set toBeDeletedProcs = new HashSet<>(); // Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add // extra entry which is marked -ve in the loop after shuffle. - for (int procId = 1; procId <= numProcs; ++procId) { + for (int procId = 1; procId <= numProcs; ++procId) { procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId)); if (ThreadLocalRandom.current().nextFloat() < deleteProcsFraction) { procStatesSequence.add(procId); @@ -161,7 +162,7 @@ private List shuffleProcWriteSequence() { private void writeWals() throws IOException { List procStates = shuffleProcWriteSequence(); - TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. + TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE; long startTime = EnvironmentEdgeManager.currentTime(); long lastTime = startTime; @@ -179,15 +180,15 @@ private void writeWals() throws IOException { } if (i > 0 && i % numProcsPerWal == 0) { long currentTime = EnvironmentEdgeManager.currentTime(); - System.out.println("Forcing wall roll. Time taken on last WAL: " + - (currentTime - lastTime) / 1000.0f + " sec"); + System.out.println("Forcing wall roll. 
Time taken on last WAL: " + + (currentTime - lastTime) / 1000.0f + " sec"); store.rollWriterForTesting(); lastTime = currentTime; } } long timeTaken = EnvironmentEdgeManager.currentTime() - startTime; System.out.println("\n\nDone writing WALs.\nNum procs : " + numProcs + "\nTotal time taken : " - + StringUtils.humanTimeDiff(timeTaken) + "\n\n"); + + StringUtils.humanTimeDiff(timeTaken) + "\n\n"); } private void storeRestart(ProcedureStore.ProcedureLoader loader) throws IOException { @@ -203,11 +204,11 @@ private void storeRestart(ProcedureStore.ProcedureLoader loader) throws IOExcept System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec"); System.out.println("******************************************"); System.out.println("Raw format for scripts"); - System.out.println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " - + "total_time_ms=%s]", - NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), serializedState.length, - UPDATES_PER_PROC_OPTION.getOpt(), updatesPerProc, DELETE_PROCS_FRACTION_OPTION.getOpt(), - deleteProcsFraction, NUM_WALS_OPTION.getOpt(), numWals, timeTaken)); + System.out + .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", + NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), serializedState.length, + UPDATES_PER_PROC_OPTION.getOpt(), updatesPerProc, DELETE_PROCS_FRACTION_OPTION.getOpt(), + deleteProcsFraction, NUM_WALS_OPTION.getOpt(), numWals, timeTaken)); } public void tearDownProcedureStore() { @@ -216,7 +217,7 @@ public void tearDownProcedureStore() { store.getFileSystem().delete(store.getWALDir(), true); } catch (IOException e) { System.err.println("Error: Couldn't delete log dir. You can delete it manually to free up " - + "disk space. Location: " + store.getWALDir().toString()); + + "disk space. Location: " + store.getWALDir().toString()); System.err.println(e.toString()); } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java index cab44264f295..f9bad936ee23 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +36,8 @@ public class ProcedureWALPerformanceEvaluation // Command line options and defaults. public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); private long numProcsPerWal = Long.MAX_VALUE; // never roll wall based on this value. 
private int numWals; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java index 9d897cf878c5..251cb39842b2 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java index e3064c9ab823..46164be64698 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -33,11 +32,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureStoreTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureStoreTracker.class); + HBaseClassTestRule.forClass(TestProcedureStoreTracker.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureStoreTracker.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java index 12ea02adf9c6..31fb550c7cb1 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; @@ -43,12 +44,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStressWALProcedureStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStressWALProcedureStore.class); + HBaseClassTestRule.forClass(TestStressWALProcedureStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestWALProcedureStore.class); @@ -115,7 +116,8 @@ public void run() { for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) { try { Thread.sleep(0, rand.nextInt(15)); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } procStore.update(proc); } // Delete @@ -136,7 +138,8 @@ public void run() { assertEquals(1, procStore.getActiveLogs().size()); } - @Ignore @Test // REENABLE after merge of + @Ignore + @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index 915d6190b815..9d98b0b4f95f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,11 +61,11 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int64Value; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestWALProcedureStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALProcedureStore.class); + HBaseClassTestRule.forClass(TestWALProcedureStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestWALProcedureStore.class); @@ -161,7 +161,7 @@ public void testWalCleanerSequentialClean() throws Exception { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. + assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. } // Delete procedures in sequential order make sure that only the corresponding wal is deleted @@ -176,7 +176,6 @@ public void testWalCleanerSequentialClean() throws Exception { } } - // Test that wal cleaner doesn't create holes in wal files list i.e. it only deletes files if // they are in the starting of the list. @Test @@ -189,7 +188,7 @@ public void testWalCleanerNoHoles() throws Exception { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. + assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. 
} for (int i = 1; i < procs.length; i++) { @@ -222,18 +221,18 @@ public void testWalCleanerUpdatesDontLeaveHoles() throws Exception { TestSequentialProcedure p2 = new TestSequentialProcedure(); procStore.insert(p1, null); procStore.insert(p2, null); - procStore.rollWriterForTesting(); // generates first log with p1 + p2 + procStore.rollWriterForTesting(); // generates first log with p1 + p2 ProcedureWALFile log1 = procStore.getActiveLogs().get(0); procStore.update(p2); - procStore.rollWriterForTesting(); // generates second log with p2 + procStore.rollWriterForTesting(); // generates second log with p2 ProcedureWALFile log2 = procStore.getActiveLogs().get(1); procStore.update(p2); - procStore.rollWriterForTesting(); // generates third log with p2 - procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. + procStore.rollWriterForTesting(); // generates third log with p2 + procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. assertEquals(4, procStore.getActiveLogs().size()); procStore.update(p1); - procStore.rollWriterForTesting(); // generates fourth log with p1 - procStore.removeInactiveLogsForTesting(); // Should remove first two logs. + procStore.rollWriterForTesting(); // generates fourth log with p1 + procStore.removeInactiveLogsForTesting(); // Should remove first two logs. assertEquals(3, procStore.getActiveLogs().size()); assertFalse(procStore.getActiveLogs().contains(log1)); assertFalse(procStore.getActiveLogs().contains(log2)); @@ -418,8 +417,8 @@ public void testCorruptedTrailer() throws Exception { assertEquals(0, loader.getCorruptedCount()); } - private static void assertUpdated(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] updatedProcs, final int[] nonUpdatedProcs) { + private static void assertUpdated(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] updatedProcs, final int[] nonUpdatedProcs) { for (int index : updatedProcs) { long procId = procs[index].getProcId(); assertTrue("Procedure id : " + procId, tracker.isModified(procId)); @@ -430,17 +429,17 @@ private static void assertUpdated(final ProcedureStoreTracker tracker, } } - private static void assertDeleted(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] deletedProcs, final int[] nonDeletedProcs) { + private static void assertDeleted(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] deletedProcs, final int[] nonDeletedProcs) { for (int index : deletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.YES, + tracker.isDeleted(procId)); } for (int index : nonDeletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.NO, + tracker.isDeleted(procId)); } } @@ -451,13 +450,13 @@ public void testCorruptedTrailersRebuild() throws Exception { procs[i] = new TestSequentialProcedure(); } // Log State (I=insert, U=updated, D=delete) - // | log 1 | log 2 | log 3 | - // 0 | I, D | | | - // 1 | I | | | - // 2 | I | D | | - // 3 | I | U | | - // 4 | | I | D | - // 5 | | | I | + // | log 1 | log 2 | log 3 | + // 0 | I, D | | | + // 1 | I | | | + // 2 | I | D | | + // 3 | I | U | | + // 4 | | I | D | + // 5 | | | I | 
procStore.insert(procs[0], null); procStore.insert(procs[1], null); procStore.insert(procs[2], null); @@ -485,7 +484,7 @@ public void testCorruptedTrailersRebuild() throws Exception { htu.getConfiguration().setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false); final LoadCounter loader = new LoadCounter(); storeRestart(loader); - assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 + assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 assertEquals(0, loader.getCorruptedCount()); // Check the Trackers @@ -493,13 +492,16 @@ public void testCorruptedTrailersRebuild() throws Exception { LOG.info("WALs " + walFiles); assertEquals(4, walFiles.size()); LOG.info("Checking wal " + walFiles.get(0)); - assertUpdated(walFiles.get(0).getTracker(), procs, new int[]{0, 1, 2, 3}, new int[] {4, 5}); + assertUpdated(walFiles.get(0).getTracker(), procs, new int[] { 0, 1, 2, 3 }, + new int[] { 4, 5 }); LOG.info("Checking wal " + walFiles.get(1)); - assertUpdated(walFiles.get(1).getTracker(), procs, new int[]{2, 3, 4}, new int[] {0, 1, 5}); + assertUpdated(walFiles.get(1).getTracker(), procs, new int[] { 2, 3, 4 }, + new int[] { 0, 1, 5 }); LOG.info("Checking wal " + walFiles.get(2)); - assertUpdated(walFiles.get(2).getTracker(), procs, new int[]{4, 5}, new int[] {0, 1, 2, 3}); + assertUpdated(walFiles.get(2).getTracker(), procs, new int[] { 4, 5 }, + new int[] { 0, 1, 2, 3 }); LOG.info("Checking global tracker "); - assertDeleted(procStore.getStoreTracker(), procs, new int[]{0, 2, 4}, new int[] {1, 3, 5}); + assertDeleted(procStore.getStoreTracker(), procs, new int[] { 0, 2, 4 }, new int[] { 1, 3, 5 }); } @Test @@ -531,17 +533,17 @@ public void testCorruptedProcedures() throws Exception { // Insert root-procedures TestProcedure[] rootProcs = new TestProcedure[10]; for (int i = 1; i <= rootProcs.length; i++) { - rootProcs[i-1] = new TestProcedure(i, 0); - procStore.insert(rootProcs[i-1], null); - rootProcs[i-1].addStackId(0); - procStore.update(rootProcs[i-1]); + rootProcs[i - 1] = new TestProcedure(i, 0); + procStore.insert(rootProcs[i - 1], null); + rootProcs[i - 1].addStackId(0); + procStore.update(rootProcs[i - 1]); } // insert root-child txn procStore.rollWriterForTesting(); for (int i = 1; i <= rootProcs.length; i++) { TestProcedure b = new TestProcedure(rootProcs.length + i, i); - rootProcs[i-1].addStackId(1); - procStore.insert(rootProcs[i-1], new Procedure[] { b }); + rootProcs[i - 1].addStackId(1); + procStore.insert(rootProcs[i - 1], new Procedure[] { b }); } // insert child updates procStore.rollWriterForTesting(); @@ -629,20 +631,19 @@ public void testFileNotFoundDuringLeaseRecovery() throws IOException { assertEquals(procs.length + 1, status.length); // simulate another active master removing the wals - procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, - new LeaseRecovery() { - private int count = 0; - - @Override - public void recoverFileLease(FileSystem fs, Path path) throws IOException { - if (++count <= 2) { - fs.delete(path, false); - LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); - throw new FileNotFoundException("test file not found " + path); - } - LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, new LeaseRecovery() { + private int count = 0; + + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + if (++count <= 2) { + fs.delete(path, false); + LOG.debug("Simulate 
FileNotFound at count=" + count + " for " + path); + throw new FileNotFoundException("test file not found " + path); } - }); + LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + } + }); final LoadCounter loader = new LoadCounter(); procStore.start(PROCEDURE_STORE_SLOTS); @@ -656,7 +657,7 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { @Test public void testLogFileAlreadyExists() throws IOException { - final boolean[] tested = {false}; + final boolean[] tested = { false }; WALProcedureStore mStore = Mockito.spy(procStore); Answer ans = new Answer() { @@ -806,20 +807,19 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { }); } - private LoadCounter restartAndAssert(long maxProcId, long runnableCount, - int completedCount, int corruptedCount) throws Exception { - return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, - runnableCount, completedCount, corruptedCount); + private LoadCounter restartAndAssert(long maxProcId, long runnableCount, int completedCount, + int corruptedCount) throws Exception { + return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, runnableCount, + completedCount, corruptedCount); } - private void corruptLog(final FileStatus logFile, final long dropBytes) - throws IOException { + private void corruptLog(final FileStatus logFile, final long dropBytes) throws IOException { assertTrue(logFile.getLen() > dropBytes); - LOG.debug("corrupt log " + logFile.getPath() + - " size=" + logFile.getLen() + " drop=" + dropBytes); + LOG.debug( + "corrupt log " + logFile.getPath() + " size=" + logFile.getLen() + " drop=" + dropBytes); Path tmpPath = new Path(testDir, "corrupted.log"); InputStream in = fs.open(logFile.getPath()); - OutputStream out = fs.create(tmpPath); + OutputStream out = fs.create(tmpPath); IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true); if (!fs.rename(tmpPath, logFile.getPath())) { throw new IOException("Unable to rename"); @@ -856,8 +856,7 @@ protected boolean abort(Void env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value.Builder builder = Int64Value.newBuilder().setValue(procId); @@ -866,8 +865,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value value = serializer.deserialize(Int64Value.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java index 0d494fcdd6b3..27f280ad13ac 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,12 +28,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestDelayedUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDelayedUtil.class); + HBaseClassTestRule.forClass(TestDelayedUtil.class); private static final Logger LOG = LoggerFactory.getLogger(TestDelayedUtil.class); @@ -50,9 +50,8 @@ public void testDelayedContainerEquals() { ZeroDelayContainer o1cb = new ZeroDelayContainer<>(o1); ZeroDelayContainer o2c = new ZeroDelayContainer<>(o2); - ZeroDelayContainer[] items = new ZeroDelayContainer[] { - lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, - }; + ZeroDelayContainer[] items = + new ZeroDelayContainer[] { lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, }; assertContainersEquals(lnull, items, lnull, onull); assertContainersEquals(l10a, items, l10a, l10b); @@ -65,7 +64,7 @@ public void testDelayedContainerEquals() { } private void assertContainersEquals(final ZeroDelayContainer src, - final ZeroDelayContainer[] items, final ZeroDelayContainer... matches) { + final ZeroDelayContainer[] items, final ZeroDelayContainer... matches) { for (int i = 0; i < items.length; ++i) { boolean shouldMatch = false; for (int j = 0; j < matches.length; ++j) { @@ -75,8 +74,8 @@ private void assertContainersEquals(final ZeroDelayContainer src, } } boolean isMatching = src.equals(items[i]); - assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), - shouldMatch, isMatching); + assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), shouldMatch, + isMatching); } } diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index de40e56e20a5..82710d6bae59 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -36,13 +36,28 @@ --> 3.17.3 + + + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + junit + junit + test + + org.apache.maven.plugins maven-source-plugin - + maven-assembly-plugin @@ -56,10 +71,10 @@ secondPartTestsExecution - test test + test true @@ -72,10 +87,10 @@ compile-protoc - generate-sources compile + generate-sources com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} true @@ -95,48 +110,48 @@ com.google.code.maven-replacer-plugin replacer 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + - process-sources replace + process-sources - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - ([^\.])com.google.protobuf - $1org.apache.hbase.thirdparty.com.google.protobuf - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - org.apache.maven.plugins maven-shade-plugin - package shade + package true true @@ -187,21 +202,6 @@ - - - - - org.apache.hbase.thirdparty - hbase-shaded-protobuf - - - junit - junit - test - - @@ -260,9 +260,7 @@ - - com.google.code.maven-replacer-plugin - + 
com.google.code.maven-replacer-plugin replacer [1.5.3,) @@ -271,7 +269,7 @@ - false + false diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java index f8cef893d7d7..a864f41fcd6b 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,19 +22,20 @@ import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; /** - * Helper to convert Exceptions and StackTraces from/to protobuf. - * (see ErrorHandling.proto for the internal of the proto messages) + * Helper to convert Exceptions and StackTraces from/to protobuf. (see ErrorHandling.proto for the + * internal of the proto messages) */ @InterfaceAudience.Private public final class ForeignExceptionUtil { - private ForeignExceptionUtil() { } + private ForeignExceptionUtil() { + } public static Exception toException(final ForeignExceptionMessage eem) { Exception re; @@ -57,8 +58,8 @@ public static IOException toIOException(final ForeignExceptionMessage eem) { } private static T createException(final Class clazz, - final ForeignExceptionMessage eem) throws ClassNotFoundException, NoSuchMethodException, - InstantiationException, IllegalAccessException, InvocationTargetException { + final ForeignExceptionMessage eem) throws ClassNotFoundException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException { final GenericExceptionMessage gem = eem.getGenericException(); final Class realClass = Class.forName(gem.getClassName()); final Class cls = realClass.asSubclass(clazz); @@ -68,7 +69,7 @@ private static T createException(final Class clazz, } private static T setExceptionDetails(final T exception, - final ForeignExceptionMessage eem) { + final ForeignExceptionMessage eem) { final GenericExceptionMessage gem = eem.getGenericException(); final StackTraceElement[] trace = toStackTrace(gem.getTraceList()); exception.setStackTrace(trace); @@ -127,8 +128,7 @@ public static List toProtoStackTraceElement(StackTrace } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). @@ -140,10 +140,8 @@ public static StackTraceElement[] toStackTrace(List tr StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), - elem.hasFileName() ? 
elem.getFileName() : null, - elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.hasFileName() ? elem.getFileName() : null, elem.getLineNumber()); } return trace; } diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index 9405d5367b04..d454075b80f3 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,34 +30,6 @@ Apache HBase - Replication HBase Replication Support - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -159,12 +130,42 @@ + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + ${hadoop-three.version} @@ -193,8 +194,7 @@ lifecycle-mapping - - + diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 83421600aa0d..6dba30a34c04 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java index 5c21e1e023ce..36b958d2fa2e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index 85b6c7626614..51f7ae88717d 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; @@ -108,7 +107,7 @@ default boolean isPeerEnabled() { /** * @deprecated since 2.1.0 and will be removed in 4.0.0. 
Use - * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. + * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. * @see #registerPeerConfigListener(ReplicationPeerConfigListener) * @see HBASE-19573 */ @@ -116,4 +115,4 @@ default boolean isPeerEnabled() { default void trackPeerConfigChanges(ReplicationPeerConfigListener listener) { registerPeerConfigListener(listener); } -} \ No newline at end of file +} diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java index d4d8023ead76..d0bacda6d496 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -24,8 +22,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationPeerConfigListener { - /** Callback method for when users update the ReplicationPeerConfig for this peer - * + /** + * Callback method for when users update the ReplicationPeerConfig for this peer * @param rpc The updated ReplicationPeerConfig */ void peerConfigUpdated(ReplicationPeerConfig rpc); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 08799856b754..cda7742bcbf6 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -51,13 +51,13 @@ public class ReplicationPeerImpl implements ReplicationPeer { /** * Constructor that takes all the objects required to communicate with the specified peer, except * for the region server addresses. 
- * @param conf configuration object to this peer - * @param id string representation of this peer's identifier + * @param conf configuration object to this peer + * @param id string representation of this peer's identifier * @param peerConfig configuration for the replication peer */ public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig, - boolean peerState, SyncReplicationState syncReplicationState, - SyncReplicationState newSyncReplicationState) { + boolean peerState, SyncReplicationState syncReplicationState, + SyncReplicationState newSyncReplicationState) { this.conf = conf; this.id = id; setPeerState(peerState); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java index f74ac37187c4..1fa78d50b460 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public interface ReplicationPeerStorage { * @throws ReplicationException if there are errors accessing the storage service. */ void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled, - SyncReplicationState syncReplicationState) throws ReplicationException; + SyncReplicationState syncReplicationState) throws ReplicationException; /** * Remove a replication peer. @@ -50,7 +50,7 @@ void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled, * @throws ReplicationException if there are errors accessing the storage service. */ void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException; + throws ReplicationException; /** * Return the peer ids of all replication peers. @@ -75,7 +75,7 @@ void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) * @throws ReplicationException if there are errors accessing the storage service. */ void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state) - throws ReplicationException; + throws ReplicationException; /** * Overwrite the sync replication state with the new sync replication state which is set with the @@ -92,8 +92,8 @@ void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state) SyncReplicationState getPeerSyncReplicationState(String peerId) throws ReplicationException; /** - * Get the new sync replication state. Will return {@link SyncReplicationState#NONE} if we are - * not in a transition. + * Get the new sync replication state. Will return {@link SyncReplicationState#NONE} if we are not + * in a transition. * @throws ReplicationException if there are errors accessing the storage service. 
*/ SyncReplicationState getPeerNewSyncReplicationState(String peerId) throws ReplicationException; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index ebe99da3541b..70344c07bdcb 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -118,7 +118,7 @@ public ReplicationPeerConfig refreshPeerConfig(String peerId) throws Replication } public SyncReplicationState refreshPeerNewSyncReplicationState(String peerId) - throws ReplicationException { + throws ReplicationException { ReplicationPeerImpl peer = peerCache.get(peerId); SyncReplicationState newState = peerStorage.getPeerNewSyncReplicationState(peerId); peer.setNewSyncReplicationState(newState); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index d39a37eca8f0..908780112c76 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,16 +20,15 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This class is responsible for the parsing logic for a queue id representing a queue. - * It will extract the peerId if it's recovered as well as the dead region servers - * that were part of the queue's history. + * This class is responsible for the parsing logic for a queue id representing a queue. It will + * extract the peerId if it's recovered as well as the dead region servers that were part of the + * queue's history. */ @InterfaceAudience.Private public class ReplicationQueueInfo { @@ -43,8 +41,8 @@ public class ReplicationQueueInfo { private List deadRegionServers = new ArrayList<>(); /** - * The passed queueId will be either the id of the peer or the handling story of that queue - * in the form of id-servername-* + * The passed queueId will be either the id of the peer or the handling story of that queue in the + * form of id-servername-* */ public ReplicationQueueInfo(String queueId) { this.queueId = queueId; @@ -62,9 +60,9 @@ public ReplicationQueueInfo(String queueId) { * "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-... 
*/ - private static void - extractDeadServersFromZNodeString(String deadServerListStr, List result) { - if(deadServerListStr == null || result == null || deadServerListStr.isEmpty()) { + private static void extractDeadServersFromZNodeString(String deadServerListStr, + List result) { + if (deadServerListStr == null || result == null || deadServerListStr.isEmpty()) { return; } @@ -79,10 +77,10 @@ public ReplicationQueueInfo(String queueId) { seenCommaCnt += 1; break; case '-': - if(seenCommaCnt>=2) { + if (seenCommaCnt >= 2) { if (i > startIndex) { String serverName = deadServerListStr.substring(startIndex, i); - if(ServerName.isFullServerName(serverName)){ + if (ServerName.isFullServerName(serverName)) { result.add(ServerName.valueOf(serverName)); } else { LOG.error("Found invalid server name:" + serverName); @@ -98,9 +96,9 @@ public ReplicationQueueInfo(String queueId) { } // add tail - if(startIndex < len - 1){ + if (startIndex < len - 1) { String serverName = deadServerListStr.substring(startIndex, len); - if(ServerName.isFullServerName(serverName)){ + if (ServerName.isFullServerName(serverName)) { result.add(ServerName.valueOf(serverName)); } else { LOG.error("Found invalid server name at the end:" + serverName); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java index 59278e9807d5..0f95c04b2542 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Map; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Pair; @@ -36,7 +35,7 @@ public interface ReplicationQueueStorage { /** * Remove a replication queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue. + * @param queueId a String that identifies the queue. */ void removeQueue(ServerName serverName, String queueId) throws ReplicationException; @@ -44,36 +43,36 @@ public interface ReplicationQueueStorage { * Add a new WAL file to the given queue for a given regionserver. If the queue does not exist it * is created. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue. - * @param fileName name of the WAL + * @param queueId a String that identifies the queue. + * @param fileName name of the WAL */ void addWAL(ServerName serverName, String queueId, String fileName) throws ReplicationException; /** * Remove an WAL file from the given queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue. - * @param fileName name of the WAL + * @param queueId a String that identifies the queue. + * @param fileName name of the WAL */ void removeWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException; + throws ReplicationException; /** * Set the current position for a specific WAL in a given queue for a given regionserver. 
* @param serverName the name of the regionserver - * @param queueId a String that identifies the queue - * @param fileName name of the WAL - * @param position the current position in the file. Will ignore if less than or equal to 0. + * @param queueId a String that identifies the queue + * @param fileName name of the WAL + * @param position the current position in the file. Will ignore if less than or equal to 0. * @param lastSeqIds map with {encodedRegionName, sequenceId} pairs for serial replication. */ void setWALPosition(ServerName serverName, String queueId, String fileName, long position, - Map lastSeqIds) throws ReplicationException; + Map lastSeqIds) throws ReplicationException; /** * Read the max sequence id of the specific region for a given peer. For serial replication, we * need the max sequenced id to decide whether we can push the next entries. * @param encodedRegionName the encoded region name - * @param peerId peer id + * @param peerId peer id * @return the max sequence id of the specific region for a given peer. */ long getLastSequenceId(String encodedRegionName, String peerId) throws ReplicationException; @@ -81,7 +80,7 @@ void setWALPosition(ServerName serverName, String queueId, String fileName, long /** * Set the max sequence id of a bunch of regions for a given peer. Will be called when setting up * a serial replication peer. - * @param peerId peer id + * @param peerId peer id * @param lastSeqIds map with {encodedRegionName, sequenceId} pairs for serial replication. */ void setLastSequenceIds(String peerId, Map lastSeqIds) throws ReplicationException; @@ -94,26 +93,26 @@ void setWALPosition(ServerName serverName, String queueId, String fileName, long /** * Remove the max sequence id record for the given peer and regions. - * @param peerId peer id + * @param peerId peer id * @param encodedRegionNames the encoded region names */ void removeLastSequenceIds(String peerId, List encodedRegionNames) - throws ReplicationException; + throws ReplicationException; /** * Get the current position for a specific WAL in a given queue for a given regionserver. * @param serverName the name of the regionserver - * @param queueId a String that identifies the queue - * @param fileName name of the WAL + * @param queueId a String that identifies the queue + * @param fileName name of the WAL * @return the current position in the file */ long getWALPosition(ServerName serverName, String queueId, String fileName) - throws ReplicationException; + throws ReplicationException; /** * Get a list of all WALs in the given queue on the given region server. * @param serverName the server name of the region server that owns the queue - * @param queueId a String that identifies the queue + * @param queueId a String that identifies the queue * @return a list of WALs */ List getWALsInQueue(ServerName serverName, String queueId) throws ReplicationException; @@ -128,12 +127,12 @@ long getWALPosition(ServerName serverName, String queueId, String fileName) /** * Change ownership for the queue identified by queueId and belongs to a dead region server. 
* @param sourceServerName the name of the dead region server - * @param destServerName the name of the target region server - * @param queueId the id of the queue + * @param destServerName the name of the target region server + * @param queueId the id of the queue * @return the new PeerId and A SortedSet of WALs in its queue */ Pair> claimQueue(ServerName sourceServerName, String queueId, - ServerName destServerName) throws ReplicationException; + ServerName destServerName) throws ReplicationException; /** * Remove the record of region server if the queue is empty. @@ -170,8 +169,8 @@ Pair> claimQueue(ServerName sourceServerName, String q /** * Add new hfile references to the queue. * @param peerId peer cluster id to which the hfiles need to be replicated - * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which - * will be added in the queue } + * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which + * will be added in the queue } * @throws ReplicationException if fails to add a hfile reference */ void addHFileRefs(String peerId, List> pairs) throws ReplicationException; @@ -179,7 +178,7 @@ Pair> claimQueue(ServerName sourceServerName, String q /** * Remove hfile references from the queue. * @param peerId peer cluster id from which this hfile references needs to be removed - * @param files list of hfile references to be removed + * @param files list of hfile references to be removed */ void removeHFileRefs(String peerId, List files) throws ReplicationException; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java index 462cfedd0a04..1080b2125c79 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public static ReplicationPeerStorage getReplicationPeerStorage(ZKWatcher zk, Con * Create a new {@link ReplicationQueueStorage}. */ public static ReplicationQueueStorage getReplicationQueueStorage(ZKWatcher zk, - Configuration conf) { + Configuration conf) { return new ZKReplicationQueueStorage(zk, conf); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java index e8ecec262bf6..d1bca8b4b042 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -63,7 +63,7 @@ private ReplicationUtils() { } public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig peerConfig, - Configuration baseConf) throws ReplicationException { + Configuration baseConf) throws ReplicationException { Configuration otherConf; try { otherConf = HBaseConfiguration.createClusterConf(baseConf, peerConfig.getClusterKey()); @@ -82,7 +82,7 @@ public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig pe } public static void removeAllQueues(ReplicationQueueStorage queueStorage, String peerId) - throws ReplicationException { + throws ReplicationException { for (ServerName replicator : queueStorage.getListOfReplicators()) { List queueIds = queueStorage.getAllQueues(replicator); for (String queueId : queueIds) { @@ -110,7 +110,7 @@ private static boolean isNamespacesEqual(Set ns1, Set ns2) { } private static boolean isTableCFsEqual(Map> tableCFs1, - Map> tableCFs2) { + Map> tableCFs2) { if (tableCFs1 == null) { return tableCFs2 == null; } @@ -135,16 +135,16 @@ private static boolean isTableCFsEqual(Map> tableCFs1, } public static boolean isNamespacesAndTableCFsEqual(ReplicationPeerConfig rpc1, - ReplicationPeerConfig rpc2) { + ReplicationPeerConfig rpc2) { if (rpc1.replicateAllUserTables() != rpc2.replicateAllUserTables()) { return false; } if (rpc1.replicateAllUserTables()) { - return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) && - isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); + return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) + && isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); } else { - return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) && - isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); + return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) + && isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); } } @@ -158,7 +158,7 @@ public static boolean isReplicationForBulkLoadDataEnabled(final Configuration c) } public static FileSystem getRemoteWALFileSystem(Configuration conf, String remoteWALDir) - throws IOException { + throws IOException { return new Path(remoteWALDir).getFileSystem(conf); } @@ -184,14 +184,14 @@ public static Path getPeerSnapshotWALDir(Path remoteWALDir, String peerId) { /** * Do the sleeping logic - * @param msg Why we sleep - * @param sleepForRetries the base sleep time. - * @param sleepMultiplier by how many times the default sleeping time is augmented + * @param msg Why we sleep + * @param sleepForRetries the base sleep time. 
+ * @param sleepMultiplier by how many times the default sleeping time is augmented * @param maxRetriesMultiplier the max retry multiplier * @return True if sleepMultiplier is < maxRetriesMultiplier */ public static boolean sleepForRetries(String msg, long sleepForRetries, int sleepMultiplier, - int maxRetriesMultiplier) { + int maxRetriesMultiplier) { try { LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, sleepMultiplier); Thread.sleep(sleepForRetries * sleepMultiplier); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java index 09aeee55cca8..56d7f43376df 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ */ @InterfaceAudience.Private public class ZKReplicationPeerStorage extends ZKReplicationStorageBase - implements ReplicationPeerStorage { + implements ReplicationPeerStorage { public static final String PEERS_ZNODE = "zookeeper.znode.replication.peers"; public static final String PEERS_ZNODE_DEFAULT = "peers"; @@ -94,7 +94,7 @@ public String getNewSyncReplicationStateNode(String peerId) { @Override public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled, - SyncReplicationState syncReplicationState) throws ReplicationException { + SyncReplicationState syncReplicationState) throws ReplicationException { List multiOps = Arrays.asList( ZKUtilOp.createAndFailSilent(getPeerNode(peerId), ReplicationPeerConfigUtil.toByteArray(peerConfig)), @@ -108,8 +108,8 @@ public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean ena ZKUtil.multiOrSequential(zookeeper, multiOps, false); } catch (KeeperException e) { throw new ReplicationException( - "Could not add peer with id=" + peerId + ", peerConfig=>" + peerConfig + ", state=" + - (enabled ? "ENABLED" : "DISABLED") + ", syncReplicationState=" + syncReplicationState, + "Could not add peer with id=" + peerId + ", peerConfig=>" + peerConfig + ", state=" + + (enabled ? 
"ENABLED" : "DISABLED") + ", syncReplicationState=" + syncReplicationState, e); } } @@ -135,7 +135,7 @@ public void setPeerState(String peerId, boolean enabled) throws ReplicationExcep @Override public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException { + throws ReplicationException { try { ZKUtil.setData(this.zookeeper, getPeerNode(peerId), ReplicationPeerConfigUtil.toByteArray(peerConfig)); @@ -187,7 +187,7 @@ public ReplicationPeerConfig getPeerConfig(String peerId) throws ReplicationExce @Override public void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state) - throws ReplicationException { + throws ReplicationException { try { ZKUtil.createSetData(zookeeper, getNewSyncReplicationStateNode(peerId), SyncReplicationState.toByteArray(state)); @@ -213,7 +213,7 @@ public void transitPeerSyncReplicationState(String peerId) throws ReplicationExc } private SyncReplicationState getSyncReplicationState(String peerId, String path) - throws ReplicationException { + throws ReplicationException { try { byte[] data = ZKUtil.getData(zookeeper, path); if (data == null || data.length == 0) { @@ -235,13 +235,13 @@ private SyncReplicationState getSyncReplicationState(String peerId, String path) @Override public SyncReplicationState getPeerNewSyncReplicationState(String peerId) - throws ReplicationException { + throws ReplicationException { return getSyncReplicationState(peerId, getNewSyncReplicationStateNode(peerId)); } @Override public SyncReplicationState getPeerSyncReplicationState(String peerId) - throws ReplicationException { + throws ReplicationException { return getSyncReplicationState(peerId, getSyncReplicationStateNode(peerId)); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java index c51bdfcc283e..f3506ad3555a 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -80,16 +80,16 @@ */ @InterfaceAudience.Private class ZKReplicationQueueStorage extends ZKReplicationStorageBase - implements ReplicationQueueStorage { + implements ReplicationQueueStorage { private static final Logger LOG = LoggerFactory.getLogger(ZKReplicationQueueStorage.class); public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY = - "zookeeper.znode.replication.hfile.refs"; + "zookeeper.znode.replication.hfile.refs"; public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs"; public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY = - "zookeeper.znode.replication.regions"; + "zookeeper.znode.replication.regions"; public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT = "regions"; /** @@ -113,7 +113,7 @@ public ZKReplicationQueueStorage(ZKWatcher zookeeper, Configuration conf) { this.queuesZNode = ZNodePaths.joinZNode(replicationZNode, queuesZNodeName); this.hfileRefsZNode = ZNodePaths.joinZNode(replicationZNode, hfileRefsZNodeName); this.regionsZNode = ZNodePaths.joinZNode(replicationZNode, conf - .get(ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY, ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT)); + .get(ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY, ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT)); } @Override @@ -152,20 +152,20 @@ private String getFileNode(ServerName serverName, String queueId, String fileNam * is the peer id. *

    * @param encodedRegionName the encoded region name. - * @param peerId peer id for replication. + * @param peerId peer id for replication. * @return ZNode path to persist the max sequence id that we've pushed for the given region and * peer. */ String getSerialReplicationRegionPeerNode(String encodedRegionName, String peerId) { if (encodedRegionName == null || encodedRegionName.length() != RegionInfo.MD5_HEX_LENGTH) { throw new IllegalArgumentException( - "Invalid encoded region name: " + encodedRegionName + ", length should be 32."); + "Invalid encoded region name: " + encodedRegionName + ", length should be 32."); } return new StringBuilder(regionsZNode).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) - .toString(); + .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) + .toString(); } @Override @@ -174,37 +174,37 @@ public void removeQueue(ServerName serverName, String queueId) throws Replicatio ZKUtil.deleteNodeRecursively(zookeeper, getQueueNode(serverName, queueId)); } catch (KeeperException e) { throw new ReplicationException( - "Failed to delete queue (serverName=" + serverName + ", queueId=" + queueId + ")", e); + "Failed to delete queue (serverName=" + serverName + ", queueId=" + queueId + ")", e); } } @Override public void addWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException { + throws ReplicationException { try { ZKUtil.createWithParents(zookeeper, getFileNode(serverName, queueId, fileName)); } catch (KeeperException e) { throw new ReplicationException("Failed to add wal to queue (serverName=" + serverName - + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } } @Override public void removeWAL(ServerName serverName, String queueId, String fileName) - throws ReplicationException { + throws ReplicationException { String fileNode = getFileNode(serverName, queueId, fileName); try { ZKUtil.deleteNode(zookeeper, fileNode); } catch (NoNodeException e) { LOG.warn("{} already deleted when removing log", fileNode); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } } private void addLastSeqIdsToOps(String queueId, Map lastSeqIds, - List listOfOps) throws KeeperException, ReplicationException { + List listOfOps) throws KeeperException, ReplicationException { String peerId = new ReplicationQueueInfo(queueId).getPeerId(); for (Entry lastSeqEntry : lastSeqIds.entrySet()) { String path = getSerialReplicationRegionPeerNode(lastSeqEntry.getKey(), peerId); @@ -228,7 +228,7 @@ private void addLastSeqIdsToOps(String queueId, Map lastSeqIds, @Override public void setWALPosition(ServerName serverName, String queueId, String fileName, long position, - Map lastSeqIds) throws ReplicationException { + Map lastSeqIds) throws ReplicationException { try { for (int retry = 0;; retry++) { List 
listOfOps = new ArrayList<>(); @@ -247,13 +247,13 @@ public void setWALPosition(ServerName serverName, String queueId, String fileNam } catch (KeeperException.BadVersionException | KeeperException.NodeExistsException e) { LOG.warn( "Bad version(or node exist) when persist the last pushed sequence id to zookeeper " - + "storage, Retry = " + retry + ", serverName=" + serverName + ", queueId=" - + queueId + ", fileName=" + fileName); + + "storage, Retry = " + retry + ", serverName=" + serverName + ", queueId=" + queueId + + ", fileName=" + fileName); } } } catch (KeeperException e) { throw new ReplicationException("Failed to set log position (serverName=" + serverName - + ", queueId=" + queueId + ", fileName=" + fileName + ", position=" + position + ")", e); + + ", queueId=" + queueId + ", fileName=" + fileName + ", position=" + position + ")", e); } } @@ -262,7 +262,7 @@ public void setWALPosition(ServerName serverName, String queueId, String fileNam * that the ZNode does not exist. */ protected Pair getLastSequenceIdWithVersion(String encodedRegionName, - String peerId) throws KeeperException { + String peerId) throws KeeperException { Stat stat = new Stat(); String path = getSerialReplicationRegionPeerNode(encodedRegionName, peerId); byte[] data = ZKUtil.getDataNoWatch(zookeeper, path, stat); @@ -274,25 +274,25 @@ protected Pair getLastSequenceIdWithVersion(String encodedRegionN return Pair.newPair(ZKUtil.parseWALPositionFrom(data), stat.getVersion()); } catch (DeserializationException de) { LOG.warn("Failed to parse log position (region=" + encodedRegionName + ", peerId=" + peerId - + "), data=" + Bytes.toStringBinary(data)); + + "), data=" + Bytes.toStringBinary(data)); } return Pair.newPair(HConstants.NO_SEQNUM, stat.getVersion()); } @Override public long getLastSequenceId(String encodedRegionName, String peerId) - throws ReplicationException { + throws ReplicationException { try { return getLastSequenceIdWithVersion(encodedRegionName, peerId).getFirst(); } catch (KeeperException e) { throw new ReplicationException("Failed to get last pushed sequence id (encodedRegionName=" - + encodedRegionName + ", peerId=" + peerId + ")", e); + + encodedRegionName + ", peerId=" + peerId + ")", e); } } @Override public void setLastSequenceIds(String peerId, Map lastSeqIds) - throws ReplicationException { + throws ReplicationException { try { // No need CAS and retry here, because it'll call setLastSequenceIds() for disabled peers // only, so no conflict happen. 
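// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch above): setWALPosition persists the
// last pushed sequence ids with an optimistic "retry on BadVersionException"
// loop, as the warning log in the preceding hunk shows. The same
// compare-and-set-by-znode-version idea, expressed against the plain
// ZooKeeper client API; the path and payload are placeholders, and only
// getData/setData/BadVersionException are real API here.
// ---------------------------------------------------------------------------
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

final class VersionedZNodeUpdateSketch {
  // Keep re-reading the znode and retrying until our setData wins the race.
  static void casSetData(ZooKeeper zk, String path, byte[] newData)
      throws KeeperException, InterruptedException {
    while (true) {
      Stat stat = new Stat();
      zk.getData(path, false, stat); // read current value and its version
      try {
        // Applies only if nobody changed the znode since we read it.
        zk.setData(path, newData, stat.getVersion());
        return;
      } catch (KeeperException.BadVersionException e) {
        // A concurrent writer bumped the version; loop and try again.
      }
    }
  }
}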
@@ -307,7 +307,7 @@ public void setLastSequenceIds(String peerId, Map lastSeqIds) } } catch (KeeperException e) { throw new ReplicationException("Failed to set last sequence ids, peerId=" + peerId - + ", size of lastSeqIds=" + lastSeqIds.size(), e); + + ", size of lastSeqIds=" + lastSeqIds.size(), e); } } @@ -347,33 +347,33 @@ public void removeLastSequenceIds(String peerId) throws ReplicationException { @Override public void removeLastSequenceIds(String peerId, List encodedRegionNames) - throws ReplicationException { + throws ReplicationException { try { List listOfOps = encodedRegionNames.stream().map(n -> getSerialReplicationRegionPeerNode(n, peerId)) .map(ZKUtilOp::deleteNodeFailSilent).collect(Collectors.toList()); ZKUtil.multiOrSequential(zookeeper, listOfOps, true); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + - ", encodedRegionNames.size=" + encodedRegionNames.size(), e); + throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + + ", encodedRegionNames.size=" + encodedRegionNames.size(), e); } } @Override public long getWALPosition(ServerName serverName, String queueId, String fileName) - throws ReplicationException { + throws ReplicationException { byte[] bytes; try { bytes = ZKUtil.getData(zookeeper, getFileNode(serverName, queueId, fileName)); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Failed to get log position (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to get log position (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } try { return ZKUtil.parseWALPositionFrom(bytes); } catch (DeserializationException de) { - LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", - serverName, queueId, fileName); + LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", serverName, + queueId, fileName); } // if we can not parse the position, start at the beginning of the wal file again return 0; @@ -386,15 +386,13 @@ public long getWALPosition(ServerName serverName, String queueId, String fileNam */ @Override public Pair> claimQueue(ServerName sourceServerName, String queueId, - ServerName destServerName) throws ReplicationException { + ServerName destServerName) throws ReplicationException { LOG.info("Atomically moving {}/{}'s WALs to {}", sourceServerName, queueId, destServerName); try { ZKUtil.createWithParents(zookeeper, getRsNode(destServerName)); } catch (KeeperException e) { - throw new ReplicationException( - "Claim queue queueId=" + queueId + " from " + sourceServerName + " to " + destServerName + - " failed when creating the node for " + destServerName, - e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed when creating the node for " + destServerName, e); } String newQueueId = queueId + "-" + sourceServerName; try { @@ -440,11 +438,11 @@ public Pair> claimQueue(ServerName sourceServerName, S // queue to tell the upper layer that claim nothing. For other types of exception should be // thrown out to notify the upper layer. 
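// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): as the comment above explains,
// claimQueue signals "someone else already took this queue" by returning an
// empty wal set rather than throwing. A caller can therefore branch on the
// returned pair; startSourceFor(...) is a hypothetical helper, the rest
// follows the claimQueue signature shown in this diff.
//
//   Pair<String, SortedSet<String>> claimed =
//       queueStorage.claimQueue(deadServer, queueId, myServerName);
//   if (claimed.getSecond().isEmpty()) {
//     // Nothing was claimed; another region server won the race for this queue.
//   } else {
//     startSourceFor(claimed.getFirst(), claimed.getSecond()); // hypothetical helper
//   }
// ---------------------------------------------------------------------------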
LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone else took the log?", - queueId,sourceServerName, destServerName, e.toString()); + queueId, sourceServerName, destServerName, e.toString()); return new Pair<>(newQueueId, Collections.emptySortedSet()); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Claim queue queueId=" + queueId + " from " + - sourceServerName + " to " + destServerName + " failed", e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed", e); } } @@ -477,21 +475,20 @@ public List getListOfReplicators() throws ReplicationException { } private List getWALsInQueue0(ServerName serverName, String queueId) - throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, - queueId)); + throws KeeperException { + List children = + ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, queueId)); return children != null ? children : Collections.emptyList(); } @Override public List getWALsInQueue(ServerName serverName, String queueId) - throws ReplicationException { + throws ReplicationException { try { return getWALsInQueue0(serverName, queueId); } catch (KeeperException e) { throw new ReplicationException( - "Failed to get wals in queue (serverName=" + serverName + ", queueId=" + queueId + ")", - e); + "Failed to get wals in queue (serverName=" + serverName + ", queueId=" + queueId + ")", e); } } @@ -521,7 +518,7 @@ protected int getQueuesZNodeCversion() throws KeeperException { * Therefore, we must update the cversion of root {@link #queuesZNode} when migrate wal nodes to * other queues. * @see #claimQueue(ServerName, String, ServerName) as an example of updating root - * {@link #queuesZNode} cversion. + * {@link #queuesZNode} cversion. 
*/ @Override public Set getAllWALs() throws ReplicationException { @@ -543,8 +540,8 @@ public Set getAllWALs() throws ReplicationException { if (v0 == v1) { return wals; } - LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", v0, v1, + retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all wals", e); @@ -569,7 +566,7 @@ public void addPeerToHFileRefs(String peerId) throws ReplicationException { } } catch (KeeperException e) { throw new ReplicationException("Failed to add peer " + peerId + " to hfile reference queue.", - e); + e); } } @@ -585,20 +582,20 @@ public void removePeerFromHFileRefs(String peerId) throws ReplicationException { } } catch (KeeperException e) { throw new ReplicationException( - "Failed to remove peer " + peerId + " from hfile reference queue.", e); + "Failed to remove peer " + peerId + " from hfile reference queue.", e); } } @Override public void addHFileRefs(String peerId, List> pairs) - throws ReplicationException { + throws ReplicationException { String peerNode = getHFileRefsPeerNode(peerId); LOG.debug("Adding hfile references {} in queue {}", pairs, peerNode); - List listOfOps = pairs.stream().map(p -> p.getSecond().getName()) - .map(n -> getHFileNode(peerNode, n)) + List listOfOps = + pairs.stream().map(p -> p.getSecond().getName()).map(n -> getHFileNode(peerNode, n)) .map(f -> ZKUtilOp.createAndFailSilent(f, HConstants.EMPTY_BYTE_ARRAY)).collect(toList()); - LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -612,9 +609,9 @@ public void removeHFileRefs(String peerId, List files) throws Replicatio LOG.debug("Removing hfile references {} from queue {}", files, peerNode); List listOfOps = files.stream().map(n -> getHFileNode(peerNode, n)) - .map(ZKUtilOp::deleteNodeFailSilent).collect(toList()); - LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + .map(ZKUtilOp::deleteNodeFailSilent).collect(toList()); + LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -633,13 +630,13 @@ public List getAllPeersFromHFileRefsQueue() throws ReplicationException return getAllPeersFromHFileRefsQueue0(); } catch (KeeperException e) { throw new ReplicationException("Failed to get list of all peers in hfile references node.", - e); + e); } } private List getReplicableHFiles0(String peerId) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(this.zookeeper, - getHFileRefsPeerNode(peerId)); + List children = + ZKUtil.listChildrenNoWatch(this.zookeeper, getHFileRefsPeerNode(peerId)); return children != null ? 
children : Collections.emptyList(); } @@ -649,7 +646,7 @@ public List getReplicableHFiles(String peerId) throws ReplicationExcepti return getReplicableHFiles0(peerId); } catch (KeeperException e) { throw new ReplicationException("Failed to get list of hfile references for peer " + peerId, - e); + e); } } @@ -683,7 +680,7 @@ public Set getAllHFileRefs() throws ReplicationException { return hfileRefs; } LOG.debug("Replication hfile references node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + v0, v1, retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all hfile refs", e); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java index 596167f9abfc..631960ed4031 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -61,7 +60,7 @@ protected ZKReplicationStorageBase(ZKWatcher zookeeper, Configuration conf) { */ protected static byte[] toByteArray(final ReplicationProtos.ReplicationState.State state) { ReplicationProtos.ReplicationState msg = - ReplicationProtos.ReplicationState.newBuilder().setState(state).build(); + ReplicationProtos.ReplicationState.newBuilder().setState(state).build(); // There is no toByteArray on this pb Message? // 32 bytes is default which seems fair enough here. 
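// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): the toByteArray method below simply
// streams the generated protobuf message into a small in-memory buffer. The
// generic shape, with a placeholder message, is:
//
//   try (ByteArrayOutputStream baos = new ByteArrayOutputStream(32)) {
//     msg.writeTo(baos);              // Message#writeTo(OutputStream), protobuf API
//     byte[] bytes = baos.toByteArray();
//     // ... add whatever framing the caller expects, then return the bytes
//   }
//
// In HBase the result is additionally prefixed with the PB magic bytes so
// readers can distinguish protobuf payloads from legacy serializations.
// ---------------------------------------------------------------------------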
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 4bb1021b7a42..15cf5b1f1f64 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -79,11 +79,11 @@ public void testReplicationQueueStorage() throws ReplicationException { */ rqs.addWAL(server1, "qId1", "trash"); rqs.removeWAL(server1, "qId1", "trash"); - rqs.addWAL(server1,"qId2", "filename1"); - rqs.addWAL(server1,"qId3", "filename2"); - rqs.addWAL(server1,"qId3", "filename3"); - rqs.addWAL(server2,"trash", "trash"); - rqs.removeQueue(server2,"trash"); + rqs.addWAL(server1, "qId2", "filename1"); + rqs.addWAL(server1, "qId3", "filename2"); + rqs.addWAL(server1, "qId3", "filename3"); + rqs.addWAL(server2, "trash", "trash"); + rqs.removeQueue(server2, "trash"); List reps = rqs.getListOfReplicators(); assertEquals(2, reps.size()); @@ -105,10 +105,11 @@ public void testReplicationQueueStorage() throws ReplicationException { } private void removeAllQueues(ServerName serverName) throws ReplicationException { - for (String queue: rqs.getAllQueues(serverName)) { + for (String queue : rqs.getAllQueues(serverName)) { rqs.removeQueue(serverName, queue); } } + @Test public void testReplicationQueues() throws ReplicationException { // Initialize ReplicationPeer so we can add peers (we don't transfer lone queues) @@ -254,7 +255,7 @@ public void testReplicationPeers() throws Exception { assertNumberOfPeers(2); assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(ReplicationUtils - .getPeerClusterConfiguration(rp.getPeerStorage().getPeerConfig(ID_ONE), rp.getConf()))); + .getPeerClusterConfiguration(rp.getPeerStorage().getPeerConfig(ID_ONE), rp.getConf()))); rp.getPeerStorage().removePeer(ID_ONE); rp.removePeer(ID_ONE); assertNumberOfPeers(1); @@ -344,7 +345,7 @@ protected void assertConnectedPeerStatus(boolean status, String peerId) throws E } if (zkTimeoutCount < ZK_MAX_COUNT) { LOG.debug("ConnectedPeerStatus was " + !status + " but expected " + status - + ", sleeping and trying again."); + + ", sleeping and trying again."); Thread.sleep(ZK_SLEEP_INTERVAL); } else { fail("Timed out waiting for ConnectedPeerStatus to be " + status); @@ -373,8 +374,8 @@ protected void populateQueues() throws ReplicationException { } // Add peers for the corresponding queues so they are not orphans rp.getPeerStorage().addPeer("qId" + i, - ReplicationPeerConfig.newBuilder(). - setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), + ReplicationPeerConfig.newBuilder() + .setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), true, SyncReplicationState.NONE); } } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 3d67bd37a6a7..311e7e337f9a 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStateZKImpl.class); + HBaseClassTestRule.forClass(TestReplicationStateZKImpl.class); private static Configuration conf; private static HBaseZKTestingUtil utility; @@ -64,13 +64,13 @@ public static void setUpBeforeClass() throws Exception { } private static String initPeerClusterState(String baseZKNode) - throws IOException, KeeperException { + throws IOException, KeeperException { // Add a dummy region server and set up the cluster id Configuration testConf = new Configuration(conf); testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode); ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null); - String fakeRs = ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, - "hostname1.example.org:1234"); + String fakeRs = + ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, "hostname1.example.org:1234"); ZKUtil.createWithParents(zkw1, fakeRs); ZKClusterId.setClusterId(zkw1, new ClusterId()); return ZKConfig.getZooKeeperClusterKey(testConf); diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index 211576173308..7bc479189929 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ public class TestZKReplicationPeerStorage { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class); + HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class); private static final HBaseZKTestingUtil UTIL = new HBaseZKTestingUtil(); private static final Random RNG = new Random(); // Seed may be set with Random#setSeed @@ -82,7 +82,7 @@ public void cleanCustomConfigurations() { private Set randNamespaces(Random rand) { return Stream.generate(() -> Long.toHexString(rand.nextLong())).limit(rand.nextInt(5)) - .collect(toSet()); + .collect(toSet()); } private Map> randTableCFs(Random rand) { @@ -91,7 +91,7 @@ private Map> randTableCFs(Random rand) { for (int i = 0; i < size; i++) { TableName tn = TableName.valueOf(Long.toHexString(rand.nextLong())); List cfs = Stream.generate(() -> Long.toHexString(rand.nextLong())) - .limit(rand.nextInt(5)).collect(toList()); + .limit(rand.nextInt(5)).collect(toList()); map.put(tn, cfs); } return map; @@ -100,11 +100,11 @@ private Map> randTableCFs(Random rand) { private ReplicationPeerConfig getConfig(int seed) { RNG.setSeed(seed); return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(RNG.nextLong())) - .setReplicationEndpointImpl(Long.toHexString(RNG.nextLong())) - .setRemoteWALDir(Long.toHexString(RNG.nextLong())).setNamespaces(randNamespaces(RNG)) - .setExcludeNamespaces(randNamespaces(RNG)).setTableCFsMap(randTableCFs(RNG)) - .setExcludeTableCFsMap(randTableCFs(RNG)).setReplicateAllUserTables(RNG.nextBoolean()) - .setBandwidth(RNG.nextInt(1000)).build(); + .setReplicationEndpointImpl(Long.toHexString(RNG.nextLong())) + .setRemoteWALDir(Long.toHexString(RNG.nextLong())).setNamespaces(randNamespaces(RNG)) + .setExcludeNamespaces(randNamespaces(RNG)).setTableCFsMap(randTableCFs(RNG)) + .setExcludeTableCFsMap(randTableCFs(RNG)).setReplicateAllUserTables(RNG.nextBoolean()) + .setBandwidth(RNG.nextInt(1000)).build(); } private void assertSetEquals(Set expected, Set actual) { @@ -117,7 +117,7 @@ private void assertSetEquals(Set expected, Set actual) { } private void assertMapEquals(Map> expected, - Map> actual) { + Map> actual) { if (expected == null || expected.size() == 0) { assertTrue(actual == null || actual.size() == 0); return; @@ -131,8 +131,8 @@ private void assertMapEquals(Map> expected, } else { assertNotNull(actualCFs); assertEquals(expectedCFs.size(), actualCFs.size()); - for (Iterator expectedIt = expectedCFs.iterator(), actualIt = actualCFs.iterator(); - expectedIt.hasNext();) { + for (Iterator expectedIt = expectedCFs.iterator(), + actualIt = actualCFs.iterator(); expectedIt.hasNext();) { assertEquals(expectedIt.next(), actualIt.next()); } } @@ -198,7 +198,7 @@ public void test() throws ReplicationException { @Test public void testNoSyncReplicationState() - throws ReplicationException, KeeperException, IOException { + throws ReplicationException, KeeperException, IOException { // This could happen for a peer created before we introduce sync replication. String peerId = "testNoSyncReplicationState"; try { @@ -244,31 +244,31 @@ public void testBaseReplicationPeerConfig() throws ReplicationException { Configuration conf = UTIL.getConfiguration(); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";"). 
- concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigSecondKey)); // validates base configs get updated values even if config already present conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";"). - concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); - ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigSecondKey)); } @Test @@ -284,19 +284,19 @@ public void testBaseReplicationRemovePeerConfig() throws ReplicationException { conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). 
- get(customPeerConfigKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); } @@ -313,8 +313,8 @@ public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index a2ca0d96fa57..ccd3c17f3bca 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestZKReplicationQueueStorage { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKReplicationQueueStorage.class); + HBaseClassTestRule.forClass(TestZKReplicationQueueStorage.class); private static final HBaseZKTestingUtil UTIL = new HBaseZKTestingUtil(); @@ -247,7 +247,7 @@ protected int getQueuesZNodeCversion() throws KeeperException { @Override protected Pair getLastSequenceIdWithVersion(String encodedRegionName, - String peerId) throws KeeperException { + String peerId) throws KeeperException { Pair oldPair = super.getLastSequenceIdWithVersion(encodedRegionName, peerId); if (getLastSeqIdOpIndex < 100) { // Let the ZNode version increase. diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml index ac0c77e53197..59c2b3a0f055 100644 --- a/hbase-resource-bundle/pom.xml +++ b/hbase-resource-bundle/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -36,15 +36,15 @@ true
    - + - - maven-assembly-plugin - - true - + + maven-assembly-plugin + + true + org.apache.maven.plugins diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 169542891fd0..3787c7bcaf76 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-rest Apache HBase - Rest HBase Rest Server - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - - - src/test/resources - - **/** - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-antrun-plugin - - - - generate - generate-sources - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - net.revelc.code - warbucks-maven-plugin - - - - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -286,12 +186,12 @@ --> org.codehaus.jettison jettison - - - stax - stax-api - - + + + stax + stax-api + + @@ -378,6 +278,106 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + + + src/test/resources + + **/** + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -396,7 +396,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 56bc9297f85f..af8b9e303bdf 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +28,7 @@ public interface Constants { String VERSION_STRING = "0.0.3"; - int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours + int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours int DEFAULT_LISTEN_PORT = 8080; @@ -83,11 +82,13 @@ public interface Constants { String SCAN_FILTER = "filter"; String SCAN_REVERSED = "reversed"; String SCAN_CACHE_BLOCKS = "cacheblocks"; - String CUSTOM_FILTERS = "hbase.rest.custom.filters"; + String CUSTOM_FILTERS = "hbase.rest.custom.filters"; String ROW_KEYS_PARAM_NAME = "row"; - /** If this query parameter is present when processing row or scanner resources, - it disables server side block caching */ + /** + * If this query parameter is present when processing row or scanner resources, it disables server + * side block caching + */ String NOCACHE_PARAM_NAME = "nocache"; /** Configuration parameter to set rest client connection timeout */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java index 0a6fd0e1d5ac..61dede2ae835 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -43,9 +41,7 @@ public class ExistsResource extends ResourceBase { TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ public ExistsResource(TableResource tableResource) throws IOException { super(); @@ -53,19 +49,17 @@ public ExistsResource(TableResource tableResource) throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF, + MIMETYPE_BINARY }) public Response get(final @Context UriInfo uriInfo) { try { if (!tableResource.exists()) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } ResponseBuilder response = Response.ok(); response.cacheControl(cacheControl); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java index f1b2cea6e952..a16d3530ad71 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - -import org.apache.hadoop.hbase.rest.MetricsRESTSource; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsREST { @@ -34,23 +30,23 @@ public MetricsRESTSource getSource() { private MetricsRESTSource source; public MetricsREST() { - source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); + source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } - + /** * @param inc How much to add to requests. */ public void incrementRequests(final int inc) { source.incrementRequests(inc); } - + /** * @param inc How much to add to sucessfulGetCount. */ public void incrementSucessfulGetRequests(final int inc) { source.incrementSucessfulGetRequests(inc); } - + /** * @param inc How much to add to sucessfulPutCount. */ @@ -64,7 +60,7 @@ public void incrementSucessfulPutRequests(final int inc) { public void incrementFailedPutRequests(final int inc) { source.incrementFailedPutRequests(inc); } - + /** * @param inc How much to add to failedGetCount. */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 2d097752bd9b..68d774e420ca 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,14 +44,10 @@ public class MultiRowResource extends ResourceBase implements Constants { String[] columns = null; /** - * Constructor - * - * @param tableResource - * @param versions - * @throws java.io.IOException + * Constructor nn * @throws java.io.IOException */ public MultiRowResource(TableResource tableResource, String versions, String columnsStr) - throws IOException { + throws IOException { super(); this.tableResource = tableResource; @@ -87,15 +82,14 @@ public Response get(final @Context UriInfo uriInfo) { } } - ResultGenerator generator = - ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(this.tableResource.getName(), + rowSpec, null, !params.containsKey(NOCACHE_PARAM_NAME)); Cell value = null; RowModel rowModel = new RowModel(rowSpec.getRow()); if (generator.hasNext()) { while ((value = generator.next()) != null) { - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil - .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), + CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } model.addRow(rowModel); } else { @@ -106,11 +100,10 @@ public Response get(final @Context UriInfo uriInfo) { } if (model.getRows().isEmpty()) { - //If no rows found. + // If no rows found. servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("No rows found." + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("No rows found." + CRLF).build(); } else { servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(model).build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java index 6156b8aaf979..e1282c493ab5 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,16 +62,14 @@ public class NamespacesInstanceResource extends ResourceBase { boolean queryTables = false; /** - * Constructor for standard NamespaceInstanceResource. - * @throws IOException + * Constructor for standard NamespaceInstanceResource. n */ public NamespacesInstanceResource(String namespace) throws IOException { this(namespace, false); } /** - * Constructor for querying namespace table list via NamespaceInstanceResource. - * @throws IOException + * Constructor for querying namespace table list via NamespaceInstanceResource. n */ public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException { super(); @@ -85,22 +82,21 @@ public NamespacesInstanceResource(String namespace, boolean queryTables) throws * @param context servlet context * @param uriInfo (JAX-RS context variable) request URL * @return A response containing NamespacesInstanceModel for a namespace descriptions and - * TableListModel for a list of namespace tables. 
+ * TableListModel for a list of namespace tables. */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); // Respond to list of namespace tables requests. - if(queryTables){ + if (queryTables) { TableListModel tableModel = new TableListModel(); - try{ + try { List tables = servlet.getAdmin().listTableDescriptorsByNamespace(Bytes.toBytes(namespace)); for (TableDescriptor table : tables) { @@ -109,7 +105,7 @@ public Response get(final @Context ServletContext context, servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(tableModel).build(); - }catch(IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'."); } @@ -117,8 +113,7 @@ public Response get(final @Context ServletContext context, // Respond to namespace description requests. try { - NamespacesInstanceModel rowModel = - new NamespacesInstanceModel(servlet.getAdmin(), namespace); + NamespacesInstanceModel rowModel = new NamespacesInstanceModel(servlet.getAdmin(), namespace); servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(rowModel).build(); } catch (IOException e) { @@ -129,42 +124,38 @@ public Response get(final @Context ServletContext context, /** * Build a response for PUT alter namespace with properties specified. - * @param model properties used for alter. + * @param model properties used for alter. * @param uriInfo (JAX-RS context variable) request URL * @return response code. */ @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, true, uriInfo); } /** * Build a response for POST create namespace with properties specified. - * @param model properties used for create. + * @param model properties used for create. * @param uriInfo (JAX-RS context variable) request URL * @return response code. */ @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final NamespacesInstanceModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, false, uriInfo); } - // Check that POST or PUT is valid and then update namespace. private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, - final UriInfo uriInfo) { + final UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace((updateExisting ? 
"PUT " : "POST ") + uriInfo.getAbsolutePath()); } if (model == null) { try { model = new NamespacesInstanceModel(namespace); - } catch(IOException ioe) { + } catch (IOException ioe) { servlet.getMetrics().incrementFailedPutRequests(1); throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } @@ -174,7 +165,7 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) - .entity("Forbidden" + CRLF).build(); + .entity("Forbidden" + CRLF).build(); } Admin admin = null; @@ -182,25 +173,25 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda try { admin = servlet.getAdmin(); namespaceExists = doesNamespaceExist(admin, namespace); - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } // Do not allow creation if namespace already exists. - if(!updateExisting && namespaceExists){ + if (!updateExisting && namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' already exists. Use REST PUT " + - "to alter the existing namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Namespace '" + + namespace + "' already exists. Use REST PUT " + "to alter the existing namespace.") + .build(); } // Do not allow altering if namespace does not exist. - if (updateExisting && !namespaceExists){ + if (updateExisting && !namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exist. Use " + - "REST POST to create the namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity( + "Namespace '" + namespace + "' does not exist. Use " + "REST POST to create the namespace.") + .build(); } return createOrUpdate(model, uriInfo, admin, updateExisting); @@ -208,35 +199,36 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda // Do the actual namespace create or alter. private Response createOrUpdate(final NamespacesInstanceModel model, final UriInfo uriInfo, - final Admin admin, final boolean updateExisting) { + final Admin admin, final boolean updateExisting) { NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace); builder.addConfiguration(model.getProperties()); - if(model.getProperties().size() > 0){ + if (model.getProperties().size() > 0) { builder.addConfiguration(model.getProperties()); } NamespaceDescriptor nsd = builder.build(); - try{ - if(updateExisting){ + try { + if (updateExisting) { admin.modifyNamespace(nsd); - }else{ + } else { admin.createNamespace(nsd); } - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } servlet.getMetrics().incrementSucessfulPutRequests(1); - return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build() : - Response.created(uriInfo.getAbsolutePath()).build(); + return updateExisting + ? 
Response.ok(uriInfo.getAbsolutePath()).build() + : Response.created(uriInfo.getAbsolutePath()).build(); } - private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException{ + private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); - for(int i = 0; i < nd.length; i++){ - if(nd[i].getName().equals(namespaceName)){ + for (int i = 0; i < nd.length; i++) { + if (nd[i].getName().equals(namespaceName)) { return true; } } @@ -250,23 +242,23 @@ private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOE * @return response code. */ @DELETE - public Response deleteNoBody(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response deleteNoBody(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) - .entity("Forbidden" + CRLF).build(); + .entity("Forbidden" + CRLF).build(); } - try{ + try { Admin admin = servlet.getAdmin(); - if (!doesNamespaceExist(admin, namespace)){ - return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exists. Cannot " + - "drop namespace.").build(); + if (!doesNamespaceExist(admin, namespace)) { + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Namespace '" + namespace + "' does not exists. Cannot " + "drop namespace.") + .build(); } admin.deleteNamespace(namespace); @@ -283,8 +275,8 @@ public Response deleteNoBody(final byte[] message, * Dispatch to NamespaceInstanceResource for getting list of tables. */ @Path("tables") - public NamespacesInstanceResource getNamespaceInstanceResource( - final @PathParam("tables") String namespace) throws IOException { + public NamespacesInstanceResource + getNamespaceInstanceResource(final @PathParam("tables") String namespace) throws IOException { return new NamespacesInstanceResource(this.namespace, true); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java index e458d463f672..a3c0e2d2f1a5 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -46,8 +44,7 @@ public class NamespacesResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class); /** - * Constructor - * @throws IOException + * Constructor n */ public NamespacesResource() throws IOException { super(); @@ -60,8 +57,8 @@ public NamespacesResource() throws IOException { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -83,7 +80,7 @@ public Response get(final @Context ServletContext context, final @Context UriInf */ @Path("{namespace}") public NamespacesInstanceResource getNamespaceInstanceResource( - final @PathParam("namespace") String namespace) throws IOException { + final @PathParam("namespace") String namespace) throws IOException { return new NamespacesInstanceResource(namespace); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index d5e4354e4391..f90354b7ab78 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Common interface for models capable of supporting protobuf marshalling - * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and - * ProtobufMessageBodyProducer adapters. + * Common interface for models capable of supporting protobuf marshalling and unmarshalling. Hooks + * up to the ProtobufMessageBodyConsumer and ProtobufMessageBodyProducer adapters. */ @InterfaceAudience.Private public interface ProtobufMessageHandler { @@ -38,9 +34,7 @@ public interface ProtobufMessageHandler { /** * Initialize the model from a protobuf representation. * @param message the raw bytes of the protobuf message - * @return reference to self for convenience - * @throws IOException + * @return reference to self for convenience n */ - ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException; + ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java index d1ba5b7dd827..eadd6a9334bc 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,15 +50,15 @@ protected ProtobufStreamingOutput(ResultScanner scanner, String type, int limit, this.limit = limit; this.fetchSize = fetchSize; if (LOG.isTraceEnabled()) { - LOG.trace("Created StreamingOutput with content type = " + this.contentType - + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize); + LOG.trace("Created StreamingOutput with content type = " + this.contentType + " user limit : " + + this.limit + " scan fetch size : " + this.fetchSize); } } @Override public void write(OutputStream outStream) throws IOException, WebApplicationException { Result[] rowsToSend; - if(limit < fetchSize){ + if (limit < fetchSize) { rowsToSend = this.resultScanner.next(limit); writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream); } else { @@ -69,7 +69,7 @@ public void write(OutputStream outStream) throws IOException, WebApplicationExce } else { rowsToSend = this.resultScanner.next(this.fetchSize); } - if(rowsToSend.length == 0){ + if (rowsToSend.length == 0) { break; } count = count - rowsToSend.length; @@ -79,9 +79,9 @@ public void write(OutputStream outStream) throws IOException, WebApplicationExce } private void writeToStream(CellSetModel model, String contentType, OutputStream outStream) - throws IOException { + throws IOException { byte[] objectBytes = model.createProtobufOutput(); - outStream.write(Bytes.toBytes((short)objectBytes.length)); + outStream.write(Bytes.toBytes((short) objectBytes.length)); outStream.write(objectBytes); outStream.flush(); if (LOG.isTraceEnabled()) { @@ -96,8 +96,8 @@ private CellSetModel createModelFromResults(Result[] results) { RowModel rModel = new RowModel(rowKey); List kvs = rs.listCells(); for (Cell kv : kvs) { - rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv - .getTimestamp(), CellUtil.cloneValue(kv))); + rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + kv.getTimestamp(), CellUtil.cloneValue(kv))); } cellSetModel.addRow(rModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 9c9275844275..02fc17c4b4e0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.lang.management.ManagementFactory; @@ -88,14 +87,14 @@ public class RESTServer implements Constants { static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled"; static final boolean REST_CSRF_ENABLED_DEFAULT = false; boolean restCSRFEnabled = false; - static final String REST_CSRF_CUSTOM_HEADER_KEY ="hbase.rest.csrf.custom.header"; + static final String REST_CSRF_CUSTOM_HEADER_KEY = "hbase.rest.csrf.custom.header"; static final String REST_CSRF_CUSTOM_HEADER_DEFAULT = "X-XSRF-HEADER"; static final String REST_CSRF_METHODS_TO_IGNORE_KEY = "hbase.rest.csrf.methods.to.ignore"; static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login"; static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size"; - static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1; + static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE - 1; private static final String PATH_SPEC_ANY = "/*"; @@ -107,8 +106,8 @@ public class RESTServer implements Constants { // HACK, making this static for AuthFilter to get at our configuration. Necessary for unit tests. @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value={"ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL"}, - justification="For testing") + value = { "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL" }, + justification = "For testing") public static Configuration conf = null; private final UserProvider userProvider; private Server server; @@ -122,16 +121,17 @@ public RESTServer(Configuration conf) { private static void printUsageAndExit(Options options, int exitCode) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hbase rest start", "", options, - "\nTo run the REST server as a daemon, execute " + - "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", true); + "\nTo run the REST server as a daemon, execute " + + "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", + true); System.exit(exitCode); } void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { restCSRFEnabled = conf.getBoolean(REST_CSRF_ENABLED_KEY, REST_CSRF_ENABLED_DEFAULT); if (restCSRFEnabled) { - Map restCsrfParams = RestCsrfPreventionFilter - .getFilterParams(conf, "hbase.rest-csrf."); + Map restCsrfParams = + RestCsrfPreventionFilter.getFilterParams(conf, "hbase.rest-csrf."); FilterHolder holder = new FilterHolder(); holder.setName("csrf"); holder.setClassName(RestCsrfPreventionFilter.class.getName()); @@ -141,7 +141,7 @@ void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { } private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, - Configuration conf) { + Configuration conf) { FilterHolder holder = new FilterHolder(); holder.setName("clickjackingprevention"); holder.setClassName(ClickjackingPreventionFilter.class.getName()); @@ -149,8 +149,8 @@ private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); } - private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, - Configuration conf, boolean isSecure) { + private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf, + boolean isSecure) { FilterHolder holder = new FilterHolder(); holder.setName("securityheaders"); 
holder.setClassName(SecurityHeadersFilter.class.getName()); @@ -159,13 +159,12 @@ private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, } // login the server principal (if using secure Hadoop) - private static Pair> loginServerPrincipal( - UserProvider userProvider, Configuration conf) throws Exception { + private static Pair> + loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception { Class containerClass = ServletContainer.class; if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); String keytabFilename = conf.get(REST_KEYTAB_FILE); Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(), REST_KEYTAB_FILE + " should be set if security is enabled"); @@ -181,7 +180,7 @@ private static Pair> loginServer FilterHolder authFilter = new FilterHolder(); authFilter.setClassName(AuthFilter.class.getName()); authFilter.setName("AuthenticationFilter"); - return new Pair<>(authFilter,containerClass); + return new Pair<>(authFilter, containerClass); } } return new Pair<>(null, containerClass); @@ -190,8 +189,8 @@ private static Pair> loginServer private static void parseCommandLine(String[] args, Configuration conf) { Options options = new Options(); options.addOption("p", "port", true, "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]"); - options.addOption("ro", "readonly", false, "Respond only to GET HTTP " + - "method requests [default: false]"); + options.addOption("ro", "readonly", false, + "Respond only to GET HTTP " + "method requests [default: false]"); options.addOption("i", "infoport", true, "Port for WEB UI"); CommandLine commandLine = null; @@ -250,20 +249,19 @@ private static void parseCommandLine(String[] args, Configuration conf) { } } - /** * Runs the REST server. */ public synchronized void run() throws Exception { - Pair> pair = loginServerPrincipal( - userProvider, conf); + Pair> pair = + loginServerPrincipal(userProvider, conf); FilterHolder authFilter = pair.getFirst(); Class containerClass = pair.getSecond(); RESTServlet servlet = RESTServlet.getInstance(conf, userProvider); // set up the Jersey servlet container for Jetty - ResourceConfig application = new ResourceConfig(). - packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class); + ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest") + .register(JacksonJaxbJsonProvider.class); // Using our custom ServletContainer is tremendously important. This is what makes sure the // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself. ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application); @@ -279,23 +277,24 @@ public synchronized void run() throws Exception { // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use // bounded {@link ArrayBlockingQueue} with the given size int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1); - int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); - QueuedThreadPool threadPool = queueSize > 0 ? 
- new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : - new QueuedThreadPool(maxThreads, minThreads, idleTimeout); + int idleTimeout = + servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); + QueuedThreadPool threadPool = queueSize > 0 + ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, + new ArrayBlockingQueue<>(queueSize)) + : new QueuedThreadPool(maxThreads, minThreads, idleTimeout); this.server = new Server(threadPool); // Setup JMX - MBeanContainer mbContainer=new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); + MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); server.addEventListener(mbContainer); server.addBean(mbContainer); - String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"); int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080); - int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, - DEFAULT_HTTP_HEADER_CACHE_SIZE); + int httpHeaderCacheSize = + servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE); HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSecureScheme("https"); httpConfig.setSecurePort(servicePort); @@ -315,56 +314,55 @@ public synchronized void run() throws Exception { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); - String password = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_PASSWORD, null); - String keyPassword = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_KEYPASSWORD, password); + String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); + String keyPassword = + HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); - if(StringUtils.isNotBlank(keystoreType)) { + if (StringUtils.isNotBlank(keystoreType)) { sslCtxFactory.setKeyStoreType(keystoreType); } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); - if(StringUtils.isNotBlank(trustStore)) { + if (StringUtils.isNotBlank(trustStore)) { sslCtxFactory.setTrustStorePath(trustStore); } String trustStorePassword = HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); - if(StringUtils.isNotBlank(trustStorePassword)) { + if (StringUtils.isNotBlank(trustStorePassword)) { sslCtxFactory.setTrustStorePassword(trustStorePassword); } String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); - if(StringUtils.isNotBlank(trustStoreType)) { + if (StringUtils.isNotBlank(trustStoreType)) { sslCtxFactory.setTrustStoreType(trustStoreType); } - String[] excludeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { sslCtxFactory.setExcludeCipherSuites(excludeCiphers); } - String[] includeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (includeCiphers.length != 0) { 
sslCtxFactory.setIncludeCipherSuites(includeCiphers); } - String[] excludeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (excludeProtocols.length != 0) { sslCtxFactory.setExcludeProtocols(excludeProtocols); } - String[] includeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (includeProtocols.length != 0) { sslCtxFactory.setIncludeProtocols(includeProtocols); } serverConnector = new ServerConnector(server, - new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), - new HttpConnectionFactory(httpsConfig)); + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); } else { serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig)); } @@ -381,15 +379,16 @@ public synchronized void run() throws Exception { server.setStopAtShutdown(true); // set up context - ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); + ServletContextHandler ctxHandler = + new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); ctxHandler.addServlet(sh, PATH_SPEC_ANY); if (authFilter != null) { ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); } // Load filters from configuration. - String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, - GzipFilter.class.getName()); + String[] filterClasses = + servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName()); for (String filter : filterClasses) { filter = filter.trim(); ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); @@ -398,7 +397,7 @@ public synchronized void run() throws Exception { addClickjackingPreventionFilter(ctxHandler, conf); addSecurityHeadersFilter(ctxHandler, conf, isSecure); HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration() - .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); + .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); // Put up info server. int port = conf.getInt("hbase.rest.info.port", 8085); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 6c71bb6222e0..10b96ec92845 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.ParseFilter; @@ -32,6 +27,9 @@ import org.apache.hadoop.hbase.util.JvmPauseMonitor; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Singleton class encapsulating global REST servlet state and functions. @@ -58,7 +56,7 @@ UserGroupInformation getRealUser() { * @return the RESTServlet singleton instance */ public synchronized static RESTServlet getInstance() { - assert(INSTANCE != null); + assert (INSTANCE != null); return INSTANCE; } @@ -70,13 +68,12 @@ public ConnectionCache getConnectionCache() { } /** - * @param conf Existing configuration to use in rest servlet + * @param conf Existing configuration to use in rest servlet * @param userProvider the login user provider - * @return the RESTServlet singleton instance - * @throws IOException + * @return the RESTServlet singleton instance n */ - public synchronized static RESTServlet getInstance(Configuration conf, - UserProvider userProvider) throws IOException { + public synchronized static RESTServlet getInstance(Configuration conf, UserProvider userProvider) + throws IOException { if (INSTANCE == null) { INSTANCE = new RESTServlet(conf, userProvider); } @@ -92,20 +89,17 @@ public synchronized static void stop() { /** * Constructor with existing configuration - * @param conf existing configuration - * @param userProvider the login user provider - * @throws IOException + * @param conf existing configuration + * @param userProvider the login user provider n */ - RESTServlet(final Configuration conf, - final UserProvider userProvider) throws IOException { + RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException { this.realUser = userProvider.getCurrent().getUGI(); this.conf = conf; registerCustomFilter(conf); int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000); int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000); - connectionCache = new ConnectionCache( - conf, userProvider, cleanInterval, maxIdleTime); + connectionCache = new ConnectionCache(conf, userProvider, cleanInterval, maxIdleTime); if (supportsProxyuser()) { ProxyUsers.refreshSuperUserGroupsConfiguration(conf); } @@ -136,8 +130,7 @@ MetricsREST getMetrics() { } /** - * Helper method to determine if server should - * only respond to GET HTTP method requests. + * Helper method to determine if server should only respond to GET HTTP method requests. 
* @return boolean for server read-only state */ boolean isReadOnly() { @@ -166,8 +159,7 @@ private void registerCustomFilter(Configuration conf) { for (String filterClass : filterList) { String[] filterPart = filterClass.split(":"); if (filterPart.length != 2) { - LOG.warn( - "Invalid filter specification " + filterClass + " - skipping"); + LOG.warn("Invalid filter specification " + filterClass + " - skipping"); } else { ParseFilter.registerFilter(filterPart[0], filterPart[1]); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java index 28cf4cba9fa7..d605aec39c8e 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; +import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; + import java.io.IOException; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -31,11 +31,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; -import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; /** - * REST servlet container. It is used to get the remote request user - * without going through @HttpContext, so that we can minimize code changes. + * REST servlet container. It is used to get the remote request user without going + * through @HttpContext, so that we can minimize code changes. */ @InterfaceAudience.Private public class RESTServletContainer extends ServletContainer { @@ -46,13 +45,12 @@ public RESTServletContainer(ResourceConfig config) { } /** - * This container is used only if authentication and - * impersonation is enabled. The remote request user is used - * as a proxy user for impersonation in invoking any REST service. + * This container is used only if authentication and impersonation is enabled. The remote request + * user is used as a proxy user for impersonation in invoking any REST service. 
*/ @Override - public void service(final HttpServletRequest request, - final HttpServletResponse response) throws ServletException, IOException { + public void service(final HttpServletRequest request, final HttpServletResponse response) + throws ServletException, IOException { final HttpServletRequest lowerCaseRequest = toLowerCase(request); final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas"); RESTServlet servlet = RESTServlet.getInstance(); @@ -69,7 +67,7 @@ public void service(final HttpServletRequest request, // validate the proxy user authorization try { ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf); - } catch(AuthorizationException e) { + } catch (AuthorizationException e) { throw new ServletException(e.getMessage()); } servlet.setEffectiveUser(doAsUserFromQuery); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 784894e27571..21c973026030 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -57,9 +55,7 @@ public class RegionsResource extends ResourceBase { TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ public RegionsResource(TableResource tableResource) throws IOException { super(); @@ -67,8 +63,8 @@ public RegionsResource(TableResource tableResource) throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -98,14 +94,12 @@ public Response get(final @Context UriInfo uriInfo) { return response.build(); } catch (TableNotFoundException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java index 9beb69df682b..422f4a3b9430 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -32,7 +31,7 @@ public class ResourceBase implements Constants { RESTServlet servlet; - Class accessDeniedClazz; + Class accessDeniedClazz; public ResourceBase() throws IOException { servlet = RESTServlet.getInstance(); @@ -41,53 +40,42 @@ public ResourceBase() throws IOException { } catch (ClassNotFoundException e) { } } - + protected Response processException(Throwable exp) { Throwable curr = exp; - if(accessDeniedClazz != null) { - //some access denied exceptions are buried + if (accessDeniedClazz != null) { + // some access denied exceptions are buried while (curr != null) { - if(accessDeniedClazz.isAssignableFrom(curr.getClass())) { + if (accessDeniedClazz.isAssignableFrom(curr.getClass())) { throw new WebApplicationException( - Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } curr = curr.getCause(); } } - //TableNotFound may also be buried one level deep - if (exp instanceof TableNotFoundException || - exp.getCause() instanceof TableNotFoundException) { + // TableNotFound may also be buried one level deep + if (exp instanceof TableNotFoundException || exp.getCause() instanceof TableNotFoundException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } - if (exp instanceof NoSuchColumnFamilyException){ + if (exp instanceof NoSuchColumnFamilyException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RuntimeException) { throw new WebApplicationException( - Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RetriesExhaustedException) { RetriesExhaustedException retryException = (RetriesExhaustedException) exp; processException(retryException.getCause()); } throw new WebApplicationException( - Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java index 
41135a814f38..c73e86603d10 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,19 +19,16 @@ import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.rest.model.ScannerModel; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public abstract class ResultGenerator implements Iterator { - public static ResultGenerator fromRowSpec(final String table, - final RowSpec rowspec, final Filter filter, final boolean cacheBlocks) - throws IOException { + public static ResultGenerator fromRowSpec(final String table, final RowSpec rowspec, + final Filter filter, final boolean cacheBlocks) throws IOException { if (rowspec.isSingleRow()) { return new RowResultGenerator(table, rowspec, filter, cacheBlocks); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index 3f5e1e1f6f82..9baf7aa7c045 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -50,8 +48,7 @@ public class RootResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public RootResource() throws IOException { super(); @@ -60,15 +57,15 @@ public RootResource() throws IOException { private final TableListModel getTableList() throws IOException { TableListModel tableList = new TableListModel(); TableName[] tableNames = servlet.getAdmin().listTableNames(); - for (TableName name: tableNames) { + for (TableName name : tableNames) { tableList.add(new TableModel(name.getNameAsString())); } return tableList; } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -86,8 +83,7 @@ public Response get(final @Context UriInfo uriInfo) { } @Path("status/cluster") - public StorageClusterStatusResource getClusterStatusResource() - throws IOException { + public StorageClusterStatusResource getClusterStatusResource() throws IOException { return new StorageClusterStatusResource(); } @@ -97,8 +93,7 @@ public VersionResource getVersionResource() throws IOException { } @Path("{table}") - public TableResource getTableResource( - final @PathParam("table") String table) throws IOException { + public TableResource getTableResource(final @PathParam("table") String table) throws IOException { return new TableResource(table); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index d109b0938861..cfd63aa2d1c6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -71,16 +69,10 @@ public class RowResource extends ResourceBase { private boolean returnResult = false; /** - * Constructor - * @param tableResource - * @param rowspec - * @param versions - * @param check - * @param returnResult - * @throws IOException + * Constructor nnnnnn */ - public RowResource(TableResource tableResource, String rowspec, - String versions, String check, String returnResult) throws IOException { + public RowResource(TableResource tableResource, String rowspec, String versions, String check, + String returnResult) throws IOException { super(); this.tableResource = tableResource; this.rowspec = new RowSpec(rowspec); @@ -94,8 +86,7 @@ public RowResource(TableResource tableResource, String rowspec, } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -103,14 +94,12 @@ public Response get(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } int count = 0; CellSetModel model = new CellSetModel(); @@ -143,7 +132,7 @@ public Response get(final @Context UriInfo uriInfo) { @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); // doesn't make sense to use a non specific coordinate as this can only @@ -151,24 +140,22 @@ public Response getBinary(final @Context UriInfo uriInfo) { if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) { servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + - "in the row. Using the 'Accept' header with one of these formats lets you " + - "retrieve the entire row if it has multiple columns: " + - // Same as the @Produces list for the get method. - MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + - MIMETYPE_PROTOBUF + ", " + MIMETYPE_PROTOBUF_IETF + - CRLF).build(); + .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + + "in the row. Using the 'Accept' header with one of these formats lets you " + + "retrieve the entire row if it has multiple columns: " + + // Same as the @Produces list for the get method. 
+ MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + MIMETYPE_PROTOBUF + ", " + + MIMETYPE_PROTOBUF_IETF + CRLF) + .build(); } MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } Cell value = generator.next(); ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); @@ -185,9 +172,8 @@ Response update(final CellSetModel model, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (CHECK_PUT.equalsIgnoreCase(check)) { @@ -199,29 +185,27 @@ Response update(final CellSetModel model, final boolean replace) { } else if (CHECK_INCREMENT.equalsIgnoreCase(check)) { return increment(model); } else if (check != null && check.length() > 0) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Invalid check value '" + check + "'" + CRLF).build(); } Table table = null; try { List rows = model.getRows(); List puts = new ArrayList<>(); - for (RowModel row: rows) { + for (RowModel row : rows) { byte[] key = row.getKey(); if (key == null) { key = rowspec.getRow(); } if (key == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key not specified." + CRLF).build(); } Put put = new Put(key); int i = 0; - for (CellModel cell: row.getCells()) { + for (CellModel cell : row.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -232,24 +216,17 @@ Response update(final CellSetModel model, final boolean replace) { } if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); } puts.add(put); if (LOG.isTraceEnabled()) { @@ -276,14 +253,12 @@ Response update(final CellSetModel model, final boolean replace) { } // This currently supports only update of one row at a time. - Response updateBinary(final byte[] message, final HttpHeaders headers, - final boolean replace) { + Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } Table table = null; try { @@ -308,25 +283,18 @@ Response updateBinary(final byte[] message, final HttpHeaders headers, } if (column == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } Put put = new Put(row); byte parts[][] = CellUtil.parseColumn(column); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(timestamp) - .setType(Type.Put) - .setValue(message) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put) + .setValue(message).build()); table = servlet.getTable(tableResource.getName()); table.put(put); if (LOG.isTraceEnabled()) { @@ -349,45 +317,39 @@ Response updateBinary(final byte[] message, final HttpHeaders headers, } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, true); } @PUT @Consumes(MIMETYPE_BINARY) - public Response putBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response putBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, true); } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, false); } @POST @Consumes(MIMETYPE_BINARY) - public Response postBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response postBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, false); } @@ -400,9 +362,8 @@ public Response delete(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + 
.entity("Forbidden" + CRLF).build(); } Delete delete = null; if (rowspec.hasTimestamp()) { @@ -411,7 +372,7 @@ public Response delete(final @Context UriInfo uriInfo) { delete = new Delete(rowspec.getRow()); } - for (byte[] column: rowspec.getColumns()) { + for (byte[] column : rowspec.getColumns()) { byte[][] split = CellUtil.parseColumn(column); if (rowspec.hasTimestamp()) { if (split.length == 1) { @@ -419,9 +380,8 @@ public Response delete(final @Context UriInfo uriInfo) { } else if (split.length == 2) { delete.addColumns(split[0], split[1], rowspec.getTimestamp()); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } else { if (split.length == 1) { @@ -429,9 +389,8 @@ public Response delete(final @Context UriInfo uriInfo) { } else if (split.length == 2) { delete.addColumns(split[0], split[1]); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } } @@ -459,9 +418,8 @@ public Response delete(final @Context UriInfo uriInfo) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndPut on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndPut on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -472,7 +430,7 @@ Response checkAndPut(final CellSetModel model) { if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); @@ -485,12 +443,10 @@ Response checkAndPut(final CellSetModel model) { int cellModelCount = cellModels.size(); if (key == null || cellModelCount <= 1) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response - .status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) - .entity( - "Bad request: Either row key is null or no data found for columns specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity( + "Bad request: Either row key is null or no data found for columns specified." + CRLF) + .build(); } Put put = new Put(key); @@ -503,34 +459,26 @@ Response checkAndPut(final CellSetModel model) { // Copy all the cells to the Put request // and track if the check cell's latest value is also sent - for (int i = 0, n = cellModelCount - 1; i < n ; i++) { + for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); - if(Bytes.equals(col, - valueToCheckCell.getColumn())) { + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); + if (Bytes.equals(col, valueToCheckCell.getColumn())) { valueToPutCell = cell; } } @@ -538,16 +486,15 @@ Response checkAndPut(final CellSetModel model) { if (valueToPutCell == null) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: The column to put and check do not match." + CRLF).build(); + .entity("Bad request: The column to put and check do not match." + CRLF).build(); } else { retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]) .ifEquals(valueToCheckCell.getValue()).thenPut(put); } } else { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { @@ -555,9 +502,8 @@ Response checkAndPut(final CellSetModel model) { } if (!retValue) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Value not Modified" + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulPutRequests(1); @@ -577,9 +523,8 @@ Response checkAndPut(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndDelete on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndDelete on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -590,9 +535,8 @@ Response checkAndDelete(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." 
+ CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -601,9 +545,8 @@ Response checkAndDelete(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } List cellModels = rowModel.getCells(); @@ -611,31 +554,29 @@ Response checkAndDelete(final CellSetModel model) { delete = new Delete(key); boolean retValue; - CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1); + CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount - 1); byte[] valueToDeleteColumn = valueToDeleteCell.getColumn(); if (valueToDeleteColumn == null) { try { valueToDeleteColumn = rowspec.getColumns()[0]; } catch (final ArrayIndexOutOfBoundsException e) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column not specified for check." + CRLF).build(); } } - byte[][] parts ; + byte[][] parts; // Copy all the cells to the Delete request if extra cells are sent - if(cellModelCount > 1) { + if (cellModelCount > 1) { for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } parts = CellUtil.parseColumn(col); @@ -647,10 +588,8 @@ Response checkAndDelete(final CellSetModel model) { delete.addColumn(parts[0], parts[1], cell.getTimestamp()); } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) - .entity("Bad request: Column to delete incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to delete incorrectly specified." + CRLF).build(); } } } @@ -660,36 +599,33 @@ Response checkAndDelete(final CellSetModel model) { if (parts[1].length != 0) { // To support backcompat of deleting a cell // if that is the only cell passed to the rest api - if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], parts[1]); } retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1]) - .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); } else { // The case of empty qualifier. 
- if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY)); } - retValue = table.checkAndMutate(key, parts[0]) - .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + retValue = table.checkAndMutate(key, parts[0]).ifEquals(valueToDeleteCell.getValue()) + .thenDelete(delete); } } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to check incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { - LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " - + retValue); + LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + retValue); } if (!retValue) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity(" Delete check failed." + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulDeleteRequests(1); @@ -709,9 +645,8 @@ Response checkAndDelete(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Append on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Append on + * HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -722,9 +657,8 @@ Response append(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -733,15 +667,14 @@ Response append(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } append = new Append(key); append.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -752,16 +685,14 @@ Response append(final CellSetModel model) { } if (col == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } append.addColumn(parts[0], parts[1], cell.getValue()); } @@ -773,16 +704,15 @@ Response append(final CellSetModel model) { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Append return empty." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Append return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rRowModel); servlet.getMetrics().incrementSucessfulAppendRequests(1); @@ -805,9 +735,8 @@ Response append(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Increment on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Increment + * on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -818,9 +747,8 @@ Response increment(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -829,15 +757,14 @@ Response increment(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } increment = new Increment(key); increment.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -848,18 +775,17 @@ Response increment(final CellSetModel model) { } if (col == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } - increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue()))); + increment.addColumn(parts[0], parts[1], + Long.parseLong(Bytes.toStringBinary(cell.getValue()))); } if (LOG.isDebugEnabled()) { @@ -870,16 +796,15 @@ Response increment(final CellSetModel model) { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Increment return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rowModel); servlet.getMetrics().incrementSucessfulIncrementRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java index 3d81c414867d..ce1eda224b4b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.NoSuchElementException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -30,11 +28,8 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.security.AccessDeniedException; - import org.apache.hadoop.util.StringUtils; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,9 +40,8 @@ public class RowResultGenerator extends ResultGenerator { private Iterator valuesI; private Cell cache; - public RowResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public RowResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { try (Table table = RESTServlet.getInstance().getTable(tableName)) { Get get = new Get(rowspec.getRow()); if (rowspec.hasColumns()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index c510c9ed797d..c9993336fa14 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.UnsupportedEncodingException; @@ -26,22 +24,19 @@ import java.util.Collections; import java.util.List; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Parses a path based row/column/timestamp specification into its component - * elements. + * Parses a path based row/column/timestamp specification into its component elements. *

    - * */ @InterfaceAudience.Private public class RowSpec { public static final long DEFAULT_START_TIMESTAMP = 0; public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE; - + private byte[] row = HConstants.EMPTY_START_ROW; private byte[] endRow = null; private TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); @@ -62,8 +57,7 @@ public RowSpec(String path) throws IllegalArgumentException { i = parseQueryParams(path, i); } - private int parseRowKeys(final String path, int i) - throws IllegalArgumentException { + private int parseRowKeys(final String path, int i) throws IllegalArgumentException { String startRow = null, endRow = null; try { StringBuilder sb = new StringBuilder(); @@ -76,10 +70,8 @@ private int parseRowKeys(final String path, int i) String row = startRow = sb.toString(); int idx = startRow.indexOf(','); if (idx != -1) { - startRow = URLDecoder.decode(row.substring(0, idx), - HConstants.UTF8_ENCODING); - endRow = URLDecoder.decode(row.substring(idx + 1), - HConstants.UTF8_ENCODING); + startRow = URLDecoder.decode(row.substring(0, idx), HConstants.UTF8_ENCODING); + endRow = URLDecoder.decode(row.substring(idx + 1), HConstants.UTF8_ENCODING); } else { startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING); } @@ -93,13 +85,11 @@ private int parseRowKeys(final String path, int i) // table scanning if (startRow.charAt(startRow.length() - 1) == '*') { if (endRow != null) - throw new IllegalArgumentException("invalid path: start row "+ - "specified with wildcard"); - this.row = Bytes.toBytes(startRow.substring(0, - startRow.lastIndexOf("*"))); + throw new IllegalArgumentException("invalid path: start row " + "specified with wildcard"); + this.row = Bytes.toBytes(startRow.substring(0, startRow.lastIndexOf("*"))); this.endRow = new byte[this.row.length + 1]; System.arraycopy(this.row, 0, this.endRow, 0, this.row.length); - this.endRow[this.row.length] = (byte)255; + this.endRow[this.row.length] = (byte) 255; } else { this.row = Bytes.toBytes(startRow.toString()); if (endRow != null) { @@ -145,8 +135,7 @@ private int parseColumns(final String path, int i) throws IllegalArgumentExcepti return i; } - private int parseTimestamp(final String path, int i) - throws IllegalArgumentException { + private int parseTimestamp(final String path, int i) throws IllegalArgumentException { if (i >= path.length()) { return i; } @@ -163,8 +152,7 @@ private int parseTimestamp(final String path, int i) i++; } try { - time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -176,8 +164,7 @@ private int parseTimestamp(final String path, int i) i++; } try { - time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -206,8 +193,7 @@ private int parseQueryParams(final String path, int i) { } StringBuilder query = new StringBuilder(); try { - query.append(URLDecoder.decode(path.substring(i), - HConstants.UTF8_ENCODING)); + query.append(URLDecoder.decode(path.substring(i), HConstants.UTF8_ENCODING)); } catch (UnsupportedEncodingException e) { // should not happen throw new RuntimeException(e); @@ -234,39 +220,41 @@ private int parseQueryParams(final String path, int i) { break; } switch (what) { - 
case 'm': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + case 'm': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxVersions = Integer.parseInt(sb.toString()); } - maxVersions = Integer.parseInt(sb.toString()); - } break; - case 'n': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + break; + case 'n': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxValues = Integer.parseInt(sb.toString()); } - maxValues = Integer.parseInt(sb.toString()); - } break; - default: - throw new IllegalArgumentException("unknown parameter '" + c + "'"); + break; + default: + throw new IllegalArgumentException("unknown parameter '" + c + "'"); } } return i; } - public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, - long startTime, long endTime, int maxVersions) { + public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, long startTime, long endTime, + int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -277,15 +265,16 @@ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, this.maxVersions = maxVersions; } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions, Collection labels) { + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions, Collection labels) { this(startRow, endRow, columns, startTime, endTime, maxVersions); - if(labels != null) { + if (labels != null) { this.labels.addAll(labels); } } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions) { + + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -319,7 +308,7 @@ public void setMaxValues(final int maxValues) { public boolean hasColumns() { return !columns.isEmpty(); } - + public boolean hasLabels() { return !labels.isEmpty(); } @@ -347,7 +336,7 @@ public void addColumn(final byte[] column) { public byte[][] getColumns() { return columns.toArray(new byte[columns.size()][]); } - + public List getLabels() { return labels; } @@ -384,11 +373,11 @@ public String toString() { result.append(Bytes.toString(row)); } result.append("', endRow => '"); - if (endRow != null) { + if (endRow != null) { result.append(Bytes.toString(endRow)); } result.append("', columns => ["); - for (byte[] col: columns) { + for (byte[] col : columns) { result.append(" '"); result.append(Bytes.toString(col)); result.append("'"); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index 4bbc2cf11261..81ab8e24692f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -44,8 +42,7 @@ @InterfaceAudience.Private public class ScannerInstanceResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerInstanceResource.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerInstanceResource.class); static CacheControl cacheControl; static { @@ -58,29 +55,28 @@ public class ScannerInstanceResource extends ResourceBase { String id = null; int batch = 1; - public ScannerInstanceResource() throws IOException { } + public ScannerInstanceResource() throws IOException { + } - public ScannerInstanceResource(String table, String id, - ResultGenerator generator, int batch) throws IOException { + public ScannerInstanceResource(String table, String id, ResultGenerator generator, int batch) + throws IOException { this.id = id; this.generator = generator; this.batch = batch; } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context UriInfo uriInfo, - @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) { + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows, + final @QueryParam("c") int maxValues) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (generator == null) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } else { // Updated the connection access time for each client next() call RESTServlet.getInstance().getConnectionCache().updateConnectionAccessTime(); @@ -104,15 +100,13 @@ public Response get(final @Context UriInfo uriInfo, servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) .build(); } catch (IllegalArgumentException e) { Throwable t = e.getCause(); if (t instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } throw e; } @@ -144,9 +138,8 @@ public Response get(final @Context UriInfo uriInfo, rowKey = CellUtil.cloneRow(value); rowModel = new RowModel(rowKey); } - rowModel.addCell( - new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), - value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + value.getTimestamp(), CellUtil.cloneValue(value))); } while (--count > 0); model.addRow(rowModel); ResponseBuilder response = Response.ok(model); @@ -159,8 +152,7 @@ public Response get(final @Context UriInfo uriInfo, @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo 
uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + - MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { @@ -173,10 +165,10 @@ public Response getBinary(final @Context UriInfo uriInfo) { } ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); response.cacheControl(cacheControl); - response.header("X-Row", Bytes.toString(Base64.getEncoder().encode( - CellUtil.cloneRow(value)))); - response.header("X-Column", Bytes.toString(Base64.getEncoder().encode( - CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); + response.header("X-Row", + Bytes.toString(Base64.getEncoder().encode(CellUtil.cloneRow(value)))); + response.header("X-Column", Bytes.toString(Base64.getEncoder() + .encode(CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); response.header("X-Timestamp", value.getTimestamp()); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -187,8 +179,7 @@ public Response getBinary(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) .build(); } } @@ -200,9 +191,8 @@ public Response delete(final @Context UriInfo uriInfo) { } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (ScannerResource.delete(id)) { servlet.getMetrics().incrementSucessfulDeleteRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java index cd3af0bf9f5c..f5606bb25d7f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import com.fasterxml.jackson.core.JsonParseException; @@ -48,17 +46,15 @@ public class ScannerResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(ScannerResource.class); - static final Map scanners = - Collections.synchronizedMap(new HashMap()); + static final Map scanners = + Collections.synchronizedMap(new HashMap()); TableResource tableResource; /** - * Constructor - * @param tableResource - * @throws IOException + * Constructor nn */ - public ScannerResource(TableResource tableResource)throws IOException { + public ScannerResource(TableResource tableResource) throws IOException { super(); this.tableResource = tableResource; } @@ -73,30 +69,27 @@ static boolean delete(final String id) { } } - Response update(final ScannerModel model, final boolean replace, - final UriInfo uriInfo) { + Response update(final ScannerModel model, final boolean replace, final UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } byte[] endRow = model.hasEndRow() ? model.getEndRow() : null; RowSpec spec = null; if (model.getLabels() != null) { spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), - model.getEndTime(), model.getMaxVersions(), model.getLabels()); + model.getEndTime(), model.getMaxVersions(), model.getLabels()); } else { spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), - model.getEndTime(), model.getMaxVersions()); + model.getEndTime(), model.getMaxVersions()); } try { Filter filter = ScannerResultGenerator.buildFilterFromModel(model); String tableName = tableResource.getName(); - ScannerResultGenerator gen = - new ScannerResultGenerator(tableName, spec, filter, model.getCaching(), - model.getCacheBlocks(), model.getLimit()); + ScannerResultGenerator gen = new ScannerResultGenerator(tableName, spec, filter, + model.getCaching(), model.getCacheBlocks(), model.getLimit()); String id = gen.getID(); ScannerInstanceResource instance = new ScannerInstanceResource(tableName, id, gen, model.getBatch()); @@ -112,26 +105,23 @@ Response update(final ScannerModel model, final boolean replace, LOG.error("Exception occurred while processing " + uriInfo.getAbsolutePath() + " : ", e); servlet.getMetrics().incrementFailedPutRequests(1); if (e instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); - } else if (e instanceof RuntimeException - || e instanceof JsonMappingException | e instanceof JsonParseException) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); + } else if ( + e instanceof RuntimeException + || e instanceof JsonMappingException | e instanceof JsonParseException + ) { + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + 
CRLF).build(); } } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -139,10 +129,8 @@ public Response put(final ScannerModel model, } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("POST " + uriInfo.getAbsolutePath()); } @@ -150,8 +138,8 @@ public Response post(final ScannerModel model, } @Path("{scanner: .+}") - public ScannerInstanceResource getScannerInstanceResource( - final @PathParam("scanner") String id) throws IOException { + public ScannerInstanceResource getScannerInstanceResource(final @PathParam("scanner") String id) + throws IOException { ScannerInstanceResource instance = scanners.get(id); if (instance == null) { servlet.getMetrics().incrementFailedGetRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index 4a4e10efb029..34627e79e778 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableNotEnabledException; @@ -43,11 +40,9 @@ @InterfaceAudience.Private public class ScannerResultGenerator extends ResultGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerResultGenerator.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerResultGenerator.class); - public static Filter buildFilterFromModel(final ScannerModel model) - throws Exception { + public static Filter buildFilterFromModel(final ScannerModel model) throws Exception { String filter = model.getFilter(); if (filter == null || filter.length() == 0) { return null; @@ -61,20 +56,18 @@ public static Filter buildFilterFromModel(final ScannerModel model) private ResultScanner scanner; private Result cached; - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { this(tableName, rowspec, filter, -1, cacheBlocks); } - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final int caching, final boolean cacheBlocks) throws IllegalArgumentException, IOException { this(tableName, rowspec, filter, caching, cacheBlocks, -1); } - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching ,final boolean cacheBlocks, int limit) throws IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final int caching, final boolean cacheBlocks, int limit) throws IOException { Table table = RESTServlet.getInstance().getTable(tableName); try { Scan scan; @@ -85,7 +78,7 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } if (rowspec.hasColumns()) { byte[][] columns = rowspec.getColumns(); - for (byte[] column: columns) { + for (byte[] column : columns) { byte[][] split = CellUtil.parseColumn(column); if (split.length == 1) { scan.addFamily(split[0]); @@ -101,7 +94,7 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, if (filter != null) { scan.setFilter(filter); } - if (caching > 0 ) { + if (caching > 0) { scan.setCaching(caching); } if (limit > 0) { @@ -113,8 +106,8 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } scanner = table.getScanner(scan); cached = null; - id = Long.toString(EnvironmentEdgeManager.currentTime()) + - Integer.toHexString(scanner.hashCode()); + id = Long.toString(EnvironmentEdgeManager.currentTime()) + + Integer.toHexString(scanner.hashCode()); } finally { table.close(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index 0cdc2867f351..86c87eb1c00c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java 
@@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,16 +78,15 @@ private TableDescriptor getTableSchema() throws IOException, TableNotFoundExcept } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ResponseBuilder response = - Response.ok(new TableSchemaModel(getTableSchema())); + ResponseBuilder response = Response.ok(new TableSchemaModel(getTableSchema())); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -98,15 +97,13 @@ public Response get(final @Context UriInfo uriInfo) { } private Response replace(final TableName name, final TableSchemaModel model, - final UriInfo uriInfo, final Admin admin) { + final UriInfo uriInfo, final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(name); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(name); for (Map.Entry e : model.getAny().entrySet()) { tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } @@ -131,9 +128,8 @@ private Response replace(final TableName name, final TableSchemaModel model, servlet.getMetrics().incrementSucessfulPutRequests(1); } catch (TableExistsException e) { // race, someone else created a table with the same name - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Not modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Not modified" + CRLF).build(); } } return Response.created(uriInfo.getAbsolutePath()).build(); @@ -144,12 +140,11 @@ private Response replace(final TableName name, final TableSchemaModel model, } } - private Response update(final TableName name, final TableSchemaModel model, - final UriInfo uriInfo, final Admin admin) { + private Response update(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, + final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { TableDescriptorBuilder tableDescriptorBuilder = @@ -172,9 +167,8 @@ private Response update(final TableName name, final TableSchemaModel model, } } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } finally { admin.enableTable(TableName.valueOf(tableResource.getName())); } @@ -187,7 +181,7 @@ private Response update(final TableName name, final TableSchemaModel 
model, } private Response update(final TableSchemaModel model, final boolean replace, - final UriInfo uriInfo) { + final UriInfo uriInfo) { try { TableName name = TableName.valueOf(tableResource.getName()); Admin admin = servlet.getAdmin(); @@ -207,10 +201,8 @@ private Response update(final TableSchemaModel model, final boolean replace, } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -219,10 +211,8 @@ public Response put(final TableSchemaModel model, } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -230,8 +220,8 @@ public Response post(final TableSchemaModel model, return update(model, false, uriInfo); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", - justification="Expected") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DE_MIGHT_IGNORE", + justification = "Expected") @DELETE public Response delete(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { @@ -240,13 +230,14 @@ public Response delete(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) - .entity("Forbidden" + CRLF).build(); + .entity("Forbidden" + CRLF).build(); } try { Admin admin = servlet.getAdmin(); try { admin.disableTable(TableName.valueOf(tableResource.getName())); - } catch (TableNotEnabledException e) { /* this is what we want anyway */ } + } catch (TableNotEnabledException e) { + /* this is what we want anyway */ } admin.deleteTable(TableName.valueOf(tableResource.getName())); servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index d60b8eed6600..8348b79985cc 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -43,8 +41,7 @@ @InterfaceAudience.Private public class StorageClusterStatusResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterStatusResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterStatusResource.class); static CacheControl cacheControl; static { @@ -54,55 +51,48 @@ public class StorageClusterStatusResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public StorageClusterStatusResource() throws IOException { super(); } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ClusterMetrics status = servlet.getAdmin().getClusterMetrics( - EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); + ClusterMetrics status = + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); StorageClusterStatusModel model = new StorageClusterStatusModel(); model.setRegions(status.getRegionCount()); model.setRequests(status.getRequestCount()); model.setAverageLoad(status.getAverageLoad()); - for (Map.Entry entry: status.getLiveServerMetrics().entrySet()) { + for (Map.Entry entry : status.getLiveServerMetrics().entrySet()) { ServerName sn = entry.getKey(); ServerMetrics load = entry.getValue(); StorageClusterStatusModel.Node node = - model.addLiveNode( - sn.getHostname() + ":" + - Integer.toString(sn.getPort()), + model.addLiveNode(sn.getHostname() + ":" + Integer.toString(sn.getPort()), sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); node.setRequests(load.getRequestCount()); - for (RegionMetrics region: load.getRegionMetrics().values()) { - node.addRegion(region.getRegionName(), region.getStoreCount(), - region.getStoreFileCount(), + for (RegionMetrics region : load.getRegionMetrics().values()) { + node.addRegion(region.getRegionName(), region.getStoreCount(), region.getStoreFileCount(), (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE), (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE), (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE), - region.getReadRequestCount(), - region.getCpRequestCount(), - region.getWriteRequestCount(), + region.getReadRequestCount(), region.getCpRequestCount(), region.getWriteRequestCount(), (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE), (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE), (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE), - region.getCompactingCellCount(), - region.getCompactedCellCount()); + region.getCompactingCellCount(), region.getCompactedCellCount()); } } - for (ServerName name: status.getDeadServerNames()) { + for (ServerName name : status.getDeadServerNames()) { model.addDeadNode(name.toString()); } ResponseBuilder response = Response.ok(model); @@ -111,9 +101,8 @@ public Response get(final @Context UriInfo uriInfo) { return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - 
.type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java index ffa17e442394..ea7641e54cdb 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -37,8 +35,7 @@ @InterfaceAudience.Private public class StorageClusterVersionResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterVersionResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterVersionResource.class); static CacheControl cacheControl; static { @@ -48,15 +45,14 @@ public class StorageClusterVersionResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public StorageClusterVersionResource() throws IOException { super(); } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -65,17 +61,15 @@ public Response get(final @Context UriInfo uriInfo) { try { StorageClusterVersionModel model = new StorageClusterVersionModel(); model.setVersion( - servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)) - .getHBaseVersion()); + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)).getHBaseVersion()); ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index a7a40b859a04..cb47ae1d278d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -48,9 +46,7 @@ public class TableResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(TableResource.class); /** - * Constructor - * @param table - * @throws IOException + * Constructor nn */ public TableResource(String table) throws IOException { super(); @@ -63,8 +59,7 @@ String getName() { } /** - * @return true if the table exists - * @throws IOException + * @return true if the table exists n */ boolean exists() throws IOException { return servlet.getAdmin().tableExists(TableName.valueOf(table)); @@ -92,47 +87,43 @@ public SchemaResource getSchemaResource() throws IOException { @Path("{multiget: multiget.*}") public MultiRowResource getMultipleRowResource(final @QueryParam("v") String versions, - @PathParam("multiget") String path) throws IOException { + @PathParam("multiget") String path) throws IOException { return new MultiRowResource(this, versions, path.replace("multiget", "").replace("/", "")); } @Path("{rowspec: [^*]+}") public RowResource getRowResource( - // We need the @Encoded decorator so Jersey won't urldecode before - // the RowSpec constructor has a chance to parse - final @PathParam("rowspec") @Encoded String rowspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { + // We need the @Encoded decorator so Jersey won't urldecode before + // the RowSpec constructor has a chance to parse + final @PathParam("rowspec") @Encoded String rowspec, final @QueryParam("v") String versions, + final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) + throws IOException { return new RowResource(this, rowspec, versions, check, returnResult); } @Path("{suffixglobbingspec: .*\\*/.+}") public RowResource getRowResourceWithSuffixGlobbing( - // We need the @Encoded decorator so Jersey won't urldecode before - // the RowSpec constructor has a chance to parse - final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { + // We need the @Encoded decorator so Jersey won't urldecode before + // the RowSpec constructor has a chance to parse + final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, + final @QueryParam("v") String versions, final @QueryParam("check") String check, + final @QueryParam("rr") String returnResult) throws IOException { return new RowResource(this, suffixglobbingspec, versions, check, returnResult); } @Path("{scanspec: .*[*]$}") - public TableScanResource getScanResource( - final @PathParam("scanspec") String scanSpec, - @DefaultValue(Integer.MAX_VALUE + "") - @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, - @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, - @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, - @QueryParam(Constants.SCAN_COLUMN) List column, - @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions, - @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize, - @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, - @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, - @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks, - @DefaultValue("false") 
@QueryParam(Constants.SCAN_REVERSED) boolean reversed, - @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String paramFilter) { + public TableScanResource getScanResource(final @PathParam("scanspec") String scanSpec, + @DefaultValue(Integer.MAX_VALUE + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, + @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, + @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, + @QueryParam(Constants.SCAN_COLUMN) List column, + @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions, + @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize, + @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, + @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, + @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks, + @DefaultValue("false") @QueryParam(Constants.SCAN_REVERSED) boolean reversed, + @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String paramFilter) { try { Filter prefixFilter = null; Scan tableScan = new Scan(); @@ -146,9 +137,9 @@ public TableScanResource getScanResource( } if (LOG.isTraceEnabled()) { LOG.trace("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow - + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime - + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " - + maxVersions + " Batch Size => " + batchSize); + + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime + + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " + + maxVersions + " Batch Size => " + batchSize); } Table hTable = RESTServlet.getInstance().getTable(this.table); tableScan.setBatch(batchSize); @@ -159,7 +150,7 @@ public TableScanResource getScanResource( } tableScan.withStopRow(Bytes.toBytes(endRow)); for (String col : column) { - byte [][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); + byte[][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); if (parts.length == 1) { if (LOG.isTraceEnabled()) { LOG.trace("Scan family : " + Bytes.toStringBinary(parts[0])); @@ -167,8 +158,8 @@ public TableScanResource getScanResource( tableScan.addFamily(parts[0]); } else if (parts.length == 2) { if (LOG.isTraceEnabled()) { - LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) - + " " + Bytes.toStringBinary(parts[1])); + LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) + " " + + Bytes.toStringBinary(parts[1])); } tableScan.addColumn(parts[0], parts[1]); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java index d31a346757b8..e30beaa37df7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +47,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private -public class TableScanResource extends ResourceBase { +public class TableScanResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(TableScanResource.class); TableResource tableResource; @@ -93,7 +92,7 @@ public RowModel next() { List kvs = rs.listCells(); for (Cell kv : kvs) { rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - kv.getTimestamp(), CellUtil.cloneValue(kv))); + kv.getTimestamp(), CellUtil.cloneValue(kv))); } count--; if (count == 0) { @@ -108,18 +107,16 @@ public RowModel next() { @GET @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) - public Response getProtobuf( - final @Context UriInfo uriInfo, - final @HeaderParam("Accept") String contentType) { + public Response getProtobuf(final @Context UriInfo uriInfo, + final @HeaderParam("Accept") String contentType) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + - MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); - StreamingOutput stream = new ProtobufStreamingOutput(this.results, contentType, - userRequestedLimit, fetchSize); + StreamingOutput stream = + new ProtobufStreamingOutput(this.results, contentType, userRequestedLimit, fetchSize); servlet.getMetrics().incrementSucessfulScanRequests(1); ResponseBuilder response = Response.ok(stream); response.header("content-type", contentType); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java index e12ff9907b86..8b71f7086452 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -55,8 +53,7 @@ public class VersionResource extends ResourceBase { } /** - * Constructor - * @throws IOException + * Constructor n */ public VersionResource() throws IOException { super(); @@ -69,10 +66,9 @@ public VersionResource() throws IOException { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } @@ -87,8 +83,7 @@ public Response get(final @Context ServletContext context, * Dispatch to StorageClusterVersionResource */ @Path("cluster") - public StorageClusterVersionResource getClusterVersionResource() - throws IOException { + public StorageClusterVersionResource getClusterVersionResource() throws IOException { return new StorageClusterVersionResource(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 7459f8af0ad7..ff1fe141bb88 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.io.BufferedInputStream; @@ -68,8 +66,8 @@ import org.slf4j.LoggerFactory; /** - * A wrapper around HttpClient which provides some useful function and - * semantics for interacting with the REST gateway. + * A wrapper around HttpClient which provides some useful function and semantics for interacting + * with the REST gateway. */ @InterfaceAudience.Public public class Client { @@ -98,7 +96,7 @@ public Client() { } private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, - Optional trustStore) { + Optional trustStore) { this.cluster = cluster; this.conf = conf; this.sslEnabled = sslEnabled; @@ -111,10 +109,9 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, Constants.DEFAULT_REST_CLIENT_CONN_TIMEOUT); int socketTimeout = this.conf.getInt(Constants.REST_CLIENT_SOCKET_TIMEOUT, Constants.DEFAULT_REST_CLIENT_SOCKET_TIMEOUT); - RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(connTimeout) - .setSocketTimeout(socketTimeout) - .setNormalizeUri(false) // URIs should not be normalized, see HBASE-26903 + RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(connTimeout) + .setSocketTimeout(socketTimeout).setNormalizeUri(false) // URIs should not be normalized, see + // HBASE-26903 .build(); httpClientBuilder.setDefaultRequestConfig(requestConfig); @@ -124,7 +121,7 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, // automatic content compression. 
httpClientBuilder.disableContentCompression(); - if(sslEnabled && trustStore.isPresent()) { + if (sslEnabled && trustStore.isPresent()) { try { SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); @@ -147,7 +144,7 @@ public Client(Cluster cluster) { /** * Constructor - * @param cluster the cluster definition + * @param cluster the cluster definition * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, boolean sslEnabled) { @@ -156,8 +153,8 @@ public Client(Cluster cluster, boolean sslEnabled) { /** * Constructor - * @param cluster the cluster definition - * @param conf Configuration + * @param cluster the cluster definition + * @param conf Configuration * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { @@ -166,31 +163,28 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { /** * Constructor, allowing to define custom trust store (only for SSL connections) - * - * @param cluster the cluster definition - * @param trustStorePath custom trust store to use for SSL connections + * @param cluster the cluster definition + * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store - * @param trustStoreType type of custom trust store - * + * @param trustStoreType type of custom trust store * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ public Client(Cluster cluster, String trustStorePath, Optional trustStorePassword, - Optional trustStoreType) { + Optional trustStoreType) { this(cluster, HBaseConfiguration.create(), trustStorePath, trustStorePassword, trustStoreType); } /** * Constructor, allowing to define custom trust store (only for SSL connections) - * - * @param cluster the cluster definition - * @param conf Configuration - * @param trustStorePath custom trust store to use for SSL connections + * @param cluster the cluster definition + * @param conf Configuration + * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store - * @param trustStoreType type of custom trust store + * @param trustStoreType type of custom trust store * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ public Client(Cluster cluster, Configuration conf, String trustStorePath, - Optional trustStorePassword, Optional trustStoreType) { + Optional trustStorePassword, Optional trustStoreType) { char[] password = trustStorePassword.map(String::toCharArray).orElse(null); String type = trustStoreType.orElse(KeyStore.getDefaultType()); @@ -201,8 +195,8 @@ public Client(Cluster cluster, Configuration conf, String trustStorePath, } catch (KeyStoreException e) { throw new ClientTrustStoreInitializationException("Invalid trust store type: " + type, e); } - try (InputStream inputStream = new BufferedInputStream( - Files.newInputStream(new File(trustStorePath).toPath()))) { + try (InputStream inputStream = + new BufferedInputStream(Files.newInputStream(new File(trustStorePath).toPath()))) { trustStore.load(inputStream, password); } catch (CertificateException | NoSuchAlgorithmException | IOException e) { throw new ClientTrustStoreInitializationException("Trust store load error: " + trustStorePath, @@ -226,9 +220,8 @@ public HttpClient getHttpClient() { } /** - * Add extra headers. 
These extra headers will be applied to all http - * methods before they are removed. If any header is not used any more, - * client needs to remove it explicitly. + * Add extra headers. These extra headers will be applied to all http methods before they are + * removed. If any header is not used any more, client needs to remove it explicitly. */ public void addExtraHeader(final String name, final String value) { extraHeaders.put(name, value); @@ -256,25 +249,23 @@ public void removeExtraHeader(final String name) { } /** - * Execute a transaction method given only the path. Will select at random - * one of the members of the supplied cluster definition and iterate through - * the list until a transaction can be successfully completed. The - * definition of success here is a complete HTTP transaction, irrespective - * of result code. + * Execute a transaction method given only the path. Will select at random one of the members of + * the supplied cluster definition and iterate through the list until a transaction can be + * successfully completed. The definition of success here is a complete HTTP transaction, + * irrespective of result code. * @param cluster the cluster definition - * @param method the transaction method + * @param method the transaction method * @param headers HTTP header values to send - * @param path the properly urlencoded path - * @return the HTTP response code - * @throws IOException + * @param path the properly urlencoded path + * @return the HTTP response code n */ - public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, - Header[] headers, String path) throws IOException { + public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers, + String path) throws IOException { IOException lastException; if (cluster.nodes.size() < 1) { throw new IOException("Cluster is empty"); } - int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random()); + int start = (int) Math.round((cluster.nodes.size() - 1) * Math.random()); int i = start; do { cluster.lastHost = cluster.nodes.get(i); @@ -317,20 +308,19 @@ public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, /** * Execute a transaction method given a complete URI. 
- * @param method the transaction method + * @param method the transaction method * @param headers HTTP header values to send - * @param uri a properly urlencoded URI - * @return the HTTP response code - * @throws IOException + * @param uri a properly urlencoded URI + * @return the HTTP response code n */ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri) - throws IOException { + throws IOException { // method.setURI(new URI(uri, true)); - for (Map.Entry e: extraHeaders.entrySet()) { + for (Map.Entry e : extraHeaders.entrySet()) { method.addHeader(e.getKey(), e.getValue()); } if (headers != null) { - for (Header header: headers) { + for (Header header : headers) { method.addHeader(header); } } @@ -346,25 +336,24 @@ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String u long endTime = EnvironmentEdgeManager.currentTime(); if (LOG.isTraceEnabled()) { - LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + - resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); + LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); } return resp; } /** - * Execute a transaction method. Will call either executePathOnly - * or executeURI depending on whether a path only is supplied in - * 'path', or if a complete URI is passed instead, respectively. + * Execute a transaction method. Will call either executePathOnly or executeURI + * depending on whether a path only is supplied in 'path', or if a complete URI is passed instead, + * respectively. * @param cluster the cluster definition - * @param method the HTTP method + * @param method the HTTP method * @param headers HTTP header values to send - * @param path the properly urlencoded path or URI - * @return the HTTP response code - * @throws IOException + * @param path the properly urlencoded path or URI + * @return the HTTP response code n */ - public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, - String path) throws IOException { + public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path) + throws IOException { if (path.startsWith("/")) { return executePathOnly(cluster, method, headers, path); } @@ -374,7 +363,7 @@ public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] hea /** * Initiate client side Kerberos negotiation with the server. * @param method method to inject the authentication token into. - * @param uri the String to parse as a URL. + * @param uri the String to parse as a URL. * @throws IOException if unknown protocol is found. */ private void negotiate(HttpUriRequest method, String uri) throws IOException { @@ -393,7 +382,7 @@ private void negotiate(HttpUriRequest method, String uri) throws IOException { /** * Helper method that injects an authentication token to send with the method. * @param method method to inject the authentication token into. - * @param token authentication token to inject. + * @param token authentication token to inject. 
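executePathOnly() above starts at a random member of the supplied Cluster and walks the node list until one HTTP exchange completes; a small sketch of that failover behaviour from the caller's side, with placeholder host names and an illustrative path:

  import java.io.IOException;
  import org.apache.hadoop.hbase.rest.client.Client;
  import org.apache.hadoop.hbase.rest.client.Cluster;
  import org.apache.hadoop.hbase.rest.client.Response;

  public class RestClientFailoverExample {
    public static void main(String[] args) throws IOException {
      // Two gateways act as a single service; the client retries across them on failure.
      Cluster cluster = new Cluster().add("rest-1.example.com", 8080).add("rest-2.example.com", 8080);
      Client client = new Client(cluster);
      client.addExtraHeader("X-Request-Id", "demo-42");               // sent on every request until removed
      Response version = client.get("/version/cluster", "text/plain"); // path is illustrative
      System.out.println("status=" + version.getCode());
      client.removeExtraHeader("X-Request-Id");
    }
  }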
*/ private void injectToken(HttpUriRequest method, AuthenticatedURL.Token token) { String t = token.toString(); @@ -422,8 +411,7 @@ public void setCluster(Cluster cluster) { /** * Send a HEAD request * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response head(String path) throws IOException { return head(cluster, path, null); @@ -432,13 +420,11 @@ public Response head(String path) throws IOException { /** * Send a HEAD request * @param cluster the cluster definition - * @param path the path or URI + * @param path the path or URI * @param headers the HTTP headers to include in the request - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response head(Cluster cluster, String path, Header[] headers) - throws IOException { + public Response head(Cluster cluster, String path, Header[] headers) throws IOException { HttpHead method = new HttpHead(path); try { HttpResponse resp = execute(cluster, method, null, path); @@ -451,8 +437,7 @@ public Response head(Cluster cluster, String path, Header[] headers) /** * Send a GET request * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response get(String path) throws IOException { return get(cluster, path); @@ -461,9 +446,8 @@ public Response get(String path) throws IOException { /** * Send a GET request * @param cluster the cluster definition - * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @param path the path or URI + * @return a Response object with response detail n */ public Response get(Cluster cluster, String path) throws IOException { return get(cluster, path, EMPTY_HEADER_ARRAY); @@ -471,10 +455,9 @@ public Response get(Cluster cluster, String path) throws IOException { /** * Send a GET request - * @param path the path or URI + * @param path the path or URI * @param accept Accept header value - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response get(String path, String accept) throws IOException { return get(cluster, path, accept); @@ -483,13 +466,11 @@ public Response get(String path, String accept) throws IOException { /** * Send a GET request * @param cluster the cluster definition - * @param path the path or URI - * @param accept Accept header value - * @return a Response object with response detail - * @throws IOException + * @param path the path or URI + * @param accept Accept header value + * @return a Response object with response detail n */ - public Response get(Cluster cluster, String path, String accept) - throws IOException { + public Response get(Cluster cluster, String path, String accept) throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Accept", accept); return get(cluster, path, headers); @@ -497,43 +478,37 @@ public Response get(Cluster cluster, String path, String accept) /** * Send a GET request - * @param path the path or URI - * @param headers the HTTP headers to include in the request, - * Accept must be supplied - * @return a Response object with response detail - * @throws IOException + * @param path the path or URI + * @param headers the HTTP headers to include in the request, Accept must be supplied + * 
@return a Response object with response detail n */ public Response get(String path, Header[] headers) throws IOException { return get(cluster, path, headers); } /** - * Returns the response body of the HTTPResponse, if any, as an array of bytes. - * If response body is not available or cannot be read, returns null - * - * Note: This will cause the entire response body to be buffered in memory. A - * malicious server may easily exhaust all the VM memory. It is strongly - * recommended, to use getResponseAsStream if the content length of the response - * is unknown or reasonably large. - * + * Returns the response body of the HTTPResponse, if any, as an array of bytes. If response body + * is not available or cannot be read, returns null Note: This will cause the entire + * response body to be buffered in memory. A malicious server may easily exhaust all the VM + * memory. It is strongly recommended, to use getResponseAsStream if the content length of the + * response is unknown or reasonably large. * @param resp HttpResponse * @return The response body, null if body is empty - * @throws IOException If an I/O (transport) problem occurs while obtaining the - * response body. + * @throws IOException If an I/O (transport) problem occurs while obtaining the response body. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "NP_LOAD_OF_KNOWN_NULL_VALUE", justification = "null is possible return value") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_LOAD_OF_KNOWN_NULL_VALUE", + justification = "null is possible return value") public static byte[] getResponseBody(HttpResponse resp) throws IOException { if (resp.getEntity() == null) return null; try (InputStream instream = resp.getEntity().getContent()) { if (instream != null) { long contentLength = resp.getEntity().getContentLength(); if (contentLength > Integer.MAX_VALUE) { - //guard integer cast from overflow - throw new IOException("Content too large to be buffered: " + contentLength +" bytes"); + // guard integer cast from overflow + throw new IOException("Content too large to be buffered: " + contentLength + " bytes"); } - ByteArrayOutputStream outstream = new ByteArrayOutputStream( - contentLength > 0 ? (int) contentLength : 4*1024); + ByteArrayOutputStream outstream = + new ByteArrayOutputStream(contentLength > 0 ? (int) contentLength : 4 * 1024); byte[] buffer = new byte[4096]; int len; while ((len = instream.read(buffer)) > 0) { @@ -548,61 +523,56 @@ public static byte[] getResponseBody(HttpResponse resp) throws IOException { /** * Send a GET request - * @param c the cluster definition - * @param path the path or URI + * @param c the cluster definition + * @param path the path or URI * @param headers the HTTP headers to include in the request - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response get(Cluster c, String path, Header[] headers) - throws IOException { + public Response get(Cluster c, String path, Header[] headers) throws IOException { if (httpGet != null) { httpGet.releaseConnection(); } httpGet = new HttpGet(path); HttpResponse resp = execute(c, httpGet, headers, path); - return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), - resp, resp.getEntity() == null ? null : resp.getEntity().getContent()); + return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), resp, + resp.getEntity() == null ? 
null : resp.getEntity().getContent()); } /** * Send a PUT request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @return a Response object with response detail n */ - public Response put(String path, String contentType, byte[] content) - throws IOException { + public Response put(String path, String contentType, byte[] content) throws IOException { return put(cluster, path, contentType, content); } /** * Send a PUT request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr extra Header to send - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @param extraHdr extra Header to send + * @return a Response object with response detail n */ public Response put(String path, String contentType, byte[] content, Header extraHdr) - throws IOException { + throws IOException { return put(cluster, path, contentType, content, extraHdr); } /** * Send a PUT request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes + * @param content the content bytes * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return put(cluster, path, headers, content); @@ -610,16 +580,16 @@ public Response put(Cluster cluster, String path, String contentType, /** * Send a PUT request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr additional Header to send + * @param content the content bytes + * @param extraHdr additional Header to send * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 
1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -631,30 +601,25 @@ public Response put(Cluster cluster, String path, String contentType, /** * Send a PUT request - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response put(String path, Header[] headers, byte[] content) - throws IOException { + public Response put(String path, Header[] headers, byte[] content) throws IOException { return put(cluster, path, headers, content); } /** * Send a PUT request * @param cluster the cluster definition - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response put(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPut method = new HttpPut(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -669,42 +634,39 @@ public Response put(Cluster cluster, String path, Header[] headers, /** * Send a POST request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @return a Response object with response detail n */ - public Response post(String path, String contentType, byte[] content) - throws IOException { + public Response post(String path, String contentType, byte[] content) throws IOException { return post(cluster, path, contentType, content); } /** * Send a POST request - * @param path the path or URI + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr additional Header to send - * @return a Response object with response detail - * @throws IOException + * @param content the content bytes + * @param extraHdr additional Header to send + * @return a Response object with response detail n */ public Response post(String path, String contentType, byte[] content, Header extraHdr) - throws IOException { + throws IOException { return post(cluster, path, contentType, content, extraHdr); } /** * Send a POST request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes + * @param content the content bytes * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers 
= new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return post(cluster, path, headers, content); @@ -712,16 +674,16 @@ public Response post(Cluster cluster, String path, String contentType, /** * Send a POST request - * @param cluster the cluster definition - * @param path the path or URI + * @param cluster the cluster definition + * @param path the path or URI * @param contentType the content MIME type - * @param content the content bytes - * @param extraHdr additional Header to send + * @param content the content bytes + * @param extraHdr additional Header to send * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -733,30 +695,25 @@ public Response post(Cluster cluster, String path, String contentType, /** * Send a POST request - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response post(String path, Header[] headers, byte[] content) - throws IOException { + public Response post(String path, Header[] headers, byte[] content) throws IOException { return post(cluster, path, headers, content); } /** * Send a POST request * @param cluster the cluster definition - * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ - public Response post(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPost method = new HttpPost(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -772,8 +729,7 @@ public Response post(Cluster cluster, String path, Header[] headers, /** * Send a DELETE request * @param path the path or URI - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response delete(String path) throws IOException { return delete(cluster, path); @@ -781,10 +737,9 @@ public Response delete(String path) throws IOException { /** * Send a DELETE request - * @param path the path or URI + * @param path the path or URI * @param extraHdr additional Header to send - * @return a Response object with response detail - * @throws IOException + * @return a Response object with response detail n */ public Response delete(String path, Header extraHdr) throws IOException { return delete(cluster, path, extraHdr); @@ -793,7 +748,7 @@ public Response delete(String path, Header extraHdr) throws IOException { /** * Send a 
DELETE request * @param cluster the cluster definition - * @param path the path or URI + * @param path the path or URI * @return a Response object with response detail * @throws IOException for error */ @@ -812,7 +767,7 @@ public Response delete(Cluster cluster, String path) throws IOException { /** * Send a DELETE request * @param cluster the cluster definition - * @param path the path or URI + * @param path the path or URI * @return a Response object with response detail * @throws IOException for error */ @@ -829,7 +784,6 @@ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOE } } - public static class ClientTrustStoreInitializationException extends RuntimeException { public ClientTrustStoreInitializationException(String message, Throwable cause) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java index 008470826dea..dbb30adbc74b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** - * A list of 'host:port' addresses of HTTP servers operating as a single - * entity, for example multiple redundant web service gateways. + * A list of 'host:port' addresses of HTTP servers operating as a single entity, for example + * multiple redundant web service gateways. */ @InterfaceAudience.Public public class Cluster { - protected List nodes = - Collections.synchronizedList(new ArrayList()); + protected List nodes = Collections.synchronizedList(new ArrayList()); protected String lastHost; /** * Constructor */ - public Cluster() {} + public Cluster() { + } /** * Constructor @@ -99,10 +96,8 @@ public Cluster remove(String name, int port) { return remove(sb.toString()); } - @Override public String toString() { - return "Cluster{" + - "nodes=" + nodes + - ", lastHost='" + lastHost + '\'' + - '}'; + @Override + public String toString() { + return "Cluster{" + "nodes=" + nodes + ", lastHost='" + lastHost + '\'' + '}'; } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java index 0e91005ab2b8..cd0ac33dd798 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,12 +19,9 @@ import java.io.IOException; import java.io.InputStream; - import org.apache.http.Header; import org.apache.http.HttpResponse; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +48,7 @@ public Response(int code) { /** * Constructor - * @param code the HTTP response code + * @param code the HTTP response code * @param headers the HTTP response headers */ public Response(int code, Header[] headers) { @@ -61,23 +57,22 @@ public Response(int code, Header[] headers) { /** * Constructor - * @param code the HTTP response code + * @param code the HTTP response code * @param headers the HTTP response headers - * @param body the response body, can be null + * @param body the response body, can be null */ public Response(int code, Header[] headers, byte[] body) { this.code = code; this.headers = headers; this.body = body; } - + /** * Constructor. Note: this is not thread-safe - * - * @param code the HTTP response code + * @param code the HTTP response code * @param headers headers the HTTP response headers - * @param resp the response - * @param in Inputstream if the response had one. + * @param resp the response + * @param in Inputstream if the response had one. */ public Response(int code, Header[] headers, HttpResponse resp, InputStream in) { this.code = code; @@ -93,13 +88,12 @@ public Response(int code, Header[] headers, HttpResponse resp, InputStream in) { public int getCode() { return code; } - + /** * Gets the input stream instance. - * * @return an instance of InputStream class. */ - public InputStream getStream(){ + public InputStream getStream() { return this.stream; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java index b9b8a006437c..c996e75f9376 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,18 +44,16 @@ public class AuthFilter extends AuthenticationFilter { private static final int REST_PREFIX_LEN = REST_PREFIX.length(); /** - * Returns the configuration to be used by the authentication filter - * to initialize the authentication handler. - * - * This filter retrieves all HBase configurations and passes those started - * with REST_PREFIX to the authentication handler. It is useful to support - * plugging different authentication handlers. - */ + * Returns the configuration to be used by the authentication filter to initialize the + * authentication handler. This filter retrieves all HBase configurations and passes those started + * with REST_PREFIX to the authentication handler. It is useful to support plugging different + * authentication handlers. 
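getConfiguration() above forwards every HBase property whose name starts with REST_PREFIX to the authentication handler, with the prefix trimmed; a stand-alone sketch of that idea, not the filter's own code, assuming the prefix value is "hbase.rest.authentication.":

  import java.util.Map;
  import java.util.Properties;
  import org.apache.hadoop.conf.Configuration;

  public final class RestAuthProps {
    /** Copies prefixed keys, minus the prefix, into properties for the authentication handler. */
    static Properties forHandler(Configuration conf) {
      final String prefix = "hbase.rest.authentication."; // assumed value of REST_PREFIX
      Properties props = new Properties();
      for (Map.Entry<String, String> entry : conf) {
        if (entry.getKey().startsWith(prefix)) {
          props.setProperty(entry.getKey().substring(prefix.length()), entry.getValue());
        }
      }
      return props;
    }
  }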
+ */ @Override - protected Properties getConfiguration( - String configPrefix, FilterConfig filterConfig) throws ServletException { + protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) + throws ServletException { Properties props = super.getConfiguration(configPrefix, filterConfig); - //setting the cookie path to root '/' so it is used for all resources. + // setting the cookie path to root '/' so it is used for all resources. props.setProperty(AuthenticationFilter.COOKIE_PATH, "/"); Configuration conf = null; @@ -70,11 +68,10 @@ protected Properties getConfiguration( String name = entry.getKey(); if (name.startsWith(REST_PREFIX)) { String value = entry.getValue(); - if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) { + if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) { try { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); value = SecurityUtil.getServerPrincipal(value, machineName); } catch (IOException ie) { throw new ServletException("Failed to retrieve server principal", ie); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java index f74e10cae74b..efb7e2a227aa 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ import java.io.IOException; import java.util.zip.GZIPInputStream; - import javax.servlet.ReadListener; import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java index 51eba665f3fd..db41fbb5b847 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; - import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java index 3fa1ad6f857d..7c1a4f995472 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ import java.io.IOException; import java.util.zip.GZIPOutputStream; - import javax.servlet.ServletOutputStream; import javax.servlet.WriteListener; import javax.servlet.http.HttpServletResponse; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java index 53a26ea1ac80..41342214100d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.io.PrintWriter; - import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponseWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -80,7 +76,7 @@ public void flushBuffer() throws IOException { writer.flush(); } if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } else { getResponse().flushBuffer(); } @@ -90,7 +86,7 @@ public void flushBuffer() throws IOException { public void reset() { super.reset(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -101,7 +97,7 @@ public void reset() { public void resetBuffer() { super.resetBuffer(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -129,7 +125,7 @@ public void sendRedirect(String location) throws IOException { public ServletOutputStream getOutputStream() throws IOException { if (os == null) { if (!response.isCommitted() && compress) { - os = (ServletOutputStream)new GZIPResponseStream(response); + os = (ServletOutputStream) new GZIPResponseStream(response); } else { os = response.getOutputStream(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java index 4ba9eca302d0..282e8b100c43 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -33,9 +31,7 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -58,27 +54,27 @@ public void destroy() { } @Override - public void doFilter(ServletRequest req, ServletResponse rsp, - FilterChain chain) throws IOException, ServletException { - HttpServletRequest request = (HttpServletRequest)req; - HttpServletResponse response = (HttpServletResponse)rsp; + public void doFilter(ServletRequest req, ServletResponse rsp, FilterChain chain) + throws IOException, ServletException { + HttpServletRequest request = (HttpServletRequest) req; + HttpServletResponse response = (HttpServletResponse) rsp; String contentEncoding = request.getHeader("content-encoding"); String acceptEncoding = request.getHeader("accept-encoding"); String contentType = request.getHeader("content-type"); - if ((contentEncoding != null) && - (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { + if ((contentEncoding != null) && (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { request = new GZIPRequestWrapper(request); } - if (((acceptEncoding != null) && - (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) || - ((contentType != null) && mimeTypes.contains(contentType))) { + if ( + ((acceptEncoding != null) && (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) + || ((contentType != null) && mimeTypes.contains(contentType)) + ) { response = new GZIPResponseWrapper(response); } chain.doFilter(request, response); if (response instanceof GZIPResponseWrapper) { OutputStream os = response.getOutputStream(); if (os instanceof GZIPResponseStream) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java index 94eb314e01ab..47e67dbea5a2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -34,36 +33,29 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This filter provides protection against cross site request forgery (CSRF) - * attacks for REST APIs. 
Enabling this filter on an endpoint results in the - * requirement of all client to send a particular (configurable) HTTP header - * with every request. In the absense of this header the filter will reject the - * attempt as a bad request. + * This filter provides protection against cross site request forgery (CSRF) attacks for REST APIs. + * Enabling this filter on an endpoint results in the requirement of all client to send a particular + * (configurable) HTTP header with every request. In the absense of this header the filter will + * reject the attempt as a bad request. */ @InterfaceAudience.Public public class RestCsrfPreventionFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(RestCsrfPreventionFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(RestCsrfPreventionFilter.class); public static final String HEADER_USER_AGENT = "User-Agent"; - public static final String BROWSER_USER_AGENT_PARAM = - "browser-useragents-regex"; + public static final String BROWSER_USER_AGENT_PARAM = "browser-useragents-regex"; public static final String CUSTOM_HEADER_PARAM = "custom-header"; - public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = - "methods-to-ignore"; - static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; + public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = "methods-to-ignore"; + static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; public static final String HEADER_DEFAULT = "X-XSRF-HEADER"; - static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; - private String headerName = HEADER_DEFAULT; + static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; + private String headerName = HEADER_DEFAULT; private Set methodsToIgnore = null; private Set browserUserAgents; @@ -73,8 +65,7 @@ public void init(FilterConfig filterConfig) { if (customHeader != null) { headerName = customHeader; } - String customMethodsToIgnore = - filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); + String customMethodsToIgnore = filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); if (customMethodsToIgnore != null) { parseMethodsToIgnore(customMethodsToIgnore); } else { @@ -86,13 +77,14 @@ public void init(FilterConfig filterConfig) { agents = BROWSER_USER_AGENTS_DEFAULT; } parseBrowserUserAgents(agents); - LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, " + LOG.info(String.format( + "Adding cross-site request forgery (CSRF) protection, " + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", - headerName, methodsToIgnore, browserUserAgents)); + headerName, methodsToIgnore, browserUserAgents)); } void parseBrowserUserAgents(String userAgents) { - String[] agentsArray = userAgents.split(","); + String[] agentsArray = userAgents.split(","); browserUserAgents = new HashSet<>(); for (String patternString : agentsArray) { browserUserAgents.add(Pattern.compile(patternString)); @@ -106,17 +98,14 @@ void parseMethodsToIgnore(String mti) { } /** - * This method interrogates the User-Agent String and returns whether it - * refers to a browser. If its not a browser, then the requirement for the - * CSRF header will not be enforced; if it is a browser, the requirement will - * be enforced. + * This method interrogates the User-Agent String and returns whether it refers to a browser. 
If + * its not a browser, then the requirement for the CSRF header will not be enforced; if it is a + * browser, the requirement will be enforced. *
<p>
    - * A User-Agent String is considered to be a browser if it matches - * any of the regex patterns from browser-useragent-regex; the default - * behavior is to consider everything a browser that matches the following: - * "^Mozilla.*,^Opera.*". Subclasses can optionally override - * this method to use different behavior. - * + * A User-Agent String is considered to be a browser if it matches any of the regex patterns from + * browser-useragent-regex; the default behavior is to consider everything a browser that matches + * the following: "^Mozilla.*,^Opera.*". Subclasses can optionally override this method to use + * different behavior. * @param userAgent The User-Agent String, or null if there isn't one * @return true if the User-Agent String refers to a browser, false if not */ @@ -134,44 +123,38 @@ protected boolean isBrowser(String userAgent) { } /** - * Defines the minimal API requirements for the filter to execute its - * filtering logic. This interface exists to facilitate integration in - * components that do not run within a servlet container and therefore cannot - * rely on a servlet container to dispatch to the {@link #doFilter} method. - * Applications that do run inside a servlet container will not need to write - * code that uses this interface. Instead, they can use typical servlet - * container configuration mechanisms to insert the filter. + * Defines the minimal API requirements for the filter to execute its filtering logic. This + * interface exists to facilitate integration in components that do not run within a servlet + * container and therefore cannot rely on a servlet container to dispatch to the {@link #doFilter} + * method. Applications that do run inside a servlet container will not need to write code that + * uses this interface. Instead, they can use typical servlet container configuration mechanisms + * to insert the filter. */ public interface HttpInteraction { /** * Returns the value of a header. - * * @param header name of header * @return value of header */ String getHeader(String header); /** - * Returns the method. - * - * @return method + * Returns the method. n */ String getMethod(); /** * Called by the filter after it decides that the request may proceed. - * - * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API and a servlet API + * call has failed */ void proceed() throws IOException, ServletException; /** - * Called by the filter after it decides that the request is a potential - * CSRF attack and therefore must be rejected. - * - * @param code status code to send + * Called by the filter after it decides that the request is a potential CSRF attack and + * therefore must be rejected. + * @param code status code to send * @param message response message * @throws IOException if there is an I/O error */ @@ -180,31 +163,31 @@ public interface HttpInteraction { /** * Handles an {@link HttpInteraction} by applying the filtering logic. 
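The HttpInteraction interface above exists so the same CSRF check can run outside a servlet container; a hedged sketch of one possible adapter, where the header map and the rejection policy are illustrative choices rather than anything defined by the filter:

  import java.io.IOException;
  import java.util.Map;
  import javax.servlet.ServletException;
  import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter.HttpInteraction;

  /** Adapts a plain method name and header map (no servlet container) to HttpInteraction. */
  public class MapHttpInteraction implements HttpInteraction {
    private final String method;
    private final Map<String, String> headers;

    public MapHttpInteraction(String method, Map<String, String> headers) {
      this.method = method;
      this.headers = headers;
    }

    @Override
    public String getHeader(String header) {
      return headers.get(header);
    }

    @Override
    public String getMethod() {
      return method;
    }

    @Override
    public void proceed() throws IOException, ServletException {
      // Hand off to whatever non-servlet dispatch the embedding component uses; left empty here.
    }

    @Override
    public void sendError(int code, String message) throws IOException {
      // Illustrative policy only: surface the rejection as an exception.
      throw new IOException("Rejected with " + code + ": " + message);
    }
  }

An initialized RestCsrfPreventionFilter can then apply its usual checks to such an adapter through handleHttpInteraction().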
- * * @param httpInteraction caller's HTTP interaction - * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API and a servlet API call + * has failed */ public void handleHttpInteraction(HttpInteraction httpInteraction) - throws IOException, ServletException { - if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) || - methodsToIgnore.contains(httpInteraction.getMethod()) || - httpInteraction.getHeader(headerName) != null) { + throws IOException, ServletException { + if ( + !isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) + || methodsToIgnore.contains(httpInteraction.getMethod()) + || httpInteraction.getHeader(headerName) != null + ) { httpInteraction.proceed(); } else { httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST, - "Missing Required Header for CSRF Vulnerability Protection"); + "Missing Required Header for CSRF Vulnerability Protection"); } } @Override - public void doFilter(ServletRequest request, ServletResponse response, - final FilterChain chain) throws IOException, ServletException { - final HttpServletRequest httpRequest = (HttpServletRequest)request; - final HttpServletResponse httpResponse = (HttpServletResponse)response; - handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, - httpResponse, chain)); + public void doFilter(ServletRequest request, ServletResponse response, final FilterChain chain) + throws IOException, ServletException { + final HttpServletRequest httpRequest = (HttpServletRequest) request; + final HttpServletResponse httpResponse = (HttpServletResponse) response; + handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, httpResponse, chain)); } @Override @@ -212,15 +195,12 @@ public void destroy() { } /** - * Constructs a mapping of configuration properties to be used for filter - * initialization. The mapping includes all properties that start with the - * specified configuration prefix. Property names in the mapping are trimmed - * to remove the configuration prefix. - * - * @param conf configuration to read + * Constructs a mapping of configuration properties to be used for filter initialization. The + * mapping includes all properties that start with the specified configuration prefix. Property + * names in the mapping are trimmed to remove the configuration prefix. + * @param conf configuration to read * @param confPrefix configuration prefix - * @return mapping of configuration properties to be used for filter - * initialization + * @return mapping of configuration properties to be used for filter initialization */ public static Map getFilterParams(Configuration conf, String confPrefix) { Map filterConfigMap = new HashMap<>(); @@ -245,13 +225,12 @@ private static final class ServletFilterHttpInteraction implements HttpInteracti /** * Creates a new ServletFilterHttpInteraction. 
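handleHttpInteraction() above accepts a request when it is not from a browser user agent, uses an ignored method, or already carries the configured header; with the defaults shown (X-XSRF-HEADER, ignoring GET, OPTIONS, HEAD and TRACE), a caller that wants to satisfy the header check on a mutation might do the following, where the path and payload are placeholders:

  import java.io.IOException;
  import org.apache.hadoop.hbase.rest.client.Client;
  import org.apache.hadoop.hbase.rest.client.Response;

  public class CsrfAwarePut {
    /** Sends a PUT carrying the default CSRF header; presence is all the default filter checks for. */
    static int putWithCsrfHeader(Client client, String path, byte[] body) throws IOException {
      client.addExtraHeader("X-XSRF-HEADER", "1");
      try {
        Response response = client.put(path, "text/xml", body);
        return response.getCode();
      } finally {
        client.removeExtraHeader("X-XSRF-HEADER");
      }
    }
  }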
- * - * @param httpRequest request to process + * @param httpRequest request to process * @param httpResponse response to process - * @param chain filter chain to forward to if HTTP interaction is allowed + * @param chain filter chain to forward to if HTTP interaction is allowed */ public ServletFilterHttpInteraction(HttpServletRequest httpRequest, - HttpServletResponse httpResponse, FilterChain chain) { + HttpServletResponse httpResponse, FilterChain chain) { this.httpRequest = httpRequest; this.httpResponse = httpResponse; this.chain = chain; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index 128be02bb348..3a6e4fa30f9d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; @@ -39,10 +37,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; + /** - * Representation of a cell. A cell is a single value associated a column and - * optional qualifier, and either the timestamp when it was stored or the user- - * provided timestamp if one was explicitly supplied. + * Representation of a cell. A cell is a single value associated a column and optional qualifier, + * and either the timestamp when it was stored or the user- provided timestamp if one was explicitly + * supplied. * *
      * <pre>
      * <complexType name="Cell">
    @@ -58,7 +57,7 @@
      * </complexType>
      * </pre>
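The CellModel javadoc above describes a single column/qualifier/value with an optional user-provided timestamp; a brief sketch using the constructors from the hunks that follow, with placeholder family, qualifier and values:

  import org.apache.hadoop.hbase.rest.model.CellModel;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CellModelExample {
    public static void main(String[] args) {
      // Without an explicit timestamp the cell keeps HConstants.LATEST_TIMESTAMP.
      CellModel latest = new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
      CellModel stamped =
        new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q1"), 1700000000000L, Bytes.toBytes("v2"));
      System.out.println(latest.hasUserTimestamp());  // false
      System.out.println(stamped.hasUserTimestamp()); // true
    }
  }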
    */ -@XmlRootElement(name="Cell") +@XmlRootElement(name = "Cell") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellModel implements ProtobufMessageHandler, Serializable { @@ -79,41 +78,33 @@ public class CellModel implements ProtobufMessageHandler, Serializable { /** * Default constructor */ - public CellModel() {} + public CellModel() { + } /** - * Constructor - * @param column - * @param value + * Constructor nn */ public CellModel(byte[] column, byte[] value) { this(column, HConstants.LATEST_TIMESTAMP, value); } /** - * Constructor - * @param column - * @param qualifier - * @param value + * Constructor nnn */ public CellModel(byte[] column, byte[] qualifier, byte[] value) { this(column, qualifier, HConstants.LATEST_TIMESTAMP, value); } /** - * Constructor from KeyValue - * @param cell + * Constructor from KeyValue n */ public CellModel(org.apache.hadoop.hbase.Cell cell) { - this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil - .cloneValue(cell)); + this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), + CellUtil.cloneValue(cell)); } /** - * Constructor - * @param column - * @param timestamp - * @param value + * Constructor nnn */ public CellModel(byte[] column, long timestamp, byte[] value) { this.column = column; @@ -122,14 +113,9 @@ public CellModel(byte[] column, long timestamp, byte[] value) { } /** - * Constructor - * @param column - * @param qualifier - * @param timestamp - * @param value + * Constructor nnnn */ - public CellModel(byte[] column, byte[] qualifier, long timestamp, - byte[] value) { + public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) { this.column = CellUtil.makeColumn(column, qualifier); this.timestamp = timestamp; this.value = value; @@ -150,8 +136,7 @@ public void setColumn(byte[] column) { } /** - * @return true if the timestamp property has been specified by the - * user + * @return true if the timestamp property has been specified by the user */ public boolean hasUserTimestamp() { return timestamp != HConstants.LATEST_TIMESTAMP; @@ -197,8 +182,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Cell.Builder builder = Cell.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setColumn(builder.getColumn().toByteArray()); @@ -221,28 +205,18 @@ public boolean equals(Object obj) { return false; } CellModel cellModel = (CellModel) obj; - return new EqualsBuilder(). - append(column, cellModel.column). - append(timestamp, cellModel.timestamp). - append(value, cellModel.value). - isEquals(); + return new EqualsBuilder().append(column, cellModel.column) + .append(timestamp, cellModel.timestamp).append(value, cellModel.value).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(column). - append(timestamp). - append(value). - toHashCode(); + return new HashCodeBuilder().append(column).append(timestamp).append(value).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("column", column). - append("timestamp", timestamp). - append("value", value). 
- toString(); + return new ToStringBuilder(this).append("column", column).append("timestamp", timestamp) + .append("value", value).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index ebb2b1832fb1..b1287284f42b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,26 +21,23 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellSetMessage.CellSet; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - -import org.apache.yetus.audience.InterfaceAudience; - /** - * Representation of a grouping of cells. May contain cells from more than - * one row. Encapsulates RowModel and CellModel models. + * Representation of a grouping of cells. May contain cells from more than one row. Encapsulates + * RowModel and CellModel models. * *
      * <complexType name="CellSet">
    @@ -72,13 +68,13 @@
      * </complexType>
      * </pre>
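CellSetModel above groups cells from one or more rows through RowModel; a sketch of assembling and serializing a one-row payload, where the String-keyed RowModel constructor is assumed from its companion class rather than shown in this patch:

  import org.apache.hadoop.hbase.rest.model.CellModel;
  import org.apache.hadoop.hbase.rest.model.CellSetModel;
  import org.apache.hadoop.hbase.rest.model.RowModel;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CellSetExample {
    public static void main(String[] args) {
      RowModel row = new RowModel("row-1"); // assumed String-key constructor
      row.addCell(new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1")));

      CellSetModel cellSet = new CellSetModel();
      cellSet.addRow(row);
      byte[] wire = cellSet.createProtobufOutput(); // protobuf form exchanged with the REST gateway
      System.out.println(wire.length + " bytes");
    }
  }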
    */ -@XmlRootElement(name="CellSet") +@XmlRootElement(name = "CellSet") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellSetModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; - @XmlElement(name="Row") + @XmlElement(name = "Row") private List rows; /** @@ -132,8 +128,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { CellSet.Builder builder = CellSet.newBuilder(); ProtobufUtil.mergeFrom(builder, message); for (CellSet.Row row : builder.getRowsList()) { @@ -144,8 +139,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) timestamp = cell.getTimestamp(); } rowModel.addCell( - new CellModel(cell.getColumn().toByteArray(), timestamp, - cell.getData().toByteArray())); + new CellModel(cell.getColumn().toByteArray(), timestamp, cell.getData().toByteArray())); } addRow(rowModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java index 6de4b7743ef6..bd0e6f58a777 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonAnyGetter; @@ -34,7 +32,7 @@ /** * Representation of a column family schema. - * + * *
      * <complexType name="ColumnSchema">
      *   <attribute name="name" type="string"></attribute>
    @@ -42,7 +40,7 @@
      * </complexType>
      * 
    */ -@XmlRootElement(name="ColumnSchema") +@XmlRootElement(name = "ColumnSchema") @InterfaceAudience.Private public class ColumnSchemaModel implements Serializable { private static final long serialVersionUID = 1L; @@ -55,16 +53,17 @@ public class ColumnSchemaModel implements Serializable { private static QName VERSIONS = new QName(HConstants.VERSIONS); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); /** * Default constructor */ - public ColumnSchemaModel() {} + public ColumnSchemaModel() { + } /** * Add an attribute to the column family schema - * @param name the attribute name + * @param name the attribute name * @param value the attribute value */ @JsonAnySetter @@ -78,7 +77,7 @@ public void addAttribute(String name, Object value) { */ public String getAttribute(String name) { Object o = attrs.get(new QName(name)); - return o != null ? o.toString(): null; + return o != null ? o.toString() : null; } /** @@ -94,7 +93,7 @@ public String getName() { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } @@ -105,7 +104,8 @@ public void setName(String name) { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -114,7 +114,7 @@ public String toString() { sb.append("{ NAME => '"); sb.append(name); sb.append('\''); - for (Map.Entry e: attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -135,8 +135,9 @@ public String toString() { */ public boolean __getBlockcache() { Object o = attrs.get(BLOCKCACHE); - return o != null ? Boolean.parseBoolean(o.toString()) : - ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE; + return o != null + ? Boolean.parseBoolean(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE; } /** @@ -144,8 +145,9 @@ public boolean __getBlockcache() { */ public int __getBlocksize() { Object o = attrs.get(BLOCKSIZE); - return o != null ? Integer.parseInt(o.toString()) : - ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE; + return o != null + ? Integer.parseInt(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE; } /** @@ -169,8 +171,9 @@ public String __getCompression() { */ public boolean __getInMemory() { Object o = attrs.get(IN_MEMORY); - return o != null ? - Boolean.parseBoolean(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY; + return o != null + ? Boolean.parseBoolean(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY; } /** @@ -186,8 +189,9 @@ public int __getTTL() { */ public int __getVersions() { Object o = attrs.get(VERSIONS); - return o != null ? Integer.parseInt(o.toString()) : - ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS; + return o != null + ? 
Integer.parseInt(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS; } /** @@ -212,7 +216,7 @@ public void __setBloomfilter(String value) { * @param value the desired value of the COMPRESSION attribute */ public void __setCompression(String value) { - attrs.put(COMPRESSION, value); + attrs.put(COMPRESSION, value); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index aa7df1e983ab..358183e5a8bb 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -24,19 +22,16 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.rest.protobuf - .generated.NamespacePropertiesMessage.NamespaceProperties; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacePropertiesMessage.NamespaceProperties; /** * List a HBase namespace's key/value properties. @@ -48,7 +43,7 @@ *
  • value: property value
  • * */ -@XmlRootElement(name="NamespaceProperties") +@XmlRootElement(name = "NamespaceProperties") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesInstanceModel implements Serializable, ProtobufMessageHandler { @@ -56,7 +51,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan private static final long serialVersionUID = 1L; // JAX-RS automatically converts Map to XMLAnyElement. - private Map properties = null; + private Map properties = null; @XmlTransient private String namespaceName; @@ -64,12 +59,12 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * Default constructor. Do not use. */ - public NamespacesInstanceModel() {} + public NamespacesInstanceModel() { + } /** * Constructor to use if namespace does not exist in HBASE. - * @param namespaceName the namespace name. - * @throws IOException + * @param namespaceName the namespace name. n */ public NamespacesInstanceModel(String namespaceName) throws IOException { this(null, namespaceName); @@ -77,18 +72,21 @@ public NamespacesInstanceModel(String namespaceName) throws IOException { /** * Constructor - * @param admin the administrative API - * @param namespaceName the namespace name. - * @throws IOException + * @param admin the administrative API + * @param namespaceName the namespace name. n */ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException { this.namespaceName = namespaceName; - if(admin == null) { return; } + if (admin == null) { + return; + } NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName); // For properly formed JSON, if no properties, field has to be null (not just no elements). - if(nd.getConfiguration().isEmpty()){ return; } + if (nd.getConfiguration().isEmpty()) { + return; + } properties = new HashMap<>(); properties.putAll(nd.getConfiguration()); @@ -96,11 +94,11 @@ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOExcep /** * Add property to the namespace. - * @param key attribute name + * @param key attribute name * @param value attribute value */ public void addProperty(String key, String value) { - if(properties == null){ + if (properties == null) { properties = new HashMap<>(); } properties.put(key, value); @@ -109,18 +107,19 @@ public void addProperty(String key, String value) { /** * @return The map of uncategorized namespace properties. 
*/ - public Map getProperties() { - if(properties == null){ + public Map getProperties() { + if (properties == null) { properties = new HashMap<>(); } return properties; } - public String getNamespaceName(){ + public String getNamespaceName() { return namespaceName; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -129,7 +128,7 @@ public String toString() { sb.append("{NAME => \'"); sb.append(namespaceName); sb.append("\'"); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { sb.append(", "); sb.append(entry.getKey()); @@ -145,7 +144,7 @@ public String toString() { @Override public byte[] createProtobufOutput() { NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { String key = entry.getKey(); NamespaceProperties.Property.Builder property = NamespaceProperties.Property.newBuilder(); @@ -162,7 +161,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOExce NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); builder.mergeFrom(message); List properties = builder.getPropsList(); - for(NamespaceProperties.Property property: properties){ + for (NamespaceProperties.Property property : properties) { addProperty(property.getKey(), property.getValue()); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index 0be558d22553..96457da8267b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,36 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacesMessage.Namespaces; -import com.fasterxml.jackson.annotation.JsonProperty; - - /** * A list of HBase namespaces. *
 * <ul>
 * <li>Namespace: namespace name</li>
 * </ul>
    */ -@XmlRootElement(name="Namespaces") +@XmlRootElement(name = "Namespaces") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesModel implements Serializable, ProtobufMessageHandler { @@ -53,18 +47,18 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @JsonProperty("Namespace") - @XmlElement(name="Namespace") + @XmlElement(name = "Namespace") private List namespaces = new ArrayList<>(); /** * Default constructor. Do not use. */ - public NamespacesModel() {} + public NamespacesModel() { + } /** * Constructor - * @param admin the administrative API - * @throws IOException + * @param admin the administrative API n */ public NamespacesModel(Admin admin) throws IOException { NamespaceDescriptor[] nds = admin.listNamespaceDescriptors(); @@ -88,7 +82,8 @@ public void setNamespaces(List namespaces) { this.namespaces = namespaces; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index b560f697dead..34f065105d65 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -40,21 +35,20 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a row. A row is a related set of cells, grouped by common - * row key. RowModels do not appear in results by themselves. They are always - * encapsulated within CellSetModels. - * + * Representation of a row. A row is a related set of cells, grouped by common row key. RowModels do + * not appear in results by themselves. They are always encapsulated within CellSetModels. + * *
      * <complexType name="Row">
      *   <sequence>
      *     <element name="key" type="base64Binary"></element>
    - *     <element name="cell" type="tns:Cell" 
    + *     <element name="cell" type="tns:Cell"
      *       maxOccurs="unbounded" minOccurs="1"></element>
      *   </sequence>
      * </complexType>
      * 
    */ -@XmlRootElement(name="Row") +@XmlRootElement(name = "Row") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class RowModel implements ProtobufMessageHandler, Serializable { @@ -65,14 +59,14 @@ public class RowModel implements ProtobufMessageHandler, Serializable { private byte[] key; @JsonProperty("Cell") - @XmlElement(name="Cell") + @XmlElement(name = "Cell") private List cells = new ArrayList<>(); - /** * Default constructor */ - public RowModel() { } + public RowModel() { + } /** * Constructor @@ -81,7 +75,7 @@ public RowModel() { } public RowModel(final String key) { this(Bytes.toBytes(key)); } - + /** * Constructor * @param key the row key @@ -93,23 +87,23 @@ public RowModel(final byte[] key) { /** * Constructor - * @param key the row key + * @param key the row key * @param cells the cells */ public RowModel(final String key, final List cells) { this(Bytes.toBytes(key), cells); } - + /** * Constructor - * @param key the row key + * @param key the row key * @param cells the cells */ public RowModel(final byte[] key, final List cells) { this.key = key; this.cells = cells; } - + /** * Adds a cell to the list of cells for this row * @param cell the cell @@ -142,16 +136,13 @@ public List getCells() { @Override public byte[] createProtobufOutput() { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override @@ -166,25 +157,16 @@ public boolean equals(Object obj) { return false; } RowModel rowModel = (RowModel) obj; - return new EqualsBuilder(). - append(key, rowModel.key). - append(cells, rowModel.cells). - isEquals(); + return new EqualsBuilder().append(key, rowModel.key).append(cells, rowModel.cells).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(key). - append(cells). - toHashCode(); + return new HashCodeBuilder().append(key).append(cells).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("key", key). - append("cells", cells). - toString(); + return new ToStringBuilder(this).append("key", key).append("cells", cells).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index de1af216f8b3..2cd80b2858cb 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -97,7 +96,7 @@ * </complexType> * */ -@XmlRootElement(name="Scanner") +@XmlRootElement(name = "Scanner") @JsonInclude(JsonInclude.Include.NON_NULL) @InterfaceAudience.Private public class ScannerModel implements ProtobufMessageHandler, Serializable { @@ -118,8 +117,8 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { private int limit = -1; /** - * Implement lazily-instantiated singleton as per recipe - * here: http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ + * Implement lazily-instantiated singleton as per recipe here: + * http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ */ private static class JaxbJsonProviderHolder { static final JacksonJaxbJsonProvider INSTANCE = new JacksonJaxbJsonProvider(); @@ -130,9 +129,12 @@ static class FilterModel { @XmlRootElement static class ByteArrayComparableModel { - @XmlAttribute public String type; - @XmlAttribute public String value; - @XmlAttribute public String op; + @XmlAttribute + public String type; + @XmlAttribute + public String value; + @XmlAttribute + public String op; static enum ComparatorType { BinaryComparator, @@ -143,10 +145,10 @@ static enum ComparatorType { SubstringComparator } - public ByteArrayComparableModel() { } + public ByteArrayComparableModel() { + } - public ByteArrayComparableModel( - ByteArrayComparable comparator) { + public ByteArrayComparableModel(ByteArrayComparable comparator) { String typeName = comparator.getClass().getSimpleName(); ComparatorType type = ComparatorType.valueOf(typeName); this.type = typeName; @@ -157,7 +159,7 @@ public ByteArrayComparableModel( break; case BitComparator: this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue())); - this.op = ((BitComparator)comparator).getOperator().toString(); + this.op = ((BitComparator) comparator).getOperator().toString(); break; case NullComparator: break; @@ -181,7 +183,7 @@ public ByteArrayComparable build() { break; case BitComparator: comparator = new BitComparator(Base64.getDecoder().decode(value), - BitComparator.BitwiseOp.valueOf(op)); + BitComparator.BitwiseOp.valueOf(op)); break; case NullComparator: comparator = new NullComparator(); @@ -202,26 +204,46 @@ public ByteArrayComparable build() { // A grab bag of fields, would have been a union if this were C. // These are null by default and will only be serialized if set (non null). 
- @XmlAttribute public String type; - @XmlAttribute public String op; - @XmlElement ByteArrayComparableModel comparator; - @XmlAttribute public String value; - @XmlElement public List filters; - @XmlAttribute public Integer limit; - @XmlAttribute public Integer offset; - @XmlAttribute public String family; - @XmlAttribute public String qualifier; - @XmlAttribute public Boolean ifMissing; - @XmlAttribute public Boolean latestVersion; - @XmlAttribute public String minColumn; - @XmlAttribute public Boolean minColumnInclusive; - @XmlAttribute public String maxColumn; - @XmlAttribute public Boolean maxColumnInclusive; - @XmlAttribute public Boolean dropDependentColumn; - @XmlAttribute public Float chance; - @XmlElement public List prefixes; - @XmlElement private List ranges; - @XmlElement public List timestamps; + @XmlAttribute + public String type; + @XmlAttribute + public String op; + @XmlElement + ByteArrayComparableModel comparator; + @XmlAttribute + public String value; + @XmlElement + public List filters; + @XmlAttribute + public Integer limit; + @XmlAttribute + public Integer offset; + @XmlAttribute + public String family; + @XmlAttribute + public String qualifier; + @XmlAttribute + public Boolean ifMissing; + @XmlAttribute + public Boolean latestVersion; + @XmlAttribute + public String minColumn; + @XmlAttribute + public Boolean minColumnInclusive; + @XmlAttribute + public String maxColumn; + @XmlAttribute + public Boolean maxColumnInclusive; + @XmlAttribute + public Boolean dropDependentColumn; + @XmlAttribute + public Float chance; + @XmlElement + public List prefixes; + @XmlElement + private List ranges; + @XmlElement + public List timestamps; static enum FilterType { ColumnCountGetFilter, @@ -249,7 +271,8 @@ static enum FilterType { WhileMatchFilter } - public FilterModel() { } + public FilterModel() { + } public FilterModel(Filter filter) { String typeName = filter.getClass().getSimpleName(); @@ -257,25 +280,25 @@ public FilterModel(Filter filter) { this.type = typeName; switch (type) { case ColumnCountGetFilter: - this.limit = ((ColumnCountGetFilter)filter).getLimit(); + this.limit = ((ColumnCountGetFilter) filter).getLimit(); break; case ColumnPaginationFilter: - this.limit = ((ColumnPaginationFilter)filter).getLimit(); - this.offset = ((ColumnPaginationFilter)filter).getOffset(); + this.limit = ((ColumnPaginationFilter) filter).getLimit(); + this.offset = ((ColumnPaginationFilter) filter).getOffset(); break; case ColumnPrefixFilter: - byte[] src = ((ColumnPrefixFilter)filter).getPrefix(); + byte[] src = ((ColumnPrefixFilter) filter).getPrefix(); this.value = Bytes.toString(Base64.getEncoder().encode(src)); break; case ColumnRangeFilter: - ColumnRangeFilter crf = (ColumnRangeFilter)filter; + ColumnRangeFilter crf = (ColumnRangeFilter) filter; this.minColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMinColumn())); this.minColumnInclusive = crf.getMinColumnInclusive(); this.maxColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMaxColumn())); this.maxColumnInclusive = crf.getMaxColumnInclusive(); break; case DependentColumnFilter: { - DependentColumnFilter dcf = (DependentColumnFilter)filter; + DependentColumnFilter dcf = (DependentColumnFilter) filter; this.family = Bytes.toString(Base64.getEncoder().encode(dcf.getFamily())); byte[] qualifier = dcf.getQualifier(); if (qualifier != null) { @@ -284,11 +307,12 @@ public FilterModel(Filter filter) { this.op = dcf.getCompareOperator().toString(); this.comparator = new ByteArrayComparableModel(dcf.getComparator()); 
this.dropDependentColumn = dcf.dropDependentColumn(); - } break; + } + break; case FilterList: - this.op = ((FilterList)filter).getOperator().toString(); + this.op = ((FilterList) filter).getOperator().toString(); this.filters = new ArrayList<>(); - for (Filter child: ((FilterList)filter).getFilters()) { + for (Filter child : ((FilterList) filter).getFilters()) { this.filters.add(new FilterModel(child)); } break; @@ -296,40 +320,38 @@ public FilterModel(Filter filter) { case KeyOnlyFilter: break; case InclusiveStopFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((InclusiveStopFilter)filter).getStopRowKey())); + this.value = Bytes + .toString(Base64.getEncoder().encode(((InclusiveStopFilter) filter).getStopRowKey())); break; case MultipleColumnPrefixFilter: this.prefixes = new ArrayList<>(); - for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) { + for (byte[] prefix : ((MultipleColumnPrefixFilter) filter).getPrefix()) { this.prefixes.add(Bytes.toString(Base64.getEncoder().encode(prefix))); } break; case MultiRowRangeFilter: this.ranges = new ArrayList<>(); - for(RowRange range : ((MultiRowRangeFilter)filter).getRowRanges()) { + for (RowRange range : ((MultiRowRangeFilter) filter).getRowRanges()) { this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), - range.getStopRow(), range.isStopRowInclusive())); + range.getStopRow(), range.isStopRowInclusive())); } break; case PageFilter: - this.value = Long.toString(((PageFilter)filter).getPageSize()); + this.value = Long.toString(((PageFilter) filter).getPageSize()); break; case PrefixFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((PrefixFilter)filter).getPrefix())); + this.value = + Bytes.toString(Base64.getEncoder().encode(((PrefixFilter) filter).getPrefix())); break; case FamilyFilter: case QualifierFilter: case RowFilter: case ValueFilter: - this.op = ((CompareFilter)filter).getCompareOperator().toString(); - this.comparator = - new ByteArrayComparableModel( - ((CompareFilter)filter).getComparator()); + this.op = ((CompareFilter) filter).getCompareOperator().toString(); + this.comparator = new ByteArrayComparableModel(((CompareFilter) filter).getComparator()); break; case RandomRowFilter: - this.chance = ((RandomRowFilter)filter).getChance(); + this.chance = ((RandomRowFilter) filter).getChance(); break; case SingleColumnValueExcludeFilter: case SingleColumnValueFilter: { @@ -340,26 +362,25 @@ public FilterModel(Filter filter) { this.qualifier = Bytes.toString(Base64.getEncoder().encode(qualifier)); } this.op = scvf.getCompareOperator().toString(); - this.comparator = - new ByteArrayComparableModel(scvf.getComparator()); + this.comparator = new ByteArrayComparableModel(scvf.getComparator()); if (scvf.getFilterIfMissing()) { this.ifMissing = true; } if (scvf.getLatestVersionOnly()) { this.latestVersion = true; } - } break; + } + break; case SkipFilter: this.filters = new ArrayList<>(); - this.filters.add(new FilterModel(((SkipFilter)filter).getFilter())); + this.filters.add(new FilterModel(((SkipFilter) filter).getFilter())); break; case TimestampsFilter: - this.timestamps = ((TimestampsFilter)filter).getTimestamps(); + this.timestamps = ((TimestampsFilter) filter).getTimestamps(); break; case WhileMatchFilter: this.filters = new ArrayList<>(); - this.filters.add( - new FilterModel(((WhileMatchFilter)filter).getFilter())); + this.filters.add(new FilterModel(((WhileMatchFilter) filter).getFilter())); break; default: throw new RuntimeException("unhandled filter 
type " + type); @@ -369,105 +390,107 @@ public FilterModel(Filter filter) { public Filter build() { Filter filter; switch (FilterType.valueOf(type)) { - case ColumnCountGetFilter: - filter = new ColumnCountGetFilter(limit); - break; - case ColumnPaginationFilter: - filter = new ColumnPaginationFilter(limit, offset); - break; - case ColumnPrefixFilter: - filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); - break; - case ColumnRangeFilter: - filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), - minColumnInclusive, Base64.getDecoder().decode(maxColumn), - maxColumnInclusive); - break; - case DependentColumnFilter: - filter = new DependentColumnFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - dropDependentColumn, CompareOperator.valueOf(op), comparator.build()); - break; - case FamilyFilter: - filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case FilterList: { - List list = new ArrayList<>(filters.size()); - for (FilterModel model: filters) { - list.add(model.build()); - } - filter = new FilterList(FilterList.Operator.valueOf(op), list); - } break; - case FirstKeyOnlyFilter: - filter = new FirstKeyOnlyFilter(); - break; - case InclusiveStopFilter: - filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); - break; - case KeyOnlyFilter: - filter = new KeyOnlyFilter(); - break; - case MultipleColumnPrefixFilter: { - byte[][] values = new byte[prefixes.size()][]; - for (int i = 0; i < prefixes.size(); i++) { - values[i] = Base64.getDecoder().decode(prefixes.get(i)); - } - filter = new MultipleColumnPrefixFilter(values); - } break; - case MultiRowRangeFilter: { - filter = new MultiRowRangeFilter(ranges); - } break; - case PageFilter: - filter = new PageFilter(Long.parseLong(value)); - break; - case PrefixFilter: - filter = new PrefixFilter(Base64.getDecoder().decode(value)); - break; - case QualifierFilter: - filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case RandomRowFilter: - filter = new RandomRowFilter(chance); - break; - case RowFilter: - filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case SingleColumnValueFilter: - filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing); - } - if (latestVersion != null) { - ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion); + case ColumnCountGetFilter: + filter = new ColumnCountGetFilter(limit); + break; + case ColumnPaginationFilter: + filter = new ColumnPaginationFilter(limit, offset); + break; + case ColumnPrefixFilter: + filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); + break; + case ColumnRangeFilter: + filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), minColumnInclusive, + Base64.getDecoder().decode(maxColumn), maxColumnInclusive); + break; + case DependentColumnFilter: + filter = new DependentColumnFilter(Base64.getDecoder().decode(family), + qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, dropDependentColumn, + CompareOperator.valueOf(op), comparator.build()); + break; + case FamilyFilter: + filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case FilterList: { + List list = new ArrayList<>(filters.size()); + for (FilterModel model : filters) { + list.add(model.build()); + } + filter = new FilterList(FilterList.Operator.valueOf(op), list); } - break; - case SingleColumnValueExcludeFilter: - filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing); + break; + case FirstKeyOnlyFilter: + filter = new FirstKeyOnlyFilter(); + break; + case InclusiveStopFilter: + filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); + break; + case KeyOnlyFilter: + filter = new KeyOnlyFilter(); + break; + case MultipleColumnPrefixFilter: { + byte[][] values = new byte[prefixes.size()][]; + for (int i = 0; i < prefixes.size(); i++) { + values[i] = Base64.getDecoder().decode(prefixes.get(i)); + } + filter = new MultipleColumnPrefixFilter(values); } - if (latestVersion != null) { - ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion); + break; + case MultiRowRangeFilter: { + filter = new MultiRowRangeFilter(ranges); } - break; - case SkipFilter: - filter = new SkipFilter(filters.get(0).build()); - break; - case TimestampsFilter: - filter = new TimestampsFilter(timestamps); - break; - case ValueFilter: - filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case WhileMatchFilter: - filter = new WhileMatchFilter(filters.get(0).build()); - break; - default: - throw new RuntimeException("unhandled filter type: " + type); + break; + case PageFilter: + filter = new PageFilter(Long.parseLong(value)); + break; + case PrefixFilter: + filter = new PrefixFilter(Base64.getDecoder().decode(value)); + break; + case QualifierFilter: + filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case RandomRowFilter: + filter = new RandomRowFilter(chance); + break; + case RowFilter: + filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case SingleColumnValueFilter: + filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SingleColumnValueExcludeFilter: + filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), + qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueExcludeFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueExcludeFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SkipFilter: + filter = new SkipFilter(filters.get(0).build()); + break; + case TimestampsFilter: + filter = new TimestampsFilter(timestamps); + break; + case ValueFilter: + filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case WhileMatchFilter: + filter = new WhileMatchFilter(filters.get(0).build()); + break; + default: + throw new RuntimeException("unhandled filter type: " + type); } return filter; } @@ -476,7 +499,6 @@ public Filter build() { /** * Get the JacksonJaxbJsonProvider instance; - * * @return A JacksonJaxbJsonProvider. */ private static JacksonJaxbJsonProvider getJasonProvider() { @@ -485,40 +507,38 @@ private static JacksonJaxbJsonProvider getJasonProvider() { /** * @param s the JSON representation of the filter - * @return the filter - * @throws Exception + * @return the filter n */ public static Filter buildFilter(String s) throws Exception { - FilterModel model = getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).readValue(s, FilterModel.class); + FilterModel model = + getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .readValue(s, FilterModel.class); return model.build(); } /** * @param filter the filter - * @return the JSON representation of the filter - * @throws Exception + * @return the JSON representation of the filter n */ public static String stringifyFilter(final Filter filter) throws Exception { - return getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).writeValueAsString(new FilterModel(filter)); + return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .writeValueAsString(new FilterModel(filter)); } private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":"); /** - * @param scan the scan specification - * @throws Exception + * @param scan the scan specification n */ public static ScannerModel fromScan(Scan scan) throws Exception { ScannerModel model = new ScannerModel(); model.setStartRow(scan.getStartRow()); model.setEndRow(scan.getStopRow()); - Map> families = scan.getFamilyMap(); + Map> families = scan.getFamilyMap(); if (families != null) { - for (Map.Entry> entry : families.entrySet()) { + for (Map.Entry> entry : families.entrySet()) { if (entry.getValue() != null) { - for (byte[] qualifier: entry.getValue()) { + for (byte[] qualifier : entry.getValue()) { model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier)); } } else { @@ -561,22 +581,22 @@ public static ScannerModel fromScan(Scan scan) throws Exception { /** * Default constructor */ - public ScannerModel() {} + public ScannerModel() { + } /** * Constructor - * @param startRow the start key of the row-range - * @param endRow the end key of the row-range - * @param columns the columns to scan - * @param batch the number of values to return in batch - * @param caching the number of rows that the scanner will fetch at once - * @param endTime the upper bound on timestamps of values of interest + * @param startRow the start key of the row-range + * @param endRow the end key of the row-range + * @param columns the columns to scan + * @param batch the number of values to return in 
batch + * @param caching the number of rows that the scanner will fetch at once + * @param endTime the upper bound on timestamps of values of interest * @param maxVersions the maximum number of versions to return - * @param filter a filter specification - * (values with timestamps later than this are excluded) + * @param filter a filter specification (values with timestamps later than this are excluded) */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long endTime, int maxVersions, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long endTime, int maxVersions, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -590,19 +610,19 @@ public ScannerModel(byte[] startRow, byte[] endRow, List columns, /** * Constructor - * @param startRow the start key of the row-range - * @param endRow the end key of the row-range - * @param columns the columns to scan - * @param batch the number of values to return in batch - * @param caching the number of rows that the scanner will fetch at once - * @param startTime the lower bound on timestamps of values of interest - * (values with timestamps earlier than this are excluded) - * @param endTime the upper bound on timestamps of values of interest - * (values with timestamps later than this are excluded) - * @param filter a filter specification - */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long startTime, long endTime, String filter) { + * @param startRow the start key of the row-range + * @param endRow the end key of the row-range + * @param columns the columns to scan + * @param batch the number of values to return in batch + * @param caching the number of rows that the scanner will fetch at once + * @param startTime the lower bound on timestamps of values of interest (values with timestamps + * earlier than this are excluded) + * @param endTime the upper bound on timestamps of values of interest (values with timestamps + * later than this are excluded) + * @param filter a filter specification + */ + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long startTime, long endTime, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -628,6 +648,7 @@ public void addColumn(byte[] column) { public void addLabel(String label) { labels.add(label); } + /** * @return true if a start row was specified */ @@ -661,12 +682,12 @@ public byte[] getEndRow() { /** * @return list of columns of interest in column:qualifier format, or empty for all */ - @XmlElement(name="column") + @XmlElement(name = "column") public List getColumns() { return columns; } - @XmlElement(name="labels") + @XmlElement(name = "labels") public List getLabels() { return labels; } @@ -771,7 +792,8 @@ public void setCaching(int caching) { } /** - * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise + * @param value true if HFile blocks should be cached on the servers for this scan, false + * otherwise */ public void setCacheBlocks(boolean value) { this.cacheBlocks = value; @@ -821,7 +843,7 @@ public byte[] createProtobufOutput() { if (!Bytes.equals(endRow, HConstants.EMPTY_START_ROW)) { builder.setEndRow(UnsafeByteOperations.unsafeWrap(endRow)); } - for (byte[] column: columns) { + for (byte[] column : columns) { builder.addColumns(UnsafeByteOperations.unsafeWrap(column)); } if (startTime != 0) { @@ -834,7 
+856,7 @@ public byte[] createProtobufOutput() { if (caching > 0) { builder.setCaching(caching); } - if (limit > 0){ + if (limit > 0) { builder.setLimit(limit); } builder.setMaxVersions(maxVersions); @@ -850,8 +872,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Scanner.Builder builder = Scanner.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasStartRow()) { @@ -860,7 +881,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) if (builder.hasEndRow()) { endRow = builder.getEndRow().toByteArray(); } - for (ByteString column: builder.getColumnsList()) { + for (ByteString column : builder.getColumnsList()) { addColumn(column.toByteArray()); } if (builder.hasBatch()) { @@ -886,7 +907,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) } if (builder.getLabelsList() != null) { List labels = builder.getLabelsList(); - for(String label : labels) { + for (String label : labels) { addLabel(label); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index cb1e7f76028a..16783e95749b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,34 +18,30 @@ package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.util.Bytes; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; /** * Representation of the status of a storage cluster: *

    *

      *
 * <li>regions: the total number of regions served by the cluster</li>
- * <li>requests: the total number of requests per second handled by the
- * cluster in the last reporting interval</li>
+ * <li>requests: the total number of requests per second handled by the cluster in the last
+ * reporting interval</li>
 * <li>averageLoad: the average load of the region servers in the cluster</li>
 * <li>liveNodes: detailed status of the live region servers</li>
 * <li>deadNodes: the names of region servers declared dead</li>
    • @@ -97,7 +92,7 @@ * </complexType> * */ -@XmlRootElement(name="ClusterStatus") +@XmlRootElement(name = "ClusterStatus") @InterfaceAudience.Private public class StorageClusterStatusModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -145,18 +140,17 @@ public Region(byte[] name) { /** * Constructor - * @param name the region name - * @param stores the number of stores - * @param storefiles the number of store files - * @param storefileSizeMB total size of store files, in MB - * @param memstoreSizeMB total size of memstore, in MB + * @param name the region name + * @param stores the number of stores + * @param storefiles the number of store files + * @param storefileSizeMB total size of store files, in MB + * @param memstoreSizeMB total size of memstore, in MB * @param storefileIndexSizeKB total size of store file indexes, in KB */ - public Region(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, - long readRequestsCount, long cpRequestsCount, long writeRequestsCount, - int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { + public Region(byte[] name, int stores, int storefiles, int storefileSizeMB, + int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, long cpRequestsCount, + long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB, + int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) { this.name = name; this.stores = stores; this.storefiles = storefiles; @@ -300,8 +294,7 @@ public void setCpRequestsCount(long cpRequestsCount) { } /** - * @param rootIndexSizeKB The current total size of root-level indexes - * for the region, in KB + * @param rootIndexSizeKB The current total size of root-level indexes for the region, in KB */ public void setRootIndexSizeKB(int rootIndexSizeKB) { this.rootIndexSizeKB = rootIndexSizeKB; @@ -315,32 +308,31 @@ public void setWriteRequestsCount(long writeRequestsCount) { } /** - * @param currentCompactedKVs The completed count of key values - * in currently running compaction + * @param currentCompactedKVs The completed count of key values in currently running + * compaction */ public void setCurrentCompactedKVs(long currentCompactedKVs) { this.currentCompactedKVs = currentCompactedKVs; } /** - * @param totalCompactingKVs The total compacting key values - * in currently running compaction + * @param totalCompactingKVs The total compacting key values in currently running compaction */ public void setTotalCompactingKVs(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; } /** - * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, - * not just loaded into the block cache, in KB. + * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, not just loaded + * into the block cache, in KB. */ public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; } /** - * @param totalStaticIndexSizeKB The total size of all index blocks, - * not just the root level, in KB. + * @param totalStaticIndexSizeKB The total size of all index blocks, not just the root level, + * in KB. 
*/ public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) { this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; @@ -400,15 +392,14 @@ public void setStorefileIndexSizeKB(long storefileIndexSizeKB) { * Add a region name to the list * @param name the region name */ - public void addRegion(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, - long readRequestsCount, long cpRequestsCount, long writeRequestsCount, - int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { - regions.add(new Region(name, stores, storefiles, storefileSizeMB, - memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, cpRequestsCount, - writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, - totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); + public void addRegion(byte[] name, int stores, int storefiles, int storefileSizeMB, + int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, long cpRequestsCount, + long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB, + int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) { + regions.add( + new Region(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, storefileIndexSizeKB, + readRequestsCount, cpRequestsCount, writeRequestsCount, rootIndexSizeKB, + totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); } /** @@ -422,11 +413,12 @@ public Region getRegion(int index) { /** * Default constructor */ - public Node() {} + public Node() { + } /** * Constructor - * @param name the region server name + * @param name the region server name * @param startCode the region server's start code */ public Node(String name, long startCode) { @@ -469,7 +461,7 @@ public int getMaxHeapSizeMB() { /** * @return the list of regions served by the region server */ - @XmlElement(name="Region") + @XmlElement(name = "Region") public List getRegions() { return regions; } @@ -533,9 +525,9 @@ public void setRequests(long requests) { /** * Add a live node to the cluster representation. 
- * @param name the region server name - * @param startCode the region server's start code - * @param heapSizeMB the current heap size, in MB + * @param name the region server name + * @param startCode the region server's start code + * @param heapSizeMB the current heap size, in MB * @param maxHeapSizeMB the maximum heap size, in MB */ public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) { @@ -608,7 +600,7 @@ public int getRegions() { /** * @return the total number of requests per second handled by the cluster in the last reporting - * interval + * interval */ @XmlAttribute public long getRequests() { @@ -661,9 +653,8 @@ public void setAverageLoad(double averageLoad) { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append(String.format("%d live servers, %d dead servers, " + - "%.4f average load%n%n", liveNodes.size(), deadNodes.size(), - averageLoad)); + sb.append(String.format("%d live servers, %d dead servers, " + "%.4f average load%n%n", + liveNodes.size(), deadNodes.size(), averageLoad)); if (!liveNodes.isEmpty()) { sb.append(liveNodes.size()); sb.append(" live servers\n"); @@ -735,8 +726,7 @@ public byte[] createProtobufOutput() { builder.setRequests(requests); builder.setAverageLoad(averageLoad); for (Node node : liveNodes) { - StorageClusterStatus.Node.Builder nodeBuilder = - StorageClusterStatus.Node.newBuilder(); + StorageClusterStatus.Node.Builder nodeBuilder = StorageClusterStatus.Node.newBuilder(); nodeBuilder.setName(node.name); nodeBuilder.setStartCode(node.startCode); nodeBuilder.setRequests(node.requests); @@ -785,26 +775,16 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOExce for (StorageClusterStatus.Node node : builder.getLiveNodesList()) { long startCode = node.hasStartCode() ? node.getStartCode() : -1; StorageClusterStatusModel.Node nodeModel = - addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), - node.getMaxHeapSizeMB()); + addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), node.getMaxHeapSizeMB()); long requests = node.hasRequests() ? 
node.getRequests() : 0; nodeModel.setRequests(requests); for (StorageClusterStatus.Region region : node.getRegionsList()) { - nodeModel.addRegion( - region.getName().toByteArray(), - region.getStores(), - region.getStorefiles(), - region.getStorefileSizeMB(), - region.getMemStoreSizeMB(), - region.getStorefileIndexSizeKB(), - region.getReadRequestsCount(), - region.getCpRequestsCount(), - region.getWriteRequestsCount(), - region.getRootIndexSizeKB(), - region.getTotalStaticIndexSizeKB(), - region.getTotalStaticBloomSizeKB(), - region.getTotalCompactingKVs(), - region.getCurrentCompactedKVs()); + nodeModel.addRegion(region.getName().toByteArray(), region.getStores(), + region.getStorefiles(), region.getStorefileSizeMB(), region.getMemStoreSizeMB(), + region.getStorefileIndexSizeKB(), region.getReadRequestsCount(), + region.getCpRequestsCount(), region.getWriteRequestsCount(), region.getRootIndexSizeKB(), + region.getTotalStaticIndexSizeKB(), region.getTotalStaticBloomSizeKB(), + region.getTotalCompactingKVs(), region.getCurrentCompactedKVs()); } } for (String node : builder.getDeadNodesList()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java index 584099765c7b..e23c6af81776 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.yetus.audience.InterfaceAudience; /** * Simple representation of the version of the storage cluster - * + * *
        * <complexType name="StorageClusterVersion">
        *   <attribute name="version" type="string"></attribute>
        * </complexType>
        * 
      */ -@XmlRootElement(name="ClusterVersion") +@XmlRootElement(name = "ClusterVersion") @InterfaceAudience.Private public class StorageClusterVersionModel implements Serializable { private static final long serialVersionUID = 1L; @@ -45,7 +41,7 @@ public class StorageClusterVersionModel implements Serializable { /** * @return the storage cluster version */ - @XmlAttribute(name="Version") + @XmlAttribute(name = "Version") public String getVersion() { return version; } @@ -57,7 +53,8 @@ public void setVersion(String version) { this.version = version; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -65,10 +62,9 @@ public String toString() { return version; } - //needed for jackson deserialization + // needed for jackson deserialization private static StorageClusterVersionModel valueOf(String value) { - StorageClusterVersionModel versionModel - = new StorageClusterVersionModel(); + StorageClusterVersionModel versionModel = new StorageClusterVersionModel(); versionModel.setVersion(value); return versionModel; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java index 320062512152..9bb68f962a99 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableInfoMessage.TableInfo; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - /** * Representation of a list of table regions. 
* @@ -49,7 +45,7 @@ * </complexType> * */ -@XmlRootElement(name="TableInfo") +@XmlRootElement(name = "TableInfo") @InterfaceAudience.Private public class TableInfoModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -60,11 +56,11 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { /** * Default constructor */ - public TableInfoModel() {} + public TableInfoModel() { + } /** - * Constructor - * @param name + * Constructor n */ public TableInfoModel(String name) { this.name = name; @@ -97,7 +93,7 @@ public String getName() { /** * @return the regions */ - @XmlElement(name="Region") + @XmlElement(name = "Region") public List getRegions() { return regions; } @@ -116,13 +112,14 @@ public void setRegions(List regions) { this.regions = regions; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); - for(TableRegionModel aRegion : regions) { + for (TableRegionModel aRegion : regions) { sb.append(aRegion.toString()); sb.append('\n'); } @@ -133,7 +130,7 @@ public String toString() { public byte[] createProtobufOutput() { TableInfo.Builder builder = TableInfo.newBuilder(); builder.setName(name); - for (TableRegionModel aRegion: regions) { + for (TableRegionModel aRegion : regions) { TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder(); regionBuilder.setName(aRegion.getName()); regionBuilder.setId(aRegion.getId()); @@ -146,16 +143,14 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableInfo.Builder builder = TableInfo.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setName(builder.getName()); - for (TableInfo.Region region: builder.getRegionsList()) { - add(new TableRegionModel(builder.getName(), region.getId(), - region.getStartKey().toByteArray(), - region.getEndKey().toByteArray(), - region.getLocation())); + for (TableInfo.Region region : builder.getRegionsList()) { + add( + new TableRegionModel(builder.getName(), region.getId(), region.getStartKey().toByteArray(), + region.getEndKey().toByteArray(), region.getLocation())); } return this; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java index 8d3e1ab04641..a092d179af8c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlElementRef; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableListMessage.TableList; @@ -36,7 +32,7 @@ /** * Simple representation of a list of table names. */ -@XmlRootElement(name="TableList") +@XmlRootElement(name = "TableList") @InterfaceAudience.Private public class TableListModel implements Serializable, ProtobufMessageHandler { @@ -47,7 +43,8 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { /** * Default constructor */ - public TableListModel() {} + public TableListModel() { + } /** * Add the table name model to the list @@ -68,7 +65,7 @@ public TableModel get(int index) { /** * @return the tables */ - @XmlElementRef(name="table") + @XmlElementRef(name = "table") public List getTables() { return tables; } @@ -80,13 +77,14 @@ public void setTables(List tables) { this.tables = tables; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); - for(TableModel aTable : tables) { + for (TableModel aTable : tables) { sb.append(aTable.toString()); sb.append('\n'); } @@ -103,11 +101,10 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableList.Builder builder = TableList.newBuilder(); ProtobufUtil.mergeFrom(builder, message); - for (String table: builder.getNameList()) { + for (String table : builder.getNameList()) { this.add(new TableModel(table)); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java index 4628263e9922..75513eadbaad 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.yetus.audience.InterfaceAudience; /** * Simple representation of a table name. - * + * *
        * <complexType name="Table">
        *   <sequence>
      @@ -37,22 +33,22 @@
        * </complexType>
        * 
      */ -@XmlRootElement(name="table") +@XmlRootElement(name = "table") @InterfaceAudience.Private public class TableModel implements Serializable { private static final long serialVersionUID = 1L; - + private String name; - + /** * Default constructor */ - public TableModel() {} + public TableModel() { + } /** - * Constructor - * @param name + * Constructor n */ public TableModel(String name) { super(); @@ -74,7 +70,8 @@ public void setName(String name) { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index d794c44d7f71..32ae708d3e72 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; @@ -28,9 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a region of a table and its current location on the - * storage cluster. - * + * Representation of a region of a table and its current location on the storage cluster. + * *
        * <complexType name="TableRegion">
        *   <attribute name="name" type="string"></attribute>
      @@ -41,7 +38,7 @@
        *  </complexType>
        * 
      */ -@XmlRootElement(name="Region") +@XmlRootElement(name = "Region") @InterfaceAudience.Private public class TableRegionModel implements Serializable { @@ -49,37 +46,36 @@ public class TableRegionModel implements Serializable { private String table; private long id; - private byte[] startKey; + private byte[] startKey; private byte[] endKey; private String location; /** * Constructor */ - public TableRegionModel() {} + public TableRegionModel() { + } /** * Constructor - * @param table the table name - * @param id the encoded id of the region + * @param table the table name + * @param id the encoded id of the region * @param startKey the start key of the region - * @param endKey the end key of the region + * @param endKey the end key of the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey) { this(table, id, startKey, endKey, null); } /** * Constructor - * @param table the table name - * @param id the encoded id of the region + * @param table the table name + * @param id the encoded id of the region * @param startKey the start key of the region - * @param endKey the end key of the region + * @param endKey the end key of the region * @param location the name and port of the region server hosting the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey, String location) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey, String location) { this.table = table; this.id = id; this.startKey = startKey; @@ -92,17 +88,17 @@ public TableRegionModel(String table, long id, byte[] startKey, */ @XmlAttribute public String getName() { - byte [] tableNameAsBytes = Bytes.toBytes(this.table); + byte[] tableNameAsBytes = Bytes.toBytes(this.table); TableName tableName = TableName.valueOf(tableNameAsBytes); - byte [] nameAsBytes = RegionInfo.createRegionName( - tableName, this.startKey, this.id, !tableName.isSystemTable()); + byte[] nameAsBytes = + RegionInfo.createRegionName(tableName, this.startKey, this.id, !tableName.isSystemTable()); return Bytes.toString(nameAsBytes); } /** * @return the encoded region id */ - @XmlAttribute + @XmlAttribute public long getId() { return id; } @@ -110,7 +106,7 @@ public long getId() { /** * @return the start key */ - @XmlAttribute + @XmlAttribute public byte[] getStartKey() { return startKey; } @@ -118,7 +114,7 @@ public byte[] getStartKey() { /** * @return the end key */ - @XmlAttribute + @XmlAttribute public byte[] getEndKey() { return endKey; } @@ -126,7 +122,7 @@ public byte[] getEndKey() { /** * @return the name and port of the region server hosting the region */ - @XmlAttribute + @XmlAttribute public String getLocation() { return location; } @@ -171,7 +167,8 @@ public void setLocation(String location) { this.location = location; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index b5578b70e8f3..ba462256f8a2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +60,7 @@ * </complexType> * */ -@XmlRootElement(name="TableSchema") +@XmlRootElement(name = "TableSchema") @InterfaceAudience.Private public class TableSchemaModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -73,13 +72,14 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { private static final QName COMPRESSION = new QName(ColumnFamilyDescriptorBuilder.COMPRESSION); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); private List columns = new ArrayList<>(); /** * Default constructor. */ - public TableSchemaModel() {} + public TableSchemaModel() { + } /** * Constructor @@ -88,16 +88,14 @@ public TableSchemaModel() {} public TableSchemaModel(TableDescriptor tableDescriptor) { setName(tableDescriptor.getTableName().getNameAsString()); for (Map.Entry e : tableDescriptor.getValues().entrySet()) { - addAttribute(Bytes.toString(e.getKey().get()), - Bytes.toString(e.getValue().get())); + addAttribute(Bytes.toString(e.getKey().get()), Bytes.toString(e.getValue().get())); } for (ColumnFamilyDescriptor hcd : tableDescriptor.getColumnFamilies()) { ColumnSchemaModel columnModel = new ColumnSchemaModel(); columnModel.setName(hcd.getNameAsString()); - for (Map.Entry e: - hcd.getValues().entrySet()) { + for (Map.Entry e : hcd.getValues().entrySet()) { columnModel.addAttribute(Bytes.toString(e.getKey().get()), - Bytes.toString(e.getValue().get())); + Bytes.toString(e.getValue().get())); } addColumnFamily(columnModel); } @@ -105,7 +103,7 @@ public TableSchemaModel(TableDescriptor tableDescriptor) { /** * Add an attribute to the table descriptor - * @param name attribute name + * @param name attribute name * @param value attribute value */ @JsonAnySetter @@ -114,8 +112,8 @@ public void addAttribute(String name, Object value) { } /** - * Return a table descriptor value as a string. Calls toString() on the - * object stored in the descriptor value map. + * Return a table descriptor value as a string. Calls toString() on the object stored in the + * descriptor value map. 
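An aside on the class this hunk reformats: TableSchemaModel carries a table schema over both JAXB and protobuf, and the methods visible here (addAttribute, addColumnFamily, createProtobufOutput, getObjectFromMessage) round-trip a schema through its protobuf form. A minimal sketch of that round trip, assuming the hbase-rest module is on the classpath; the table and family names are invented for illustration:

import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

public class TableSchemaRoundTripSketch {
  public static void main(String[] args) throws Exception {
    TableSchemaModel schema = new TableSchemaModel();
    schema.setName("demo_table"); // hypothetical table name, for illustration only

    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("cf");
    schema.addColumnFamily(family);

    // Serialize to the protobuf wire form and parse it back through the
    // ProtobufMessageHandler contract shown in this hunk.
    byte[] wire = schema.createProtobufOutput();
    TableSchemaModel copy = (TableSchemaModel) new TableSchemaModel().getObjectFromMessage(wire);
    System.out.println(copy.getName() + " / " + copy.getColumns().get(0).getName());
  }
}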
* @param name the attribute name * @return the attribute value */ @@ -154,14 +152,14 @@ public String getName() { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } /** * @return the columns */ - @XmlElement(name="ColumnSchema") + @XmlElement(name = "ColumnSchema") public List getColumns() { return columns; } @@ -180,7 +178,8 @@ public void setColumns(List columns) { this.columns = columns; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -189,7 +188,7 @@ public String toString() { sb.append("{ NAME=> '"); sb.append(name); sb.append('\''); - for (Map.Entry e : attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -265,8 +264,7 @@ public byte[] createProtobufOutput() { TableSchema.Builder builder = TableSchema.newBuilder(); builder.setName(name); for (Map.Entry e : attrs.entrySet()) { - TableSchema.Attribute.Builder attrBuilder = - TableSchema.Attribute.newBuilder(); + TableSchema.Attribute.Builder attrBuilder = TableSchema.Attribute.newBuilder(); attrBuilder.setName(e.getKey().getLocalPart()); attrBuilder.setValue(e.getValue().toString()); builder.addAttrs(attrBuilder); @@ -276,8 +274,7 @@ public byte[] createProtobufOutput() { ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder(); familyBuilder.setName(family.getName()); for (Map.Entry e : familyAttrs.entrySet()) { - ColumnSchema.Attribute.Builder attrBuilder = - ColumnSchema.Attribute.newBuilder(); + ColumnSchema.Attribute.Builder attrBuilder = ColumnSchema.Attribute.newBuilder(); attrBuilder.setName(e.getKey().getLocalPart()); attrBuilder.setValue(e.getValue().toString()); familyBuilder.addAttrs(attrBuilder); @@ -300,8 +297,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableSchema.Builder builder = TableSchema.newBuilder(); ProtobufUtil.mergeFrom(builder, message); this.setName(builder.getName()); @@ -321,8 +317,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) familyModel.addAttribute(ColumnFamilyDescriptorBuilder.TTL, family.getTtl()); } if (family.hasMaxVersions()) { - familyModel.addAttribute(HConstants.VERSIONS, - family.getMaxVersions()); + familyModel.addAttribute(HConstants.VERSIONS, family.getMaxVersions()); } if (family.hasCompression()) { familyModel.addAttribute(ColumnFamilyDescriptorBuilder.COMPRESSION, @@ -344,8 +339,8 @@ public TableDescriptor getTableDescriptor() { tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } for (ColumnSchemaModel column : getColumns()) { - ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(column.getName())); + ColumnFamilyDescriptorBuilder cfdb = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column.getName())); for (Map.Entry e : column.getAny().entrySet()) { cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java index a3f2fa6a76b4..ed3949a087ac 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java @@ -1,5 +1,4 
@@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -34,8 +32,7 @@ import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.VersionMessage.Version; /** - * A representation of the collection of versions of the REST gateway software - * components. + * A representation of the collection of versions of the REST gateway software components. *
 * <ul>
 * <li>restVersion: REST gateway revision</li>
 * <li>jvmVersion: the JVM vendor and version information</li>
@@ -44,7 +41,7 @@
 * <li>jerseyVersion: the version of the embedded Jersey framework</li>
 * </ul>
      */ -@XmlRootElement(name="Version") +@XmlRootElement(name = "Version") @InterfaceAudience.Private public class VersionModel implements Serializable, ProtobufMessageHandler { @@ -59,7 +56,8 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * Default constructor. Do not use. */ - public VersionModel() {} + public VersionModel() { + } /** * Constructor @@ -67,12 +65,10 @@ public VersionModel() {} */ public VersionModel(ServletContext context) { restVersion = RESTServlet.VERSION_STRING; - jvmVersion = System.getProperty("java.vm.vendor") + ' ' + - System.getProperty("java.version") + '-' + - System.getProperty("java.vm.version"); - osVersion = System.getProperty("os.name") + ' ' + - System.getProperty("os.version") + ' ' + - System.getProperty("os.arch"); + jvmVersion = System.getProperty("java.vm.vendor") + ' ' + System.getProperty("java.version") + + '-' + System.getProperty("java.vm.version"); + osVersion = System.getProperty("os.name") + ' ' + System.getProperty("os.version") + ' ' + + System.getProperty("os.arch"); serverVersion = context.getServerInfo(); jerseyVersion = ServletContainer.class.getPackage().getImplementationVersion(); // Currently, this will always be null because the manifest doesn't have any useful information @@ -82,7 +78,7 @@ public VersionModel(ServletContext context) { /** * @return the REST gateway version */ - @XmlAttribute(name="REST") + @XmlAttribute(name = "REST") public String getRESTVersion() { return restVersion; } @@ -90,7 +86,7 @@ public String getRESTVersion() { /** * @return the JVM vendor and version */ - @XmlAttribute(name="JVM") + @XmlAttribute(name = "JVM") public String getJVMVersion() { return jvmVersion; } @@ -98,7 +94,7 @@ public String getJVMVersion() { /** * @return the OS name, version, and hardware architecture */ - @XmlAttribute(name="OS") + @XmlAttribute(name = "OS") public String getOSVersion() { return osVersion; } @@ -106,7 +102,7 @@ public String getOSVersion() { /** * @return the servlet container version */ - @XmlAttribute(name="Server") + @XmlAttribute(name = "Server") public String getServerVersion() { return serverVersion; } @@ -114,7 +110,7 @@ public String getServerVersion() { /** * @return the version of the embedded Jersey framework */ - @XmlAttribute(name="Jersey") + @XmlAttribute(name = "Jersey") public String getJerseyVersion() { return jerseyVersion; } @@ -154,7 +150,8 @@ public void setJerseyVersion(String version) { this.jerseyVersion = version; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -186,8 +183,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Version.Builder builder = Version.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasRestVersion()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java index 3aa81db5f03a..e87e516c5cfe 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider; import java.util.Arrays; @@ -44,10 +42,9 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * Plumbing for hooking up Jersey's JSON entity body encoding and decoding - * support to JAXB. Modify how the context is created (by using e.g. a - * different configuration builder) to control how JSON is processed and - * created. + * Plumbing for hooking up Jersey's JSON entity body encoding and decoding support to JAXB. Modify + * how the context is created (by using e.g. a different configuration builder) to control how JSON + * is processed and created. */ @Provider @InterfaceAudience.Private @@ -57,23 +54,11 @@ public class JAXBContextResolver implements ContextResolver { private final Set> types; - private final Class[] cTypes = { - CellModel.class, - CellSetModel.class, - ColumnSchemaModel.class, - NamespacesModel.class, - NamespacesInstanceModel.class, - RowModel.class, - ScannerModel.class, - StorageClusterStatusModel.class, - StorageClusterVersionModel.class, - TableInfoModel.class, - TableListModel.class, - TableModel.class, - TableRegionModel.class, - TableSchemaModel.class, - VersionModel.class - }; + private final Class[] cTypes = { CellModel.class, CellSetModel.class, ColumnSchemaModel.class, + NamespacesModel.class, NamespacesInstanceModel.class, RowModel.class, ScannerModel.class, + StorageClusterStatusModel.class, StorageClusterVersionModel.class, TableInfoModel.class, + TableListModel.class, TableModel.class, TableRegionModel.class, TableSchemaModel.class, + VersionModel.class }; @SuppressWarnings("unchecked") public JAXBContextResolver() throws Exception { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java index 9c94611355ae..7c3f6f8ea401 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.consumer; import java.io.ByteArrayOutputStream; @@ -39,28 +37,25 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * Adapter for hooking up Jersey content processing dispatch to - * ProtobufMessageHandler interface capable handlers for decoding protobuf input. + * Adapter for hooking up Jersey content processing dispatch to ProtobufMessageHandler interface + * capable handlers for decoding protobuf input. 
*/ @Provider -@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@Consumes({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) @InterfaceAudience.Private -public class ProtobufMessageBodyConsumer - implements MessageBodyReader { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); +public class ProtobufMessageBodyConsumer implements MessageBodyReader { + private static final Logger LOG = LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public boolean isReadable(Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { return ProtobufMessageHandler.class.isAssignableFrom(type); } @Override public ProtobufMessageHandler readFrom(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream inputStream) - throws IOException, WebApplicationException { + Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, + InputStream inputStream) throws IOException, WebApplicationException { ProtobufMessageHandler obj = null; try { obj = type.getDeclaredConstructor().newInstance(); @@ -74,12 +69,11 @@ public ProtobufMessageHandler readFrom(Class type, Type } } while (read > 0); if (LOG.isTraceEnabled()) { - LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + - inputStream); + LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + inputStream); } obj = obj.getObjectFromMessage(baos.toByteArray()); } catch (InstantiationException | NoSuchMethodException | InvocationTargetException - | IllegalAccessException e) { + | IllegalAccessException e) { throw new WebApplicationException(e); } return obj; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java index 9eef5bf3df47..973665f00fa9 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.producer; import java.io.IOException; @@ -35,35 +33,31 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * An adapter between Jersey and Object.toString(). Hooks up plain text output - * to the Jersey content handling framework. - * Jersey will first call getSize() to learn the number of bytes that will be + * An adapter between Jersey and Object.toString(). Hooks up plain text output to the Jersey content + * handling framework. Jersey will first call getSize() to learn the number of bytes that will be * sent, then writeTo to perform the actual I/O. 
*/ @Provider @Produces(Constants.MIMETYPE_TEXT) @InterfaceAudience.Private -public class PlainTextMessageBodyProducer - implements MessageBodyWriter { +public class PlainTextMessageBodyProducer implements MessageBodyWriter { @Override - public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, - MediaType arg3) { + public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, MediaType arg3) { return true; } @Override - public long getSize(Object object, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public long getSize(Object object, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { // deprecated by JAX-RS 2.0 and ignored by Jersey runtime return -1; } @Override - public void writeTo(Object object, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, OutputStream outStream) - throws IOException, WebApplicationException { + public void writeTo(Object object, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream outStream) + throws IOException, WebApplicationException { outStream.write(Bytes.toBytes(object.toString())); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java index 3f21893470d1..1d95e6f343e7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.producer; import java.io.IOException; @@ -35,35 +33,32 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up - * protobuf output producing methods to the Jersey content handling framework. - * Jersey will first call getSize() to learn the number of bytes that will be - * sent, then writeTo to perform the actual I/O. + * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up protobuf output + * producing methods to the Jersey content handling framework. Jersey will first call getSize() to + * learn the number of bytes that will be sent, then writeTo to perform the actual I/O. 
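Both producers reformatted here are picked solely by the request's Accept header. A sketch of driving them with the project's own REST client (Client, Cluster, Response, and the Constants MIME types appear elsewhere in this patch; the /version/cluster path and the localhost:8080 port are assumptions for illustration):

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class AcceptHeaderSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // assumed gateway address

    // text/plain is served by PlainTextMessageBodyProducer (Object.toString()).
    Response text = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
    System.out.println(text.getCode() + " " + new String(text.getBody()));

    // application/x-protobuf is served by ProtobufMessageBodyProducer (createProtobufOutput()).
    Response pb = client.get("/version/cluster", Constants.MIMETYPE_PROTOBUF);
    System.out.println(pb.getCode() + " " + pb.getBody().length + " bytes");
  }
}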
*/ @Provider -@Produces({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) @InterfaceAudience.Private -public class ProtobufMessageBodyProducer - implements MessageBodyWriter { +public class ProtobufMessageBodyProducer implements MessageBodyWriter { @Override - public boolean isWriteable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public boolean isWriteable(Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { return ProtobufMessageHandler.class.isAssignableFrom(type); } @Override public long getSize(ProtobufMessageHandler m, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + Annotation[] annotations, MediaType mediaType) { // deprecated by JAX-RS 2.0 and ignored by Jersey runtime return -1; } @Override public void writeTo(ProtobufMessageHandler m, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, OutputStream entityStream) - throws IOException, WebApplicationException { + Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, + OutputStream entityStream) throws IOException, WebApplicationException { entityStream.write(m.createProtobufOutput()); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java index 5af8ee2bfafc..4c9b20c6ace3 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,9 +38,11 @@ public void destroy() { @Override public void doFilter(ServletRequest paramServletRequest, ServletResponse paramServletResponse, - FilterChain paramFilterChain) throws IOException, ServletException { - if (paramServletRequest instanceof HttpServletRequest - && paramServletResponse instanceof HttpServletResponse) { + FilterChain paramFilterChain) throws IOException, ServletException { + if ( + paramServletRequest instanceof HttpServletRequest + && paramServletResponse instanceof HttpServletResponse + ) { HttpServletRequest request = (HttpServletRequest) paramServletRequest; HttpServletResponse response = (HttpServletResponse) paramServletResponse; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java index 00b28c7534b4..e1e52d2c1848 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +44,7 @@ public void startServletContainer(Configuration conf) throws Exception { server = new RESTServer(conf); server.run(); - LOG.info("started " + server.getClass().getName() + " on port " + - server.getPort()); + LOG.info("started " + server.getClass().getName() + " on port " + server.getPort()); } public void shutdownServletContainer() { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index 0ed14f0dd1bf..caf4f378bd78 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -96,24 +95,20 @@ import org.slf4j.LoggerFactory; /** - * Script used evaluating Stargate performance and scalability. Runs a SG - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run java PerformanceEvaluation --help to - * obtain usage. - * - *

      This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *

      If number of clients > 1, we start up a MapReduce job. Each map task - * runs an individual client. Each client does about 1GB of data. + * Script used evaluating Stargate performance and scalability. Runs a SG client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run java PerformanceEvaluation --help to obtain usage. + *

      + * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *

      + * If number of clients > 1, we start up a MapReduce job. Each map task runs an individual client. + * Each client does about 1GB of data. */ public class PerformanceEvaluation extends Configured implements Tool { - protected static final Logger LOG = - LoggerFactory.getLogger(PerformanceEvaluation.class); + protected static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class); private static final int DEFAULT_ROW_PREFIX_LENGTH = 16; private static final int ROW_LENGTH = 1000; @@ -149,20 +144,14 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Regex to parse lines in input file passed to mapreduce task. */ - public static final Pattern LINE_PATTERN = - Pattern.compile("tableName=(\\w+),\\s+" + - "startRow=(\\d+),\\s+" + - "perClientRunRows=(\\d+),\\s+" + - "totalRows=(\\d+),\\s+" + - "clients=(\\d+),\\s+" + - "flushCommits=(\\w+),\\s+" + - "writeToWAL=(\\w+),\\s+" + - "useTags=(\\w+),\\s+" + - "noOfTags=(\\d+)"); + public static final Pattern LINE_PATTERN = Pattern + .compile("tableName=(\\w+),\\s+" + "startRow=(\\d+),\\s+" + "perClientRunRows=(\\d+),\\s+" + + "totalRows=(\\d+),\\s+" + "clients=(\\d+),\\s+" + "flushCommits=(\\w+),\\s+" + + "writeToWAL=(\\w+),\\s+" + "useTags=(\\w+),\\s+" + "noOfTags=(\\d+)"); /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected enum Counter { /** elapsed time */ @@ -178,33 +167,28 @@ protected enum Counter { public PerformanceEvaluation(final Configuration c) { this.conf = c; - addCommandDescriptor(RandomReadTest.class, "randomRead", - "Run random read test"); + addCommandDescriptor(RandomReadTest.class, "randomRead", "Run random read test"); addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan", - "Run random seek and scan 100 test"); + "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", - "Run random seek scan with both start and stop row (max 10 rows)"); + "Run random seek scan with both start and stop row (max 10 rows)"); addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100", - "Run random seek scan with both start and stop row (max 100 rows)"); + "Run random seek scan with both start and stop row (max 100 rows)"); addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000", - "Run random seek scan with both start and stop row (max 1000 rows)"); + "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", - "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); - addCommandDescriptor(ScanTest.class, "scan", - "Run scan test (read every row)"); + "Run random seek scan with both start and stop row (max 10000 rows)"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); + 
addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based " + - "on it's value (make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based " + + "on it's value (make sure to use --rows=20)"); } - protected void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); commands.put(name, cmdDescriptor); } @@ -222,10 +206,9 @@ interface Status { } /** - * This class works as the InputSplit of Performance Evaluation - * MapReduce InputFormat, and the Record Value of RecordReader. - * Each map task will only read one record from a PeInputSplit, - * the record value is the PeInputSplit itself. + * This class works as the InputSplit of Performance Evaluation MapReduce InputFormat, and the + * Record Value of RecordReader. Each map task will only read one record from a PeInputSplit, the + * record value is the PeInputSplit itself. */ public static class PeInputSplit extends InputSplit implements Writable { private TableName tableName; @@ -239,7 +222,7 @@ public static class PeInputSplit extends InputSplit implements Writable { private int noOfTags; public PeInputSplit(TableName tableName, int startRow, int rows, int totalRows, int clients, - boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) { + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) { this.tableName = tableName; this.startRow = startRow; this.rows = rows; @@ -326,8 +309,8 @@ public int getNoOfTags() { } /** - * InputFormat of Performance Evaluation MapReduce job. - * It extends from FileInputFormat, want to use it's methods such as setInputPaths(). + * InputFormat of Performance Evaluation MapReduce job. It extends from FileInputFormat, want to + * use it's methods such as setInputPaths(). 
*/ public static class PeInputFormat extends FileInputFormat { @Override @@ -362,20 +345,13 @@ public List getSplits(JobContext job) throws IOException { boolean useTags = Boolean.parseBoolean(m.group(8)); int noOfTags = Integer.parseInt(m.group(9)); - LOG.debug("tableName=" + tableName + - " split["+ splitList.size() + "] " + - " startRow=" + startRow + - " rows=" + rows + - " totalRows=" + totalRows + - " clients=" + clients + - " flushCommits=" + flushCommits + - " writeToWAL=" + writeToWAL + - " useTags=" + useTags + - " noOfTags=" + noOfTags); - - PeInputSplit newSplit = - new PeInputSplit(tableName, startRow, rows, totalRows, clients, - flushCommits, writeToWAL, useTags, noOfTags); + LOG.debug("tableName=" + tableName + " split[" + splitList.size() + "] " + " startRow=" + + startRow + " rows=" + rows + " totalRows=" + totalRows + " clients=" + clients + + " flushCommits=" + flushCommits + " writeToWAL=" + writeToWAL + " useTags=" + + useTags + " noOfTags=" + noOfTags); + + PeInputSplit newSplit = new PeInputSplit(tableName, startRow, rows, totalRows, clients, + flushCommits, writeToWAL, useTags, noOfTags); splitList.add(newSplit); } } @@ -388,7 +364,7 @@ public List getSplits(JobContext job) throws IOException { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) { + TaskAttemptContext context) { return new PeRecordReader(); } @@ -401,7 +377,7 @@ public static class PeRecordReader extends RecordReader { + extends Mapper { /** configuration parameter name that contains the command */ public final static String CMD_KEY = "EvaluationMapTask.command"; @@ -464,10 +440,10 @@ protected void setup(Context context) { // this is required so that extensions of PE are instantiated within the // map reduce task... Class peClass = - forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); + forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); try { - this.pe = peClass.getConstructor(Configuration.class) - .newInstance(context.getConfiguration()); + this.pe = + peClass.getConstructor(Configuration.class).newInstance(context.getConfiguration()); } catch (Exception e) { throw new IllegalStateException("Could not instantiate PE instance", e); } @@ -485,16 +461,15 @@ private Class forName(String className, Class type) @Override protected void map(NullWritable key, PeInputSplit value, final Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Status status = context::setStatus; // Evaluation task pe.tableName = value.getTableName(); - long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), - value.getRows(), value.getTotalRows(), - value.isFlushCommits(), value.isWriteToWAL(), - value.isUseTags(), value.getNoOfTags(), - ConnectionFactory.createConnection(context.getConfiguration()), status); + long elapsedTime = + this.pe.runOneClient(this.cmd, value.getStartRow(), value.getRows(), value.getTotalRows(), + value.isFlushCommits(), value.isWriteToWAL(), value.isUseTags(), value.getNoOfTags(), + ConnectionFactory.createConnection(context.getConfiguration()), status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. 
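For reference, the LINE_PATTERN regex that getSplits() applies to each line of the generated input file can be checked in isolation; the field values below are invented purely to show the shape of a matching line:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LinePatternSketch {
  // Same pattern as PerformanceEvaluation.LINE_PATTERN, repeated here for a standalone check.
  private static final Pattern LINE_PATTERN = Pattern
    .compile("tableName=(\\w+),\\s+" + "startRow=(\\d+),\\s+" + "perClientRunRows=(\\d+),\\s+"
      + "totalRows=(\\d+),\\s+" + "clients=(\\d+),\\s+" + "flushCommits=(\\w+),\\s+"
      + "writeToWAL=(\\w+),\\s+" + "useTags=(\\w+),\\s+" + "noOfTags=(\\d+)");

  public static void main(String[] args) {
    String line = "tableName=TestTable, startRow=0, perClientRunRows=262144, "
      + "totalRows=1048576, clients=4, flushCommits=false, writeToWAL=true, "
      + "useTags=false, noOfTags=0";
    Matcher m = LINE_PATTERN.matcher(line);
    if (m.matches()) {
      // Groups line up with the reads in getSplits(): 1=tableName ... 9=noOfTags.
      System.out.println(m.group(1) + " clients=" + m.group(5) + " noOfTags=" + m.group(9));
    }
  }
}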
context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime); @@ -519,7 +494,7 @@ private boolean checkTable(RemoteAdmin admin) throws IOException { } byte[][] splits = getSplits(); - for (int i=0; i < splits.length; i++) { + for (int i = 0; i < splits.length; i++) { LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i])); } admin.createTable(tableDescriptor); @@ -549,17 +524,16 @@ protected TableDescriptor getDescriptor() { /** * Generates splits based on total number of rows and specified split regions - * * @return splits : array of byte [] */ - protected byte[][] getSplits() { + protected byte[][] getSplits() { if (this.presplitRegions == 0) { return new byte[0][]; } int numSplitPoints = presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; - int jump = this.R / this.presplitRegions; + int jump = this.R / this.presplitRegions; for (int i = 0; i < numSplitPoints; i++) { int rowkey = jump * (1 + i); splits[i] = format(rowkey); @@ -568,12 +542,12 @@ protected byte[][] getSplits() { } /** - * We're to run multiple clients concurrently. Setup a mapreduce job. Run - * one map per client. Then run a single reduce to sum the elapsed times. + * We're to run multiple clients concurrently. Setup a mapreduce job. Run one map per client. Then + * run a single reduce to sum the elapsed times. * @param cmd Command to run. */ private void runNIsMoreThanOne(final Class cmd) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { RemoteAdmin remoteAdmin = new RemoteAdmin(new Client(cluster), getConf()); checkTable(remoteAdmin); if (nomapred) { @@ -591,7 +565,7 @@ private void runNIsMoreThanOne(final Class cmd) private void doMultipleClients(final Class cmd) throws IOException { final List threads = new ArrayList<>(this.N); final long[] timings = new long[this.N]; - final int perClientRows = R/N; + final int perClientRows = R / N; final TableName tableName = this.tableName; final DataBlockEncoding encoding = this.blockEncoding; final boolean flushCommits = this.flushCommits; @@ -619,13 +593,12 @@ public void run() { pe.useTags = useTags; pe.noOfTags = numTags; try { - long elapsedTime = pe.runOneClient(cmd, index * perClientRows, - perClientRows, R, - flushCommits, writeToWAL, useTags, noOfTags, connection, + long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R, + flushCommits, writeToWAL, useTags, noOfTags, connection, msg -> LOG.info("client-" + getName() + " " + msg)); timings[index] = elapsedTime; - LOG.info("Finished " + getName() + " in " + elapsedTime + - "ms writing " + perClientRows + " rows"); + LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows + + " rows"); } catch (IOException e) { throw new RuntimeException(e); } @@ -646,27 +619,23 @@ public void run() { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(timings)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(timings)); Arrays.sort(timings); long total = 0; for (int i = 0; i < this.N; i++) { total += timings[i]; } - LOG.info("[" + test + "]" - + "\tMin: " + timings[0] + "ms" - + "\tMax: " + timings[this.N - 1] + "ms" - + "\tAvg: " + (total / this.N) + "ms"); + LOG.info("[" + test + "]" + "\tMin: " + timings[0] + "ms" + "\tMax: " + timings[this.N - 1] + + "ms" + "\tAvg: " + (total / this.N) + "ms"); } /** - * Run a mapreduce job. Run as many maps as asked-for clients. 
- * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. * @param cmd Command to run. */ private void doMapReduce(final Class cmd) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); Path inputDir = writeInputFile(conf); conf.set(EvaluationMapTask.CMD_KEY, cmd.getName()); @@ -764,8 +733,8 @@ public String getDescription() { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests - * This makes the reflection logic a little easier to understand... + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests This + * makes the reflection logic a little easier to understand... */ static class TestOptions { private int startRow; @@ -779,8 +748,8 @@ static class TestOptions { private Connection connection; TestOptions(int startRow, int perClientRunRows, int totalRows, TableName tableName, - boolean flushCommits, boolean writeToWAL, boolean useTags, - int noOfTags, Connection connection) { + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + Connection connection) { this.startRow = startRow; this.perClientRunRows = perClientRunRows; this.totalRows = totalRows; @@ -830,17 +799,17 @@ public int getNumTags() { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. */ static abstract class Test { // Below is make it so when Tests are all running in the one // jvm, that they each have a differently seeded Random. - private static final Random randomSeed = - new Random(EnvironmentEdgeManager.currentTime()); + private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime()); + private static long nextRandomSeed() { return randomSeed.nextLong(); } + protected final Random rand = new Random(nextRandomSeed()); protected final int startRow; @@ -855,8 +824,8 @@ private static long nextRandomSeed() { protected Connection connection; /** - * Note that all subclasses of this class must provide a public contructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public contructor that has the exact + * same list of arguments. */ Test(final Configuration conf, final TestOptions options, final Status status) { super(); @@ -878,10 +847,10 @@ protected String generateStatus(final int sr, final int i, final int lr) { protected int getReportingPeriod() { int period = this.perClientRunRows / 10; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } - abstract void testTakedown() throws IOException; + abstract void testTakedown() throws IOException; /** * Run test @@ -956,7 +925,7 @@ void testSetup() throws IOException { } @Override - void testTakedown() throws IOException { + void testTakedown() throws IOException { if (flushCommits) { this.mutator.flush(); } @@ -981,7 +950,7 @@ void testRow(final int i) throws IOException { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? 
this.perClientRunRows : period; } } @@ -1005,8 +974,8 @@ void testRow(final int i) throws IOException { if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } s.close(); @@ -1023,7 +992,7 @@ protected Pair generateStartAndStopRows(int maxRange) { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } } @@ -1086,7 +1055,7 @@ void testRow(final int i) throws IOException { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } } @@ -1107,8 +1076,8 @@ void testRow(final int i) throws IOException { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); } else { put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value); @@ -1174,8 +1143,8 @@ void testRow(final int i) throws IOException { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); } else { put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value); @@ -1203,10 +1172,8 @@ void testRow(int i) throws IOException { } protected Scan constructScan(byte[] valuePrefix) { - Filter filter = new SingleColumnValueFilter( - FAMILY_NAME, QUALIFIER_NAME, CompareOperator.EQUAL, - new BinaryComparator(valuePrefix) - ); + Filter filter = new SingleColumnValueFilter(FAMILY_NAME, QUALIFIER_NAME, + CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); Scan scan = new Scan(); scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); scan.setFilter(filter); @@ -1218,31 +1185,31 @@ protected Scan constructScan(byte[] valuePrefix) { * Format passed integer. * @param number the integer to format * @return Returns zero-prefixed 10-byte wide decimal version of passed number (Does absolute in - * case number is negative). + * case number is negative). 
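The format() helper whose reflowed body follows just below writes a fixed-width, zero-padded decimal row key of DEFAULT_ROW_PREFIX_LENGTH + 10 = 26 digits. A standalone restatement of that logic, handy for sanity-checking the split points getSplits() derives from it:

public class RowKeyFormatSketch {
  private static final int DEFAULT_ROW_PREFIX_LENGTH = 16; // same constant as in the PE class

  // Mirrors PerformanceEvaluation.format(int): zero-prefixed decimal, absolute value of the input.
  public static byte[] format(final int number) {
    byte[] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10];
    int d = Math.abs(number);
    for (int i = b.length - 1; i >= 0; i--) {
      b[i] = (byte) ((d % 10) + '0');
      d /= 10;
    }
    return b;
  }

  public static void main(String[] args) {
    // With R=1048576 rows and 4 presplit regions, getSplits() emits keys at jump=262144 steps.
    System.out.println(new String(format(262144))); // 00000000000000000000262144
    System.out.println(new String(format(-42)));    // 000000000000000000000042
  }
}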
*/ - public static byte [] format(final int number) { + public static byte[] format(final int number) { byte[] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } public static byte[] generateData(final Random r, int length) { - byte[] b = new byte [length]; + byte[] b = new byte[length]; int i; - for (i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); @@ -1253,7 +1220,7 @@ public static byte[] generateData(final Random r, int length) { } public static byte[] generateValue(final Random r) { - byte [] b = new byte [ROW_LENGTH]; + byte[] b = new byte[ROW_LENGTH]; r.nextBytes(b); return b; } @@ -1262,33 +1229,32 @@ static byte[] getRandomRow(final Random random, final int totalRows) { return format(random.nextInt(Integer.MAX_VALUE) % totalRows); } - long runOneClient(final Class cmd, final int startRow, - final int perClientRunRows, final int totalRows, - boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, - Connection connection, final Status status) throws IOException { - status.setStatus("Start " + cmd + " at offset " + startRow + " for " + - perClientRunRows + " rows"); + long runOneClient(final Class cmd, final int startRow, final int perClientRunRows, + final int totalRows, boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + Connection connection, final Status status) throws IOException { + status + .setStatus("Start " + cmd + " at offset " + startRow + " for " + perClientRunRows + " rows"); long totalElapsedTime; - TestOptions options = new TestOptions(startRow, perClientRunRows, - totalRows, tableName, flushCommits, writeToWAL, useTags, noOfTags, connection); + TestOptions options = new TestOptions(startRow, perClientRunRows, totalRows, tableName, + flushCommits, writeToWAL, useTags, noOfTags, connection); final Test t; try { - Constructor constructor = cmd.getDeclaredConstructor( - Configuration.class, TestOptions.class, Status.class); + Constructor constructor = + cmd.getDeclaredConstructor(Configuration.class, TestOptions.class, Status.class); t = constructor.newInstance(this.conf, options, status); } catch (NoSuchMethodException e) { - throw new IllegalArgumentException("Invalid command class: " + - cmd.getName() + ". It does not provide a constructor as described by" + - "the javadoc comment. Available constructors are: " + - Arrays.toString(cmd.getConstructors())); + throw new IllegalArgumentException("Invalid command class: " + cmd.getName() + + ". It does not provide a constructor as described by" + + "the javadoc comment. 
Available constructors are: " + + Arrays.toString(cmd.getConstructors())); } catch (Exception e) { throw new IllegalStateException("Failed to construct command class", e); } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + startRow + " for " + perClientRunRows + " rows"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + startRow + + " for " + perClientRunRows + " rows"); return totalElapsedTime; } @@ -1300,15 +1266,15 @@ private void runNIsOne(final Class cmd) { Client client = new Client(cluster); admin = new RemoteAdmin(client, getConf()); checkTable(admin); - runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, - this.useTags, this.noOfTags, this.connection, status); + runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, this.useTags, + this.noOfTags, this.connection, status); } catch (Exception e) { LOG.error("Failed", e); } } private void runTest(final Class cmd) - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { if (N == 1) { // If there is only one client and one HRegionServer, we assume nothing // has been set up at all. @@ -1329,30 +1295,30 @@ protected void printUsage(final String message) { } System.err.println("Usage: java " + this.getClass().getName() + " \\"); System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\"); - System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] " + - "[-D]* "); + System.err.println( + " [--compress=TYPE] [--blockEncoding=TYPE] " + "[-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); System.err.println(" rows Rows each client runs. Default: One million"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); - System.err.println(" presplit Create presplit table. Recommended for accurate perf " + - "analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default : false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true."); + System.err.println(" presplit Create presplit table. Recommended for accurate perf " + + "analysis (see guide). Default: disabled"); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default : false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true."); System.err.println(); System.err.println("Read Tests:"); - System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + - "possible. Not guaranteed that reads are always served from inmemory. 
Default: false"); + System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + + "possible. Not guaranteed that reads are always served from inmemory. Default: false"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); System.err.println(" For example: "); @@ -1365,13 +1331,12 @@ protected void printUsage(final String message) { } System.err.println(); System.err.println("Args:"); - System.err.println(" nclients Integer. Required. Total number of " + - "clients (and HRegionServers)"); + System.err.println( + " nclients Integer. Required. Total number of " + "clients (and HRegionServers)"); System.err.println(" running: 1 <= value <= 500"); System.err.println("Examples:"); System.err.println(" To run a single evaluation client:"); - System.err.println(" $ hbase " + this.getClass().getName() - + " sequentialWrite 1"); + System.err.println(" $ hbase " + this.getClass().getName() + " sequentialWrite 1"); } private void getArgs(final int start, final String[] args) { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java index 53f2f14ec6ae..27de4c5803c4 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,8 +73,7 @@ public class RowResourceBase { protected static final String VALUE_6 = "6"; protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - protected static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + protected static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); protected static Client client; protected static JAXBContext context; protected static Marshaller xmlMarshaller; @@ -87,16 +86,12 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(conf); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class); xmlMarshaller = context.createMarshaller(); xmlUnmarshaller = context.createUnmarshaller(); - jsonMapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + jsonMapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); } @AfterClass @@ -113,11 +108,10 @@ public void beforeMethod() throws Exception { } TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)); - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(CFA)).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - 
.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } @@ -130,8 +124,8 @@ public void afterMethod() throws Exception { } } - static Response putValuePB(String table, String row, String column, - String value) throws IOException { + static Response putValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -142,39 +136,38 @@ static Response putValuePB(String table, String row, String column, return putValuePB(path.toString(), table, row, column, value); } - static Response putValuePB(String url, String table, String row, - String column, String value) throws IOException { + static Response putValuePB(String url, String table, String row, String column, String value) + throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static void checkValueXML(String url, String table, String row, - String column, String value) throws IOException, JAXBException { + protected static void checkValueXML(String url, String table, String row, String column, + String value) throws IOException, JAXBException { Response response = getValueXML(url); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static void checkValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { Response response = getValueXML(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); @@ -182,12 +175,12 @@ protected static void checkValueXML(String table, String row, String column, } protected static void checkIncrementValueXML(String table, String row, String column, long value) - throws IOException, JAXBException { + throws IOException, JAXBException { Response response1 = getValueXML(table, row, column); 
assertEquals(200, response1.getCode()); assertEquals(Constants.MIMETYPE_XML, response1.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); @@ -195,12 +188,12 @@ protected static void checkIncrementValueXML(String table, String row, String co } protected static Response getValuePB(String url) throws IOException { - Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); + Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); return response; } - protected static Response putValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response putValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -211,23 +204,20 @@ protected static Response putValueXML(String table, String row, String column, return putValueXML(path.toString(), table, row, column, value); } - protected static Response putValueXML(String url, String table, String row, - String column, String value) throws IOException, JAXBException { + protected static Response putValueXML(String url, String table, String row, String column, + String value) throws IOException, JAXBException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } - protected static Response getValuePB(String table, String row, String column) - throws IOException { + protected static Response getValuePB(String table, String row, String column) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -238,8 +228,8 @@ protected static Response getValuePB(String table, String row, String column) return getValuePB(path.toString()); } - protected static void checkValuePB(String table, String row, String column, - String value) throws IOException { + protected static void checkValuePB(String table, String row, String column, String value) + throws IOException { Response response = getValuePB(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); @@ -251,8 +241,8 @@ protected static void checkValuePB(String table, String row, String column, assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkIncrementValuePB(String table, String row, String column, - long value) throws IOException { + protected static void checkIncrementValuePB(String table, String row, String column, long value) + throws IOException { Response response = getValuePB(table, row, column); assertEquals(200, response.getCode()); 
assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); @@ -265,126 +255,115 @@ protected static void checkIncrementValuePB(String table, String row, String col } protected static Response checkAndPutValuePB(String url, String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException { + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToPut))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToPut))); if (otherCells != null) { - for (Map.Entry entry : otherCells.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : otherCells.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // This Cell need to be added as last cell. - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static Response checkAndPutValuePB(String table, String row, - String column, String valueToCheck, String valueToPut) throws IOException { - return checkAndPutValuePB(table,row,column,valueToCheck,valueToPut,null); + protected static Response checkAndPutValuePB(String table, String row, String column, + String valueToCheck, String valueToPut) throws IOException { + return checkAndPutValuePB(table, row, column, valueToCheck, valueToPut, null); } protected static Response checkAndPutValuePB(String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException { + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); path.append('/'); path.append(row); path.append("?check=put"); - return checkAndPutValuePB(path.toString(), table, row, column, - valueToCheck, valueToPut, otherCells); + return checkAndPutValuePB(path.toString(), table, row, column, valueToCheck, valueToPut, + otherCells); } protected static Response checkAndPutValueXML(String url, String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException, JAXBException { + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToPut))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToPut))); if (otherCells != null) { - for (Map.Entry entry : otherCells.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : otherCells.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // This Cell need to be added as last cell. 
- rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } protected static Response checkAndPutValueXML(String table, String row, String column, - String valueToCheck, String valueToPut) throws IOException, JAXBException { - return checkAndPutValueXML(table,row,column,valueToCheck,valueToPut, null); + String valueToCheck, String valueToPut) throws IOException, JAXBException { + return checkAndPutValueXML(table, row, column, valueToCheck, valueToPut, null); } - protected static Response checkAndPutValueXML(String table, String row, - String column, String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException, JAXBException { + protected static Response checkAndPutValueXML(String table, String row, String column, + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); path.append('/'); path.append(row); path.append("?check=put"); - return checkAndPutValueXML(path.toString(), table, row, column, - valueToCheck, valueToPut, otherCells); + return checkAndPutValueXML(path.toString(), table, row, column, valueToCheck, valueToPut, + otherCells); } - protected static Response checkAndDeleteXML(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String url, String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException, JAXBException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } - protected static Response checkAndDeleteXML(String table, String row, - String column, String valueToCheck) throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String table, String row, String column, + String valueToCheck) throws IOException, JAXBException { return checkAndDeleteXML(table, row, column, valueToCheck, null); } - protected static Response checkAndDeleteXML(String table, String row, - String column, String valueToCheck, HashMap cellsToDelete) - 
throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -394,14 +373,13 @@ protected static Response checkAndDeleteXML(String table, String row, return checkAndDeleteXML(path.toString(), table, row, column, valueToCheck, cellsToDelete); } - protected static Response checkAndDeleteJson(String table, String row, - String column, String valueToCheck) throws IOException { + protected static Response checkAndDeleteJson(String table, String row, String column, + String valueToCheck) throws IOException { return checkAndDeleteJson(table, row, column, valueToCheck, null); } - protected static Response checkAndDeleteJson(String table, String row, - String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + protected static Response checkAndDeleteJson(String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -411,36 +389,33 @@ protected static Response checkAndDeleteJson(String table, String row, return checkAndDeleteJson(path.toString(), table, row, column, valueToCheck, cellsToDelete); } - protected static Response checkAndDeleteJson(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + protected static Response checkAndDeleteJson(String url, String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(url, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(url, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); return response; } protected static Response checkAndDeletePB(String table, String row, String column, String value) - throws IOException { + throws IOException { return checkAndDeletePB(table, row, column, value, null); } - protected static Response checkAndDeletePB(String table, String row, - String column, String value, HashMap cellsToDelete) throws IOException { + protected static Response checkAndDeletePB(String table, String row, String column, String value, + HashMap cellsToDelete) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -449,30 +424,29 @@ protected static Response checkAndDeletePB(String table, String row, path.append("?check=delete"); return checkAndDeleteValuePB(path.toString(), table, row, column, value, cellsToDelete); } - protected static Response checkAndDeleteValuePB(String url, String table, - String row, String 
column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + + protected static Response checkAndDeleteValuePB(String url, String table, String row, + String column, String valueToCheck, HashMap cellsToDelete) throws IOException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes - .toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static Response getValueXML(String table, String startRow, - String endRow, String column) throws IOException { + protected static Response getValueXML(String table, String startRow, String endRow, String column) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -496,7 +470,7 @@ protected static Response getValueJson(String url) throws IOException { } protected static Response deleteValue(String table, String row, String column) - throws IOException { + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -510,7 +484,7 @@ protected static Response deleteValue(String table, String row, String column) } protected static Response getValueXML(String table, String row, String column) - throws IOException { + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -521,8 +495,7 @@ protected static Response getValueXML(String table, String row, String column) return getValueXML(path.toString()); } - protected static Response deleteRow(String table, String row) - throws IOException { + protected static Response deleteRow(String table, String row) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -533,8 +506,8 @@ protected static Response deleteRow(String table, String row) return response; } - protected static Response getValueJson(String table, String row, - String column) throws IOException { + protected static Response getValueJson(String table, String row, String column) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -545,8 +518,8 @@ protected static Response getValueJson(String table, String row, return getValueJson(path.toString()); } - protected static void checkValueJSON(String table, String row, String column, - String value) throws IOException { + protected static void checkValueJSON(String table, String row, String column, String value) + throws IOException { Response response = getValueJson(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); @@ -559,13 +532,13 @@ protected static void checkValueJSON(String table, String row, String column, assertEquals(Bytes.toString(cell.getValue()), value); } - 
protected static void checkIncrementValueJSON(String table, String row, String column, - long value) throws IOException { + protected static void checkIncrementValueJSON(String table, String row, String column, long value) + throws IOException { Response response = getValueJson(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); @@ -573,8 +546,8 @@ protected static void checkIncrementValueJSON(String table, String row, String c assertEquals(Bytes.toLong(cell.getValue()), value); } - protected static Response putValueJson(String table, String row, String column, - String value) throws IOException { + protected static Response putValueJson(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -586,21 +559,19 @@ protected static Response putValueJson(String table, String row, String column, } protected static Response putValueJson(String url, String table, String row, String column, - String value) throws IOException { + String value) throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(url, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(url, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); return response; } - protected static Response appendValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response appendValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -610,8 +581,8 @@ protected static Response appendValueXML(String table, String row, String column return putValueXML(path.toString(), table, row, column, value); } - protected static Response appendValuePB(String table, String row, String column, - String value) throws IOException { + protected static Response appendValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -621,8 +592,8 @@ protected static Response appendValuePB(String table, String row, String column, return putValuePB(path.toString(), table, row, column, value); } - protected static Response appendValueJson(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response appendValueJson(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -632,8 +603,8 @@ protected static Response 
appendValueJson(String table, String row, String colum return putValueJson(path.toString(), table, row, column, value); } - protected static Response incrementValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response incrementValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -643,8 +614,8 @@ protected static Response incrementValueXML(String table, String row, String col return putValueXML(path.toString(), table, row, column, value); } - protected static Response incrementValuePB(String table, String row, String column, - String value) throws IOException { + protected static Response incrementValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -655,7 +626,7 @@ protected static Response incrementValuePB(String table, String row, String colu } protected static Response incrementValueJson(String table, String row, String column, - String value) throws IOException, JAXBException { + String value) throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java index f4f9c7572081..9d9d2f337699 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestDeleteRow extends RowResourceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDeleteRow.class); + HBaseClassTestRule.forClass(TestDeleteRow.class); @Test public void testDeleteNonExistentColumn() throws Exception { @@ -91,11 +91,11 @@ public void testDeleteXML() throws IOException, JAXBException { response = getValueXML(TABLE, ROW_1, COLUMN_2); assertEquals(404, response.getCode()); - //Delete a row in non existent table + // Delete a row in non existent table response = deleteValue("dummy", ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //Delete non existent column + // Delete non existent column response = deleteValue(TABLE, ROW_1, "dummy"); assertEquals(404, response.getCode()); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java index 42e38fc99a69..fbff87c3d0c1 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestGZIPResponseWrapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGZIPResponseWrapper.class); + HBaseClassTestRule.forClass(TestGZIPResponseWrapper.class); private final HttpServletResponse response = mock(HttpServletResponse.class); private final GZIPResponseWrapper wrapper = new GZIPResponseWrapper(response); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java index e1dec900d491..b2c45e8cbd78 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,14 +44,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestGetAndPutResource extends RowResourceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetAndPutResource.class); + HBaseClassTestRule.forClass(TestGetAndPutResource.class); private static final MetricsAssertHelper METRICS_ASSERT = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); + CompatibilityFactory.getInstance(MetricsAssertHelper.class); @Test public void testForbidden() throws IOException, JAXBException { @@ -146,8 +146,8 @@ public void testMultipleCellCheckPutPB() throws IOException { assertEquals(200, response.getCode()); checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap otherCells = new HashMap<>(); - otherCells.put(COLUMN_2,VALUE_3); + HashMap otherCells = new HashMap<>(); + otherCells.put(COLUMN_2, VALUE_3); // On Success update both the cells response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells); @@ -179,8 +179,8 @@ public void testMultipleCellCheckPutXML() throws IOException, JAXBException { assertEquals(200, response.getCode()); checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap otherCells = new HashMap<>(); - otherCells.put(COLUMN_2,VALUE_3); + HashMap otherCells = new HashMap<>(); + otherCells.put(COLUMN_2, VALUE_3); // On Success update both the cells response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells); @@ -217,9 +217,9 @@ public void testMultipleCellCheckDeletePB() throws IOException { checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3); // Deletes the following columns based on Column1 check - HashMap cellsToDelete = new HashMap<>(); - cellsToDelete.put(COLUMN_2,VALUE_2); // Value does not matter - cellsToDelete.put(COLUMN_3,VALUE_3); // Value does not matter + HashMap cellsToDelete = new HashMap<>(); + cellsToDelete.put(COLUMN_2, VALUE_2); // Value does not matter + cellsToDelete.put(COLUMN_3, VALUE_3); // Value does not matter // On Success update both the cells response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1, cellsToDelete); @@ -265,7 +265,7 @@ public void testSingleCellGetPutBinary() throws 
IOException { assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type")); assertTrue(Bytes.equals(response.getBody(), body)); boolean foundTimestampHeader = false; - for (Header header: response.getHeaders()) { + for (Header header : response.getHeaders()) { if (header.getName().equals("X-Timestamp")) { foundTimestampHeader = true; break; @@ -280,8 +280,7 @@ public void testSingleCellGetPutBinary() throws IOException { @Test public void testSingleCellGetJSON() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_4)); + Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -296,16 +295,13 @@ public void testLatestCellGetJSON() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_4); - CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, - Bytes.toBytes(VALUE_1)); - CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, - Bytes.toBytes(VALUE_2)); + CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, Bytes.toBytes(VALUE_1)); + CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, Bytes.toBytes(VALUE_2)); rowModel.addCell(cellOne); rowModel.addCell(cellTwo); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(path, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(path, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -315,8 +311,8 @@ public void testLatestCellGetJSON() throws IOException { assertTrue(cellSet.getRows().size() == 1); assertTrue(cellSet.getRows().get(0).getCells().size() == 1); CellModel cell = cellSet.getRows().get(0).getCells().get(0); - assertEquals(VALUE_2 , Bytes.toString(cell.getValue())); - assertEquals(2L , cell.getTimestamp()); + assertEquals(VALUE_2, Bytes.toString(cell.getValue())); + assertEquals(2L, cell.getTimestamp()); response = deleteRow(TABLE, ROW_4); assertEquals(200, response.getCode()); } @@ -332,18 +328,16 @@ public void testURLEncodedKey() throws IOException, JAXBException { path.append('/'); path.append(COLUMN_1); Response response; - response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, - VALUE_1); + response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); } @Test public void testNoSuchCF() throws IOException { - final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA+":"; + final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA + ":"; final String badPath = "/" + TABLE + "/" + ROW_1 + "/" + "BAD"; - Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_1)); + Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1)); assertEquals(200, response.getCode()); assertEquals(200, client.get(goodPath, Constants.MIMETYPE_BINARY).getCode()); assertEquals(404, client.get(badPath, Constants.MIMETYPE_BINARY).getCode()); @@ -352,25 +346,20 @@ public void testNoSuchCF() throws IOException { @Test 
public void testMultiCellGetPutXML() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -391,23 +380,19 @@ public void testMultiCellGetPutXML() throws IOException, JAXBException { @Test public void testMultiCellGetPutPB() throws IOException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); - Response response = client.put(path, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(path, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); // make sure the fake row was not actually created @@ -438,12 +423,12 @@ public void testStartEndRowGetPutXML() throws IOException, JAXBException { } response = getValueXML(TABLE, rows[0], rows[2], COLUMN_1); assertEquals(200, response.getCode()); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(2, cellSet.getRows().size()); - for (int i = 0; i < cellSet.getRows().size()-1; i++) { + for (int i = 0; i < cellSet.getRows().size() - 1; i++) { RowModel rowModel = cellSet.getRows().get(i); - for (CellModel cell: 
rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { assertEquals(COLUMN_1, Bytes.toString(cell.getColumn())); assertEquals(values[i], Bytes.toString(cell.getValue())); } @@ -458,16 +443,14 @@ public void testStartEndRowGetPutXML() throws IOException, JAXBException { public void testInvalidCheckParam() throws IOException, JAXBException { CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); final String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "?check=blah"; - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); assertEquals(400, response.getCode()); } @@ -476,40 +459,33 @@ public void testInvalidColumnPut() throws IOException, JAXBException { String dummyColumn = "doesnot:exist"; CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), - Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), Bytes.toBytes(VALUE_1))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); final String path = "/" + TABLE + "/" + ROW_1 + "/" + dummyColumn; - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); assertEquals(404, response.getCode()); } @Test public void testMultiCellGetJson() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(path, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(path, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); // make sure the fake row was not actually created @@ -531,8 +507,7 @@ public void testMultiCellGetJson() throws IOException, JAXBException { @Test public void testMetrics() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - 
Bytes.toBytes(VALUE_4)); + Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -579,7 +554,7 @@ public void testMultiColumnGetXML() throws Exception { response = client.get(path, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); CellSetModel cellSet = - (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertTrue(cellSet.getRows().size() == 1); assertTrue(cellSet.getRows().get(0).getCells().size() == 3); List cells = cellSet.getRows().get(0).getCells(); @@ -594,8 +569,10 @@ public void testMultiColumnGetXML() throws Exception { private boolean containsCellModel(List cells, String column, String value) { boolean contains = false; for (CellModel cell : cells) { - if (Bytes.toString(cell.getColumn()).equals(column) - && Bytes.toString(cell.getValue()).equals(value)) { + if ( + Bytes.toString(cell.getColumn()).equals(column) + && Bytes.toString(cell.getValue()).equals(value) + ) { contains = true; return contains; } @@ -605,25 +582,20 @@ private boolean containsCellModel(List cells, String column, String v @Test public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -639,8 +611,8 @@ public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBExcept response = client.get(query.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertTrue(cellSet.getRows().size() == 2); response = deleteRow(TABLE, ROW_1); @@ -651,25 +623,20 @@ public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBExcept @Test public void testSuffixGlobbingXML() throws IOException, JAXBException { - String path = "/" + 
TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -687,8 +654,8 @@ public void testSuffixGlobbingXML() throws IOException, JAXBException { response = client.get(query.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); List rows = cellSet.getRows(); assertTrue(rows.size() == 2); for (RowModel row : rows) { @@ -706,7 +673,7 @@ public void testAppendXML() throws IOException, JAXBException { Response response = getValueXML(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -723,7 +690,7 @@ public void testAppendPB() throws IOException, JAXBException { Response response = getValuePB(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -740,7 +707,7 @@ public void testAppendJSON() throws IOException, JAXBException { Response response = getValueJson(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); putValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -757,14 +724,14 @@ public void testIncrementXML() throws IOException, JAXBException { Response response = getValueXML(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append single cell + // append single cell response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValueXML(TABLE, ROW_1, 
COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); @@ -775,14 +742,14 @@ public void testIncrementPB() throws IOException, JAXBException { Response response = getValuePB(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); @@ -793,14 +760,14 @@ public void testIncrementJSON() throws IOException, JAXBException { Response response = getValueJson(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java index 11f14f1b5ff7..b04ea00829d6 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,12 +49,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestGzipFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGzipFilter.class); + HBaseClassTestRule.forClass(TestGzipFilter.class); private static final TableName TABLE = TableName.valueOf("TestGzipFilter"); private static final String CFA = "a"; @@ -64,22 +64,19 @@ public class TestGzipFilter { private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1"); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); @@ -151,4 +148,3 @@ void testScannerResultCodes() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index 8337eace55c2..215d4f9c346c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,12 +56,12 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestMultiRowResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiRowResource.class); + HBaseClassTestRule.forClass(TestMultiRowResource.class); private static final TableName TABLE = TableName.valueOf("TestRowResource"); private static final String CFA = "a"; @@ -104,10 +104,7 @@ public static void setUpBeforeClass() throws Exception { extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); @@ -115,13 +112,11 @@ public static void setUpBeforeClass() throws Exception { if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } @@ -264,8 +259,8 @@ public void testMultiCellGetWithColsInQueryPathJSON() throws IOException { Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); - ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper( - CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); assertEquals(1, cellSet.getRows().size()); assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey())); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java index 488a95a1d7fa..ca013390a7b1 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -63,24 +63,23 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestNamespacesInstanceResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesInstanceResource.class); + HBaseClassTestRule.forClass(TestNamespacesInstanceResource.class); private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; - private static Map NAMESPACE1_PROPS = new HashMap<>(); + private static Map NAMESPACE1_PROPS = new HashMap<>(); private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; - private static Map NAMESPACE2_PROPS = new HashMap<>(); + private static Map NAMESPACE2_PROPS = new HashMap<>(); private static String NAMESPACE3 = "TestNamespacesInstanceResource3"; - private static Map NAMESPACE3_PROPS = new HashMap<>(); + private static Map NAMESPACE3_PROPS = new HashMap<>(); private static String NAMESPACE4 = "TestNamespacesInstanceResource4"; - private static Map NAMESPACE4_PROPS = new HashMap<>(); + private static Map NAMESPACE4_PROPS = new HashMap<>(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -92,12 +91,11 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); testNamespacesInstanceModel = new TestNamespacesInstanceModel(); context = JAXBContext.newInstance(NamespacesInstanceModel.class, TableListModel.class); - jsonMapper = new JacksonJaxbJsonProvider() - .locateMapper(NamespacesInstanceModel.class, MediaType.APPLICATION_JSON_TYPE); + jsonMapper = new JacksonJaxbJsonProvider().locateMapper(NamespacesInstanceModel.class, + MediaType.APPLICATION_JSON_TYPE); NAMESPACE1_PROPS.put("key1", "value1"); NAMESPACE2_PROPS.put("key2a", "value2a"); NAMESPACE2_PROPS.put("key2b", "value2b"); @@ -119,12 +117,11 @@ private static byte[] toXML(NamespacesInstanceModel model) throws JAXBException } @SuppressWarnings("unchecked") - private static T fromXML(byte[] content) - throws JAXBException { + private static T fromXML(byte[] content) throws JAXBException { return (T) context.createUnmarshaller().unmarshal(new ByteArrayInputStream(content)); } - private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException{ + private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); for (NamespaceDescriptor namespaceDescriptor : nd) { if (namespaceDescriptor.getName().equals(namespaceName)) { @@ -134,19 +131,19 @@ private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) thr return null; } - private void checkNamespaceProperties(NamespaceDescriptor nd, Map testProps){ + private void 
checkNamespaceProperties(NamespaceDescriptor nd, Map testProps) { checkNamespaceProperties(nd.getConfiguration(), testProps); } - private void checkNamespaceProperties(Map namespaceProps, - Map testProps){ + private void checkNamespaceProperties(Map namespaceProps, + Map testProps) { assertTrue(namespaceProps.size() == testProps.size()); - for (String key: testProps.keySet()) { + for (String key : testProps.keySet()) { assertEquals(testProps.get(key), namespaceProps.get(key)); } } - private void checkNamespaceTables(List namespaceTables, List testTables){ + private void checkNamespaceTables(List namespaceTables, List testTables) { assertEquals(namespaceTables.size(), testTables.size()); for (TableModel namespaceTable : namespaceTables) { String tableName = namespaceTable.getName(); @@ -189,8 +186,7 @@ public void testGetNamespaceTablesAndCannotDeleteNamespace() throws IOException, // Create two tables via admin. TableName tn1 = TableName.valueOf(nsName + ":table1"); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tn1); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn1); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); @@ -336,7 +332,7 @@ public void testNamespaceCreateAndDeleteXMLAndJSON() throws IOException, JAXBExc jsonString = jsonMapper.writeValueAsString(model2); response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); assertEquals(201, response.getCode()); - //check passing null content-type with a payload returns 415 + // check passing null content-type with a payload returns 415 Header[] nullHeaders = null; response = client.post(namespacePath1, nullHeaders, toXML(model1)); assertEquals(415, response.getCode()); @@ -392,23 +388,23 @@ public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { model4 = testNamespacesInstanceModel.buildTestModel(NAMESPACE4, NAMESPACE4_PROPS); testNamespacesInstanceModel.checkModel(model4, NAMESPACE4, NAMESPACE4_PROPS); - //Defines null headers for use in tests where no body content is provided, so that we set + // Defines null headers for use in tests where no body content is provided, so that we set // no content-type in the request Header[] nullHeaders = null; // Test cannot PUT (alter) non-existent namespace. - response = client.put(namespacePath3, nullHeaders, new byte[]{}); + response = client.put(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); // Test cannot create tables when in read only mode. 
conf.set("hbase.rest.readonly", "true"); - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); NamespaceDescriptor nd3 = findNamespace(admin, NAMESPACE3); NamespaceDescriptor nd4 = findNamespace(admin, NAMESPACE4); @@ -417,14 +413,14 @@ public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { conf.set("hbase.rest.readonly", "false"); // Create namespace with no body and binary content type. - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(201, response.getCode()); // Create namespace with protobuf content-type. - response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(201, response.getCode()); - //check setting unsupported content-type returns 415 - response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[]{}); + // check setting unsupported content-type returns 415 + response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[] {}); assertEquals(415, response.getCode()); // Check that created namespaces correctly. @@ -436,10 +432,10 @@ public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { checkNamespaceProperties(nd4, NAMESPACE4_PROPS); // Check cannot post tables that already exist. - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); // Check cannot post tables when in read only mode. diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java index d8729f6656b5..9508c95a1eb4 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,18 +43,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestNamespacesResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesResource.class); + HBaseClassTestRule.forClass(TestNamespacesResource.class); private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java index 4201c3210fe5..f4bce03abca5 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.rest; import static org.junit.Assert.assertEquals; + import java.io.File; import java.lang.reflect.Method; import java.security.KeyPair; @@ -42,12 +43,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestRESTServerSSL { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRESTServerSSL.class); + HBaseClassTestRule.forClass(TestRESTServerSSL.class); private static final Logger LOG = LoggerFactory.getLogger(TestRESTServerSSL.class); @@ -79,8 +80,8 @@ public static void beforeClass() throws Exception { initializeAlgorithmId(); keyDir = initKeystoreDir(); KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - X509Certificate serverCertificate = KeyStoreTestUtil.generateCertificate( - "CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + X509Certificate serverCertificate = + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); generateTrustStore("jks", serverCertificate); generateTrustStore("jceks", serverCertificate); @@ -162,8 +163,6 @@ public void testSslConnectionUsingKeystoreFormatPKCS12() throws Exception { assertEquals(200, response.getCode()); } - - private static File initKeystoreDir() { String dataTestDir = TEST_UTIL.getDataTestDir().toString(); File keystoreDir = new File(dataTestDir, TestRESTServerSSL.class.getSimpleName() + "_keys"); @@ -213,7 +212,7 @@ private void startRESTServer(String storeType) throws Exception { REST_TEST_UTIL.startServletContainer(conf); Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); sslClient = new Client(localCluster, getTruststoreFilePath(storeType), - Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); + Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); } } diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java index 7c0294372f01..efd276d06884 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,16 +32,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestResourceFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResourceFilter.class); + HBaseClassTestRule.forClass(TestResourceFilter.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @BeforeClass @@ -49,8 +48,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(Constants.FILTER_CLASSES, DummyFilter.class.getName()); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); } @AfterClass diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java index 9a2542e70518..4e23c708ff1c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Marshaller; @@ -68,12 +67,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannerResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerResource.class); + HBaseClassTestRule.forClass(TestScannerResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestScannerResource.class); private static final TableName TABLE = TableName.valueOf("TestScannerResource"); @@ -85,8 +84,7 @@ public class TestScannerResource { private static final String COLUMN_2 = CFB + ":2"; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Marshaller marshaller; @@ -96,10 +94,10 @@ public class TestScannerResource { private static Configuration conf; static int insertData(Configuration conf, TableName tableName, String column, double prob) - throws IOException { + throws IOException { Random rng = ThreadLocalRandom.current(); byte[] k = new byte[3]; - byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); + byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { @@ -117,7 +115,7 @@ static int insertData(Configuration conf, TableName tableName, String column, do } } try (Connection conn = ConnectionFactory.createConnection(conf); - Table table = conn.getTable(tableName)) { + Table table = conn.getTable(tableName)) { table.put(puts); } return puts.size(); @@ -139,8 +137,8 @@ static int countCellSet(CellSetModel model) { private static int fullTableScan(ScannerModel model) throws IOException { model.setBatch(100); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -176,12 +174,8 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class, + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); @@ -189,25 +183,21 @@ public static void setUpBeforeClass() throws Exception { if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + 
TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0); expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5); - tableDescriptorBuilder=TableDescriptorBuilder.newBuilder(TABLE_TO_BE_DISABLED); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE_TO_BE_DISABLED); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); @@ -232,16 +222,14 @@ public void testSimpleScannerXML() throws IOException, JAXBException { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, - body); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -250,8 +238,8 @@ public void testSimpleScannerXML() throws IOException, JAXBException { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // confirm batch size conformance assertEquals(BATCH_SIZE, countCellSet(cellSet)); @@ -276,16 +264,16 @@ public void testSimpleScannerPB() throws IOException { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", - 
Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -319,16 +307,16 @@ public void testSimpleScannerBinary() throws IOException { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -340,9 +328,8 @@ public void testSimpleScannerBinary() throws IOException { // verify that data was returned assertTrue(response.getBody().length > 0); // verify that the expected X-headers are present - boolean foundRowHeader = false, foundColumnHeader = false, - foundTimestampHeader = false; - for (Header header: response.getHeaders()) { + boolean foundRowHeader = false, foundColumnHeader = false, foundTimestampHeader = false; + for (Header header : response.getHeaders()) { if (header.getName().equals("X-Row")) { foundRowHeader = true; } else if (header.getName().equals("X-Column")) { @@ -383,8 +370,8 @@ public void testTableDoesNotExist() throws IOException, JAXBException { StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + NONEXISTENT_TABLE + - "/scanner", Constants.MIMETYPE_XML, body); + Response response = + client.put("/" + NONEXISTENT_TABLE + "/scanner", Constants.MIMETYPE_XML, body); String scannerURI = response.getLocation(); assertNotNull(scannerURI); response = client.get(scannerURI, Constants.MIMETYPE_XML); @@ -407,4 +394,3 @@ public void testTableScanWithTableDisable() throws IOException { assertEquals(410, response.getCode()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 9f86b5815d41..5858b90177ba 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -78,47 +78,38 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannersWithFilters { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannersWithFilters.class); + HBaseClassTestRule.forClass(TestScannersWithFilters.class); private static final Logger LOG = LoggerFactory.getLogger(TestScannersWithFilters.class); private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters"); - private static final byte[][] ROWS_ONE = { - Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"), - Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") - }; + private static final byte[][] ROWS_ONE = { Bytes.toBytes("testRowOne-0"), + Bytes.toBytes("testRowOne-1"), Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") }; - private static final byte[][] ROWS_TWO = { - Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"), - Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") - }; + private static final byte[][] ROWS_TWO = { Bytes.toBytes("testRowTwo-0"), + Bytes.toBytes("testRowTwo-1"), Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") }; - private static final byte[][] FAMILIES = { - Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") - }; + private static final byte[][] FAMILIES = + { Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") }; - private static final byte[][] QUALIFIERS_ONE = { - Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), - Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") - }; + private static final byte[][] QUALIFIERS_ONE = + { Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), + Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") }; - private static final byte[][] QUALIFIERS_TWO = { - Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), - Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") - }; + private static final byte[][] QUALIFIERS_TWO = + { Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), + Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") }; - private static final byte[][] VALUES = { - Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") - }; + private static final byte[][] VALUES = + { Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") }; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Marshaller marshaller; @@ -130,15 +121,11 @@ public class TestScannersWithFilters { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class, - ScannerModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, + ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); - client = new 
Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (!admin.tableExists(TABLE)) { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE) @@ -221,16 +208,14 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - private static void verifyScan(Scan s, long expectedRows, long expectedKeys) - throws Exception { + private static void verifyScan(Scan s, long expectedRows, long expectedKeys) throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -239,16 +224,17 @@ private static void verifyScan(Scan s, long expectedRows, long expectedKeys) response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cells = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cells = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); int rows = cells.getRows().size(); - assertEquals("Scanned too many rows! Only expected " + expectedRows + - " total but scanned " + rows, expectedRows, rows); + assertEquals( + "Scanned too many rows! 
Only expected " + expectedRows + " total but scanned " + rows, + expectedRows, rows); for (RowModel row : cells.getRows()) { int count = row.getCells().size(); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + count, expectedKeys, count); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + count, + expectedKeys, count); } // delete the scanner @@ -256,15 +242,14 @@ private static void verifyScan(Scan s, long expectedRows, long expectedKeys) assertEquals(200, response.getCode()); } - private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { + private static void verifyScanFull(Scan s, KeyValue[] kvs) throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -273,8 +258,8 @@ private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // delete the scanner response = client.delete(scannerURI); @@ -295,36 +280,29 @@ private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { break; } - assertTrue("Scanned too many keys! Only expected " + kvs.length + - " total but already scanned " + (cells.size() + idx), - kvs.length >= idx + cells.size()); - for (CellModel cell: cells) { - assertTrue("Row mismatch", - Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); + assertTrue("Scanned too many keys! 
Only expected " + kvs.length + + " total but already scanned " + (cells.size() + idx), kvs.length >= idx + cells.size()); + for (CellModel cell : cells) { + assertTrue("Row mismatch", Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); byte[][] split = CellUtil.parseColumn(cell.getColumn()); - assertTrue("Family mismatch", - Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); - assertTrue("Qualifier mismatch", - Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); - assertTrue("Value mismatch", - Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); + assertTrue("Family mismatch", Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); + assertTrue("Qualifier mismatch", Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); + assertTrue("Value mismatch", Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); idx++; } } - assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, - kvs.length, idx); + assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx); } - private static void verifyScanNoEarlyOut(Scan s, long expectedRows, - long expectedKeys) throws Exception { + private static void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) + throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -333,8 +311,8 @@ private static void verifyScanNoEarlyOut(Scan s, long expectedRows, response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // delete the scanner response = client.delete(scannerURI); @@ -354,13 +332,12 @@ private static void verifyScanNoEarlyOut(Scan s, long expectedRows, break; } - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (j+1), expectedRows > j); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + cells.size(), expectedKeys, cells.size()); + assertTrue("Scanned too many rows! 
Only expected " + expectedRows + + " total but already scanned " + (j + 1), expectedRows > j); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + cells.size(), + expectedKeys, cells.size()); } - assertEquals("Expected " + expectedRows + " rows but scanned " + j + - " rows", expectedRows, j); + assertEquals("Expected " + expectedRows + " rows but scanned " + j + " rows", expectedRows, j); } @Test @@ -376,7 +353,7 @@ public void testNoFilter() throws Exception { // One family s = new Scan(); s.addFamily(FAMILIES[0]); - verifyScan(s, expectedRows, expectedKeys/2); + verifyScan(s, expectedRows, expectedKeys / 2); } @Test @@ -392,7 +369,7 @@ public void testPrefixFilter() throws Exception { @Test public void testPageFilter() throws Exception { // KVs in first 6 rows - KeyValue [] expectedKVs = { + KeyValue[] expectedKVs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -434,8 +411,7 @@ public void testPageFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; // Grab all 6 rows long expectedRows = 6; @@ -513,7 +489,7 @@ public void testQualifierFilter() throws Exception { long expectedRows = numRows / 2; long expectedKeys = 2; Filter f = new QualifierFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -523,7 +499,7 @@ public void testQualifierFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = 2; f = new QualifierFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -532,7 +508,7 @@ public void testQualifierFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = 4; f = new QualifierFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -542,9 +518,9 @@ public void testQualifierFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = 4; f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); - s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = + new Scan().withStartRow(HConstants.EMPTY_START_ROW).withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -553,9 +529,9 @@ public void testQualifierFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = 4; f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); - s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + 
new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = + new Scan().withStartRow(HConstants.EMPTY_START_ROW).withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -564,20 +540,19 @@ public void testQualifierFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = 2; f = new QualifierFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); - s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = + new Scan().withStartRow(HConstants.EMPTY_START_ROW).withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); // Match keys not equal to. Look across rows and fully validate the keys and ordering // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(QUALIFIERS_ONE[2])); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(QUALIFIERS_ONE[2])); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), @@ -613,18 +588,16 @@ public void testQualifierFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex. 
Filter out "test*-2" // Expect 4 keys per row across both groups - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new RegexStringComparator("test.+-2")); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new RegexStringComparator("test.+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { + kvs = new KeyValue[] { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), @@ -654,8 +627,7 @@ public void testQualifierFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -664,8 +636,8 @@ public void testRowFilter() throws Exception { // Match a single row, all keys long expectedRows = 1; long expectedKeys = colsPerRow; - Filter f = new RowFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + Filter f = + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -673,8 +645,7 @@ public void testRowFilter() throws Exception { // Match a two rows, one from each group, using regex expectedRows = 2; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator("testRow.+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("testRow.+-2")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -683,8 +654,7 @@ public void testRowFilter() throws Exception { // Expect all keys in one row expectedRows = 1; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -694,7 +664,7 @@ public void testRowFilter() throws Exception { expectedRows = 2; expectedKeys = colsPerRow; f = new RowFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -703,8 +673,8 @@ public void testRowFilter() throws Exception { // Expect all keys in all but one row expectedRows = numRows - 1; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = + new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -714,7 +684,7 @@ public void testRowFilter() throws Exception { expectedRows = numRows - 1; expectedKeys = colsPerRow; f = new RowFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -723,8 +693,7 @@ public void testRowFilter() throws Exception { // Expect all keys in all but two rows expectedRows 
= numRows - 2; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -732,12 +701,12 @@ public void testRowFilter() throws Exception { // Match rows not equal to testRowTwo-2 // Look across rows and fully validate the keys and ordering // Should see all keys in all rows but testRowTwo-2 - f = new RowFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = + new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -772,19 +741,17 @@ public void testRowFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex // Filter out everything that doesn't match "*-2" // Expect all keys in two rows - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { + kvs = new KeyValue[] { // testRowOne-2 new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -798,8 +765,7 @@ public void testRowFilter() throws Exception { new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; verifyScanFull(s, kvs); } @@ -808,8 +774,8 @@ public void testValueFilter() throws Exception { // Match group one rows long expectedRows = numRows / 2; long expectedKeys = colsPerRow; - Filter f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + Filter f = + new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -817,8 +783,7 @@ public void testValueFilter() throws Exception { // Match group two rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -826,8 +791,7 @@ public void testValueFilter() throws Exception { // Match all values using regex expectedRows = numRows; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new 
RegexStringComparator("testValue((One)|(Two))")); + f = new ValueFilter(CompareOperator.EQUAL, new RegexStringComparator("testValue((One)|(Two))")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -836,8 +800,7 @@ public void testValueFilter() throws Exception { // Expect group one rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -847,7 +810,7 @@ public void testValueFilter() throws Exception { expectedRows = numRows; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -857,7 +820,7 @@ public void testValueFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -867,7 +830,7 @@ public void testValueFilter() throws Exception { expectedRows = numRows / 2; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -877,7 +840,7 @@ public void testValueFilter() throws Exception { expectedRows = numRows; expectedKeys = colsPerRow; f = new ValueFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -886,8 +849,8 @@ public void testValueFilter() throws Exception { // Expect half rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + f = + new ValueFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -896,11 +859,11 @@ public void testValueFilter() throws Exception { // Look across rows and fully validate the keys and ordering // Should see all keys in all group two rows f = new ValueFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), @@ -921,8 +884,7 @@ public void testValueFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], 
QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -931,11 +893,11 @@ public void testSkipFilter() throws Exception { // Test for qualifier regex: "testQualifierOne-2" // Should only get rows from second group, and all keys Filter f = new SkipFilter(new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes("testQualifierOne-2")))); + new BinaryComparator(Bytes.toBytes("testQualifierOne-2")))); Scan s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), @@ -956,8 +918,7 @@ public void testSkipFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -967,30 +928,22 @@ public void testFilterList() throws Exception { // regular expression and substring filters // Use must pass all List filters = new ArrayList<>(3); - filters.add(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("One"))); + filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"))); Filter f = new FilterList(Operator.MUST_PASS_ALL, filters); Scan s = new Scan(); s.addFamily(FAMILIES[0]); s.setFilter(f); - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) - }; + KeyValue[] kvs = { new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) }; verifyScanFull(s, kvs); // Test getting everything with a MUST_PASS_ONE filter including row, qf, // val, regular expression and substring filters filters.clear(); - filters.add(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+Two.+"))); - filters.add(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("One"))); + filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+Two.+"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"))); f = new FilterList(Operator.MUST_PASS_ONE, filters); s = new Scan(); s.setFilter(f); @@ -1002,14 +955,12 @@ public void testFirstKeyOnlyFilter() throws Exception { Scan s = new Scan(); s.setFilter(new FirstKeyOnlyFilter()); // Expected KVs, the first KV from each of the remaining 6 rows - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + KeyValue[] kvs = { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), 
new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; verifyScanFull(s, kvs); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java index f7f9def44778..e792935fe605 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; import org.apache.hadoop.hbase.rest.client.Response; @@ -70,11 +69,13 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; + +@Category({ RestTests.class, MediumTests.class }) public class TestScannersWithLabels { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannersWithLabels.class); + HBaseClassTestRule.forClass(TestScannersWithLabels.class); private static final TableName TABLE = TableName.valueOf("TestScannersWithLabels"); private static final String CFA = "a"; @@ -97,7 +98,7 @@ public class TestScannersWithLabels { private static Configuration conf; private static int insertData(TableName tableName, String column, double prob) - throws IOException { + throws IOException { byte[] k = new byte[3]; byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); @@ -106,8 +107,8 @@ private static int insertData(TableName tableName, String column, double prob) Put put = new Put(Bytes.toBytes("row" + i)); put.setDurability(Durability.SKIP_WAL); put.addColumn(famAndQf[0], famAndQf[1], k); - put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" - + TOPSECRET)); + put.setCellVisibility( + new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" 
+ TOPSECRET)); puts.add(put); } try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { @@ -132,11 +133,10 @@ private static int countCellSet(CellSetModel model) { @BeforeClass public static void setUpBeforeClass() throws Exception { - SUPERUSER = User.createUserForTesting(conf, "admin", - new String[] { "supergroup" }); + SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); conf = TEST_UTIL.getConfiguration(); - conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, - SimpleScanLabelGenerator.class, ScanLabelGenerator.class); + conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, + ScanLabelGenerator.class); conf.set("hbase.superuser", SUPERUSER.getShortName()); VisibilityTestUtil.enableVisiblityLabels(conf); TEST_UTIL.startMiniCluster(1); @@ -147,20 +147,18 @@ public static void setUpBeforeClass() throws Exception { REST_TEST_UTIL.startServletContainer(conf); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, - ScannerModel.class); + ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); insertData(TABLE, COLUMN_1, 1.0); @@ -243,8 +241,8 @@ public void testSimpleScannerXMLWithLabelsThatReceivesData() throws IOException, // Respond with 204 as there are no cells to be retrieved assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response - .getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(5, countCellSet(cellSet)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java index 14768f9da504..45787e419861 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,19 +53,18 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestSchemaResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSchemaResource.class); + HBaseClassTestRule.forClass(TestSchemaResource.class); private static String TABLE1 = "TestSchemaResource1"; private static String TABLE2 = "TestSchemaResource2"; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -93,12 +92,9 @@ public static void setUpBeforeClass() throws Exception { extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); testTableSchemaModel = new TestTableSchemaModel(); - context = JAXBContext.newInstance( - ColumnSchemaModel.class, - TableSchemaModel.class); + context = JAXBContext.newInstance(ColumnSchemaModel.class, TableSchemaModel.class); } @AfterClass @@ -111,7 +107,7 @@ public static void tearDownAfterClass() throws Exception { public void tearDown() throws Exception { Admin admin = TEST_UTIL.getAdmin(); - for (String table : new String[] {TABLE1, TABLE2}) { + for (String table : new String[] { TABLE1, TABLE2 }) { TableName t = TableName.valueOf(table); if (admin.tableExists(t)) { admin.disableTable(t); @@ -128,8 +124,7 @@ private static byte[] toXML(TableSchemaModel model) throws JAXBException { return Bytes.toBytes(writer.toString()); } - private static TableSchemaModel fromXML(byte[] content) - throws JAXBException { + private static TableSchemaModel fromXML(byte[] content) throws JAXBException { return (TableSchemaModel) context.createUnmarshaller() .unmarshal(new ByteArrayInputStream(content)); } @@ -142,7 +137,7 @@ public void testTableCreateAndDeleteXML() throws IOException, JAXBException { Admin admin = TEST_UTIL.getAdmin(); assertFalse("Table " + TABLE1 + " should not exist", - admin.tableExists(TableName.valueOf(TABLE1))); + admin.tableExists(TableName.valueOf(TABLE1))); // create the table model = testTableSchemaModel.buildTestModel(TABLE1); @@ -154,8 +149,8 @@ public void testTableCreateAndDeleteXML() throws IOException, JAXBException { } response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model), extraHdr); - assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), - 201, response.getCode()); + assertEquals("put failed with csrf " + (csrfEnabled ? 
"enabled" : "disabled"), 201, + response.getCode()); // recall the same put operation but in read-only mode conf.set("hbase.rest.readonly", "true"); @@ -213,15 +208,15 @@ public void testTableCreateAndDeletePB() throws IOException { response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); assertEquals(400, response.getCode()); } - response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput(), extraHdr); - assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), - 201, response.getCode()); + response = + client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput(), extraHdr); + assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), 201, + response.getCode()); // recall the same put operation but in read-only mode conf.set("hbase.rest.readonly", "true"); - response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput(), extraHdr); + response = + client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput(), extraHdr); assertNotNull(extraHdr); assertEquals(403, response.getCode()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java index 60896446e95f..4ad065cbdc43 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,12 +101,12 @@ * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. 
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSecureRESTServer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureRESTServer.class); + HBaseClassTestRule.forClass(TestSecureRESTServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestSecureRESTServer.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -138,8 +138,7 @@ public static void setupServer() throws Exception { /* * Keytabs */ - File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() - + "_keytabs"); + File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() + "_keytabs"); if (keytabDir.exists()) { FileUtils.deleteDirectory(keytabDir); } @@ -175,15 +174,14 @@ public static void setupServer() throws Exception { conf.set("hbase.master.keytab.file", serviceKeytab.getAbsolutePath()); conf.set("hbase.unsafe.regionserver.hostname", "localhost"); conf.set("hbase.master.hostname", "localhost"); - HBaseKerberosUtils.setSecuredConfiguration(conf, - SERVICE_PRINCIPAL+ "@" + KDC.getRealm(), SPNEGO_SERVICE_PRINCIPAL+ "@" + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, SERVICE_PRINCIPAL + "@" + KDC.getRealm(), + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); setHdfsSecuredConfiguration(conf); - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - TokenProvider.class.getName(), AccessController.class.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - AccessController.class.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName(), + AccessController.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - AccessController.class.getName()); + AccessController.class.getName()); // Enable EXEC permission checking conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); conf.set("hbase.superuser", "hbase"); @@ -194,18 +192,15 @@ public static void setupServer() throws Exception { UserGroupInformation.setConfiguration(conf); updateKerberosConfiguration(conf, REST_SERVER_PRINCIPAL, SPNEGO_SERVICE_PRINCIPAL, - restServerKeytab); + restServerKeytab); // Start HDFS - TEST_UTIL.startMiniCluster(StartTestingClusterOption.builder() - .numMasters(1) - .numRegionServers(1) - .numZkServers(1) - .build()); + TEST_UTIL.startMiniCluster(StartTestingClusterOption.builder().numMasters(1).numRegionServers(1) + .numZkServers(1).build()); // Start REST - UserGroupInformation restUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); + UserGroupInformation restUser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); restUser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -215,18 +210,18 @@ public Void run() throws Exception { }); baseUrl = new URL("http://localhost:" + REST_TEST.getServletPort()); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:acl")); // Let the REST server create, read, and write globally - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, 
serviceKeytab.getAbsolutePath()); + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - AccessControlClient.grant( - conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, Action.WRITE); + AccessControlClient.grant(conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, + Action.WRITE); } catch (Throwable t) { if (t instanceof Exception) { throw (Exception) t; @@ -268,13 +263,13 @@ public static void stopServer() throws Exception { private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { // Set principal+keytab configuration for HDFS conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, - SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, - SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); // Enable token access for HDFS blocks conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); // Only use HTTPS (required because we aren't using "secure" ports) @@ -293,8 +288,8 @@ private static void setHdfsSecuredConfiguration(Configuration conf) throws Excep conf.setBoolean("ignore.secure.ports.for.testing", true); } - private static void updateKerberosConfiguration(Configuration conf, - String serverPrincipal, String spnegoPrincipal, File serverKeytab) { + private static void updateKerberosConfiguration(Configuration conf, String serverPrincipal, + String spnegoPrincipal, File serverKeytab) { KerberosName.setRules("DEFAULT"); // Enable Kerberos (pre-req) @@ -312,16 +307,15 @@ private static void updateKerberosConfiguration(Configuration conf, private static void instertData() throws IOException, InterruptedException { // Create a table, write a row to it, grant read perms to the client - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final TableName table = TableName.valueOf("publicTable"); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { TableDescriptor desc = TableDescriptorBuilder.newBuilder(table) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build(); conn.getAdmin().createTable(desc); try (Table t = conn.getTable(table)) { Put p = new Put(Bytes.toBytes("a")); @@ -341,21 +335,22 @@ public Void run() throws Exception { }); } - public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) throws Exception{ - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, 
serviceKeytab.getAbsolutePath()); + public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) + throws Exception { + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final TableName table = TableName.valueOf("publicTable"); // Read that row as the client - Pair pair = getClient(); + Pair pair = getClient(); CloseableHttpClient client = pair.getFirst(); HttpClientContext context = pair.getSecond(); - HttpGet get = new HttpGet(new URL("http://localhost:"+ REST_TEST.getServletPort()).toURI() + HttpGet get = new HttpGet(new URL("http://localhost:" + REST_TEST.getServletPort()).toURI() + "/" + table + "/a" + extraArgs); get.addHeader("Accept", "application/json"); - UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - PRINCIPAL, keytab.getAbsolutePath()); + UserGroupInformation user = + UserGroupInformation.loginUserFromKeytabAndReturnUGI(PRINCIPAL, keytab.getAbsolutePath()); String jsonResponse = user.doAs(new PrivilegedExceptionAction() { @Override public String run() throws Exception { @@ -367,8 +362,9 @@ public String run() throws Exception { } } }); - if(responseCode == HttpURLConnection.HTTP_OK) { - ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + if (responseCode == HttpURLConnection.HTTP_OK) { + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class); assertEquals(1, model.getRows().size()); RowModel row = model.getRows().get(0); @@ -386,12 +382,12 @@ public void testPositiveAuthorization() throws Exception { @Test public void testDoAs() throws Exception { - testProxy("?doAs="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + testProxy("?doAs=" + CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); } @Test public void testDoas() throws Exception { - testProxy("?doas="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + testProxy("?doas=" + CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); } @Test @@ -399,48 +395,44 @@ public void testWithoutDoAs() throws Exception { testProxy("", WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_FORBIDDEN); } - @Test public void testNegativeAuthorization() throws Exception { - Pair pair = getClient(); + Pair pair = getClient(); CloseableHttpClient client = pair.getFirst(); HttpClientContext context = pair.getSecond(); StringEntity entity = new StringEntity( - "{\"name\":\"test\", \"ColumnSchema\":[{\"name\":\"f\"}]}", ContentType.APPLICATION_JSON); - HttpPut put = new HttpPut("http://localhost:"+ REST_TEST.getServletPort() + "/test/schema"); + "{\"name\":\"test\", \"ColumnSchema\":[{\"name\":\"f\"}]}", ContentType.APPLICATION_JSON); + HttpPut put = new HttpPut("http://localhost:" + REST_TEST.getServletPort() + "/test/schema"); put.setEntity(entity); - - UserGroupInformation unprivileged = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); + UserGroupInformation unprivileged = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); unprivileged.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (CloseableHttpResponse response = client.execute(put, 
context)) { final int statusCode = response.getStatusLine().getStatusCode(); HttpEntity entity = response.getEntity(); - assertEquals("Got response: "+ EntityUtils.toString(entity), - HttpURLConnection.HTTP_FORBIDDEN, statusCode); + assertEquals("Got response: " + EntityUtils.toString(entity), + HttpURLConnection.HTTP_FORBIDDEN, statusCode); } return null; } }); } - private Pair getClient() { + private Pair getClient() { HttpClientConnectionManager pool = new PoolingHttpClientConnectionManager(); HttpHost host = new HttpHost("localhost", REST_TEST.getServletPort()); - Registry authRegistry = - RegistryBuilder.create().register(AuthSchemes.SPNEGO, - new SPNegoSchemeFactory(true, true)).build(); + Registry authRegistry = RegistryBuilder. create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); credentialsProvider.setCredentials(AuthScope.ANY, EmptyCredentials.INSTANCE); AuthCache authCache = new BasicAuthCache(); - CloseableHttpClient client = HttpClients.custom() - .setDefaultAuthSchemeRegistry(authRegistry) - .setConnectionManager(pool).build(); + CloseableHttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) + .setConnectionManager(pool).build(); HttpClientContext context = HttpClientContext.create(); context.setTargetHost(host); @@ -454,10 +446,13 @@ private Pair getClient() { private static class EmptyCredentials implements Credentials { public static final EmptyCredentials INSTANCE = new EmptyCredentials(); - @Override public String getPassword() { + @Override + public String getPassword() { return null; } - @Override public Principal getUserPrincipal() { + + @Override + public Principal getUserPrincipal() { return null; } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java index a741801df077..3da31de8431c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,16 +34,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestSecurityHeadersFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); + HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @After @@ -56,56 +55,53 @@ public void tearDown() throws Exception { public void testDefaultValues() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); String path = "/version/cluster"; Response response = client.get(path); assertThat(response.getCode(), equalTo(200)); assertThat("Header 'X-Content-Type-Options' is missing from Rest response", - response.getHeader("X-Content-Type-Options"), is(not((String)null))); + response.getHeader("X-Content-Type-Options"), is(not((String) null))); assertThat("Header 'X-Content-Type-Options' has invalid default value", - response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); + response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); assertThat("Header 'X-XSS-Protection' is missing from Rest response", - response.getHeader("X-XSS-Protection"), is(not((String)null))); + response.getHeader("X-XSS-Protection"), is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid default value", - response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); + response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + - "but it's present", - response.getHeader("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from Rest response," + - "but it's present", - response.getHeader("Content-Security-Policy"), is((String)null)); + assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + + "but it's present", response.getHeader("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from Rest response," + "but it's present", + response.getHeader("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws Exception { TEST_UTIL.getConfiguration().set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + "max-age=63072000;includeSubDomains;preload"); TEST_UTIL.getConfiguration().set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", 
REST_TEST_UTIL.getServletPort())); String path = "/version/cluster"; Response response = client.get(path); assertThat(response.getCode(), equalTo(200)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - response.getHeader("Strict-Transport-Security"), is(not((String)null))); + response.getHeader("Strict-Transport-Security"), is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - response.getHeader("Strict-Transport-Security"), - equalTo("max-age=63072000;includeSubDomains;preload")); + response.getHeader("Strict-Transport-Security"), + equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - response.getHeader("Content-Security-Policy"), is(not((String)null))); + response.getHeader("Content-Security-Policy"), is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - response.getHeader("Content-Security-Policy"), - equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + response.getHeader("Content-Security-Policy"), + equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index b30a276cd45d..a115fd17af3f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,20 +46,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestStatusResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStatusResource.class); + HBaseClassTestRule.forClass(TestStatusResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class); private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1"); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -73,11 +72,11 @@ private static void validate(StorageClusterStatusModel model) { assertNotNull(model.getDeadNodes()); assertFalse(model.getLiveNodes().isEmpty()); boolean foundMeta = false; - for (StorageClusterStatusModel.Node node: model.getLiveNodes()) { + for (StorageClusterStatusModel.Node node : model.getLiveNodes()) { assertNotNull(node.getName()); assertTrue(node.getStartCode() > 0L); assertTrue(node.getRequests() >= 0); - for (StorageClusterStatusModel.Node.Region region: node.getRegions()) { + for (StorageClusterStatusModel.Node.Region region : node.getRegions()) { if (Bytes.equals(region.getName(), META_REGION_NAME)) { foundMeta = true; } @@ -116,9 +115,8 @@ public void testGetClusterStatusXML() throws IOException, JAXBException { Response response = client.get("/status/cluster", Constants.MIMETYPE_XML); 
assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - StorageClusterStatusModel model = (StorageClusterStatusModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + StorageClusterStatusModel model = (StorageClusterStatusModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); validate(model); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index 36b2d3db6317..c27d8ee2347a 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,12 +58,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestTableResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableResource.class); + HBaseClassTestRule.forClass(TestTableResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableResource.class); @@ -74,8 +74,7 @@ public class TestTableResource { private static List regionMap; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; @@ -83,16 +82,12 @@ public class TestTableResource { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - TableModel.class, - TableInfoModel.class, - TableListModel.class, - TableRegionModel.class); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(TableModel.class, TableInfoModel.class, TableListModel.class, + TableRegionModel.class); TEST_UTIL.createMultiRegionTable(TABLE, Bytes.toBytes(COLUMN_FAMILY), NUM_REGIONS); byte[] k = new byte[3]; - byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); + byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { @@ -110,7 +105,7 @@ public static void setUpBeforeClass() throws Exception { Connection connection = TEST_UTIL.getConnection(); - Table table = connection.getTable(TABLE); + Table table = connection.getTable(TABLE); table.put(puts); table.close(); @@ -152,7 +147,7 @@ void checkTableInfo(TableInfoModel model) { TableRegionModel region = regions.next(); boolean found = false; LOG.debug("looking for region " + region.getName()); - for (HRegionLocation e: regionMap) { + for (HRegionLocation e : regionMap) { RegionInfo hri = e.getRegion(); // getRegionNameAsString uses Bytes.toStringBinary which escapes some non-printable // characters @@ -165,9 +160,8 @@ 
void checkTableInfo(TableInfoModel model) { byte[] endKey = hri.getEndKey(); ServerName serverName = e.getServerName(); InetSocketAddress sa = - new InetSocketAddress(serverName.getHostname(), serverName.getPort()); - String location = sa.getHostName() + ":" + - Integer.valueOf(sa.getPort()); + new InetSocketAddress(serverName.getHostname(), serverName.getPort()); + String location = sa.getHostName() + ":" + Integer.valueOf(sa.getPort()); assertEquals(hri.getRegionId(), region.getId()); assertTrue(Bytes.equals(startKey, region.getStartKey())); assertTrue(Bytes.equals(endKey, region.getEndKey())); @@ -191,9 +185,8 @@ public void testTableListXML() throws IOException, JAXBException { Response response = client.get("/", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - TableListModel model = (TableListModel) - context.createUnmarshaller() - .unmarshal(new ByteArrayInputStream(response.getBody())); + TableListModel model = (TableListModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); checkTableList(model); } @@ -229,12 +222,11 @@ public void testTableInfoText() throws IOException { @Test public void testTableInfoXML() throws IOException, JAXBException { - Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - TableInfoModel model = (TableInfoModel) - context.createUnmarshaller() - .unmarshal(new ByteArrayInputStream(response.getBody())); + TableInfoModel model = (TableInfoModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); checkTableInfo(model); } @@ -271,4 +263,3 @@ public void testTableNotFound() throws IOException { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java index c0e230bcb621..0a55c07bba99 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -76,11 +76,11 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestTableScan { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableScan.class); + HBaseClassTestRule.forClass(TestTableScan.class); private static final TableName TABLE = TableName.valueOf("TestScanResource"); private static final String CFA = "a"; @@ -95,8 +95,7 @@ public class TestTableScan { private static Configuration conf; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -104,17 +103,14 @@ public static void setUpBeforeClass() throws Exception { conf.set(Constants.CUSTOM_FILTERS, "CustomFilter:" + CustomFilter.class.getName()); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (!admin.tableExists(TABLE)) { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0); @@ -140,8 +136,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=10"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); @@ -151,13 +146,12 @@ public void testSimpleScannerXML() throws IOException, JAXBException { assertEquals(10, count); checkRowsNotNull(model); - //Test with no limit. + // Test with no limit. 
builder = new StringBuilder(); builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); model = (CellSetModel) ush.unmarshal(response.getStream()); @@ -165,7 +159,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { assertEquals(expectedRows1, count); checkRowsNotNull(model); - //Test with start and end row. + // Test with start and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -174,8 +168,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); model = (CellSetModel) ush.unmarshal(response.getStream()); count = TestScannerResource.countCellSet(model); @@ -186,7 +179,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { assertEquals(24, count); checkRowsNotNull(model); - //Test with start row and limit. + // Test with start row and limit. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -195,8 +188,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=15"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); model = (CellSetModel) ush.unmarshal(response.getStream()); @@ -216,24 +208,22 @@ public void testSimpleScannerJson() throws IOException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=2"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); int count = TestScannerResource.countCellSet(model); assertEquals(2, count); checkRowsNotNull(model); - //Test scanning with no limit. + // Test scanning with no limit. 
builder = new StringBuilder(); builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); model = mapper.readValue(response.getStream(), CellSetModel.class); @@ -241,7 +231,7 @@ public void testSimpleScannerJson() throws IOException { assertEquals(expectedRows2, count); checkRowsNotNull(model); - //Test with start row and end row. + // Test with start row and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -250,8 +240,7 @@ public void testSimpleScannerJson() throws IOException { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); model = mapper.readValue(response.getStream(), CellSetModel.class); RowModel startRow = model.getRows().get(0); @@ -275,12 +264,11 @@ public void testScanUsingListenerUnmarshallerXML() throws Exception { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=10"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - JAXBContext context = JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, - CellModel.class); + JAXBContext context = + JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, CellModel.class); Unmarshaller unmarshaller = context.createUnmarshaller(); final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() { @@ -293,19 +281,19 @@ public void handleRowModel(ClientSideCellSetModel helper, RowModel row) { // install the callback on all ClientSideCellSetModel instances unmarshaller.setListener(new Unmarshaller.Listener() { - @Override - public void beforeUnmarshal(Object target, Object parent) { - if (target instanceof ClientSideCellSetModel) { - ((ClientSideCellSetModel) target).setCellSetModelListener(listener); - } + @Override + public void beforeUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(listener); } + } - @Override - public void afterUnmarshal(Object target, Object parent) { - if (target instanceof ClientSideCellSetModel) { - ((ClientSideCellSetModel) target).setCellSetModelListener(null); - } + @Override + public void afterUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(null); } + } }); // create a new XML parser @@ -321,7 +309,7 @@ public void afterUnmarshal(Object target, Object parent) { @Test public void testStreamingJSON() throws Exception { - //Test with start row and end row. + // Test with start row and end row. 
StringBuilder builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -330,18 +318,17 @@ public void testStreamingJSON() throws Exception { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); int count = 0; - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); JsonFactory jfactory = new JsonFactory(mapper); JsonParser jParser = jfactory.createJsonParser(response.getStream()); boolean found = false; while (jParser.nextToken() != JsonToken.END_OBJECT) { - if(jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { + if (jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { RowModel row = jParser.readValueAs(RowModel.class); assertNotNull(row.getKey()); for (int i = 0; i < row.getCells().size(); i++) { @@ -369,14 +356,13 @@ public void testSimpleScannerProtobuf() throws Exception { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=15"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); int rowCount = readProtobufStream(response.getStream()); assertEquals(15, rowCount); - //Test with start row and end row. + // Test with start row and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -385,8 +371,7 @@ public void testSimpleScannerProtobuf() throws Exception { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_PROTOBUF); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); rowCount = readProtobufStream(response.getStream()); @@ -394,7 +379,7 @@ public void testSimpleScannerProtobuf() throws Exception { } private void checkRowsNotNull(CellSetModel model) { - for (RowModel row: model.getRows()) { + for (RowModel row : model.getRows()) { assertTrue(row.getKey() != null); assertTrue(row.getCells().size() > 0); } @@ -406,7 +391,7 @@ private void checkRowsNotNull(CellSetModel model) { * @return The number of rows in the cell set model. * @throws IOException Signals that an I/O exception has occurred. 
*/ - public int readProtobufStream(InputStream inputStream) throws IOException{ + public int readProtobufStream(InputStream inputStream) throws IOException { DataInputStream stream = new DataInputStream(inputStream); CellSetModel model = null; int rowCount = 0; @@ -441,8 +426,7 @@ public void testScanningUnknownColumnJson() throws IOException { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=a:test"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, @@ -464,8 +448,7 @@ public void testSimpleFilter() throws IOException, JAXBException { builder.append(Constants.SCAN_END_ROW + "=aay"); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("PrefixFilter('aab')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -473,7 +456,7 @@ public void testSimpleFilter() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("aab", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -481,10 +464,9 @@ public void testQualifierAndPrefixFilters() throws IOException, JAXBException { StringBuilder builder = new StringBuilder(); builder.append("/abc*"); builder.append("?"); - builder.append(Constants.SCAN_FILTER + "=" - + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + builder.append( + Constants.SCAN_FILTER + "=" + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -492,7 +474,7 @@ public void testQualifierAndPrefixFilters() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -501,9 +483,8 @@ public void testCompoundFilter() throws IOException, JAXBException { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_FILTER + "=" - + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = 
JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -511,7 +492,7 @@ public void testCompoundFilter() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -522,8 +503,7 @@ public void testCustomFilter() throws IOException, JAXBException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -531,7 +511,7 @@ public void testCustomFilter() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -542,8 +522,7 @@ public void testNegativeCustomFilter() throws IOException, JAXBException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -572,7 +551,7 @@ public void testReversed() throws IOException, JAXBException { assertEquals(24, count); List rowModels = model.getRows().subList(1, count); - //reversed + // reversed builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -597,9 +576,9 @@ public void testReversed() throws IOException, JAXBException { RowModel reversedRowModel = reversedRowModels.get(i); assertEquals(new String(rowModel.getKey(), StandardCharsets.UTF_8), - new String(reversedRowModel.getKey(), StandardCharsets.UTF_8)); + new String(reversedRowModel.getKey(), StandardCharsets.UTF_8)); assertEquals(new String(rowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8), - new String(reversedRowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(reversedRowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8)); } } @@ -610,12 +589,11 @@ public void testColumnWithEmptyQualifier() throws IOException { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new 
JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); int count = TestScannerResource.countCellSet(model); assertEquals(expectedRows3, count); @@ -631,12 +609,11 @@ public void testColumnWithEmptyQualifier() throws IOException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); model = mapper.readValue(response.getStream(), CellSetModel.class); count = TestScannerResource.countCellSet(model); assertEquals(expectedRows1 + expectedRows3, count); @@ -653,7 +630,7 @@ public CustomFilter(byte[] key) { @Override public boolean filterRowKey(Cell cell) { int cmp = Bytes.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - this.key, 0, this.key.length); + this.key, 0, this.key.length); return cmp != 0; } @@ -676,14 +653,13 @@ public static class ClientSideCellSetModel implements Serializable { * This list is not a real list; instead it will notify a listener whenever JAXB has * unmarshalled the next row. */ - @XmlElement(name="Row") + @XmlElement(name = "Row") private List row; static boolean listenerInvoked = false; /** - * Install a listener for row model on this object. If l is null, the listener - * is removed again. + * Install a listener for row model on this object. If l is null, the listener is removed again. */ public void setCellSetModelListener(final Listener l) { row = (l == null) ? null : new ArrayList() { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java index 782c89cf0bd9..39593d566aac 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,18 +47,17 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestVersionResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVersionResource.class); + HBaseClassTestRule.forClass(TestVersionResource.class); private static final Logger LOG = LoggerFactory.getLogger(TestVersionResource.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; @@ -66,11 +65,8 @@ public class TestVersionResource { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - VersionModel.class, - StorageClusterVersionModel.class); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(VersionModel.class, StorageClusterVersionModel.class); } @AfterClass @@ -123,9 +119,8 @@ public void testGetStargateVersionXML() throws IOException, JAXBException { Response response = client.get("/version", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - VersionModel model = (VersionModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + VersionModel model = (VersionModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); validate(model); LOG.info("success retrieving Stargate version as XML"); } @@ -135,10 +130,9 @@ public void testGetStargateVersionJSON() throws IOException { Response response = client.get("/version", Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(VersionModel.class, MediaType.APPLICATION_JSON_TYPE); - VersionModel model - = mapper.readValue(response.getBody(), VersionModel.class); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(VersionModel.class, + MediaType.APPLICATION_JSON_TYPE); + VersionModel model = mapper.readValue(response.getBody(), VersionModel.class); validate(model); LOG.info("success retrieving Stargate version as JSON"); } @@ -167,15 +161,12 @@ public void testGetStorageClusterVersionText() throws IOException { } @Test - public void testGetStorageClusterVersionXML() throws IOException, - JAXBException { - Response response = client.get("/version/cluster",Constants.MIMETYPE_XML); + public void testGetStorageClusterVersionXML() throws IOException, JAXBException { + Response response = client.get("/version/cluster", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - StorageClusterVersionModel clusterVersionModel = - (StorageClusterVersionModel) - 
context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + StorageClusterVersionModel clusterVersionModel = (StorageClusterVersionModel) context + .createUnmarshaller().unmarshal(new ByteArrayInputStream(response.getBody())); assertNotNull(clusterVersionModel); assertNotNull(clusterVersionModel.getVersion()); LOG.info("success retrieving storage cluster version as XML"); @@ -187,12 +178,11 @@ public void testGetStorageClusterVersionJSON() throws IOException { assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); - StorageClusterVersionModel clusterVersionModel - = mapper.readValue(response.getBody(), StorageClusterVersionModel.class); + .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); + StorageClusterVersionModel clusterVersionModel = + mapper.readValue(response.getBody(), StorageClusterVersionModel.class); assertNotNull(clusterVersionModel); assertNotNull(clusterVersionModel.getVersion()); LOG.info("success retrieving storage cluster version as JSON"); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java index a97f98afbd9e..82f3a9481c8c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.client; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InterruptedIOException; - import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; - -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; - +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.rest.Constants; import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; @@ -41,6 +35,7 @@ import org.apache.hadoop.hbase.rest.model.TableSchemaModel; import org.apache.hadoop.hbase.rest.model.VersionModel; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RemoteAdmin { @@ -57,10 +52,7 @@ public class RemoteAdmin { private static volatile Unmarshaller versionClusterUnmarshaller; /** - * Constructor - * - * @param client - * @param conf + * Constructor nn */ public RemoteAdmin(Client client, Configuration conf) { this(client, conf, null); @@ -70,17 +62,14 @@ static Unmarshaller getUnmarsheller() throws JAXBException { if (versionClusterUnmarshaller == null) { - RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance( - StorageClusterVersionModel.class).createUnmarshaller(); + RemoteAdmin.versionClusterUnmarshaller = + JAXBContext.newInstance(StorageClusterVersionModel.class).createUnmarshaller(); } return RemoteAdmin.versionClusterUnmarshaller; } /** - * Constructor - * @param client - * @param conf - * @param accessToken + * Constructor nnn */ public RemoteAdmin(Client client, Configuration conf, String accessToken) { this.client = client; @@ -100,10 +89,8 @@ public boolean isTableAvailable(String tableName) throws IOException { } /** - * @return string representing the rest api's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @return string representing the rest api's version n * if the endpoint does not exist, there is + * a timeout, or some other general failure mode */ public VersionModel getRestVersion() throws IOException { @@ -118,26 +105,24 @@ public VersionModel getRestVersion() throws IOException { int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - - VersionModel v = new VersionModel(); - return (VersionModel) v.getObjectFromMessage(response.getBody()); - case 404: - throw new IOException("REST version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() - + " returned " + code); + case 200: + + VersionModel v = new VersionModel(); + return (VersionModel) v.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("REST version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new 
InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); } } throw new IOException("get request to " + path.toString() + " timed out"); @@ -145,50 +130,47 @@ public VersionModel getRestVersion() throws IOException { /** * @return string representing the cluster's version - * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general + * failure mode */ public StorageClusterStatusModel getClusterStatus() throws IOException { - StringBuilder path = new StringBuilder(); + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); path.append('/'); - if (accessToken !=null) { - path.append(accessToken); - path.append('/'); - } + } path.append("status/cluster"); int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - StorageClusterStatusModel s = new StorageClusterStatusModel(); - return (StorageClusterStatusModel) s.getObjectFromMessage(response - .getBody()); - case 404: - throw new IOException("Cluster version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path + " returned " + code); + case 200: + StorageClusterStatusModel s = new StorageClusterStatusModel(); + return (StorageClusterStatusModel) s.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path + " returned " + code); } } throw new IOException("get request to " + path + " timed out"); } /** - * @return string representing the cluster's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @return string representing the cluster's version n * if the endpoint does not exist, there is + * a timeout, or some other general failure mode */ public StorageClusterVersionModel getClusterVersion() throws IOException { @@ -206,32 +188,30 @@ public StorageClusterVersionModel getClusterVersion() throws IOException { Response response = client.get(path.toString(), Constants.MIMETYPE_XML); code = response.getCode(); switch (code) { - case 200: - try { - - return (StorageClusterVersionModel) getUnmarsheller().unmarshal( - getInputStream(response)); - } catch (JAXBException jaxbe) { - - throw new IOException( - "Issue parsing StorageClusterVersionModel object in XML form: " - + jaxbe.getLocalizedMessage(), jaxbe); - } - case 404: - throw new IOException("Cluster version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException(path.toString() + " request returned " + code); + case 200: + try { + + return (StorageClusterVersionModel) 
getUnmarsheller() + .unmarshal(getInputStream(response)); + } catch (JAXBException jaxbe) { + + throw new IOException("Issue parsing StorageClusterVersionModel object in XML form: " + + jaxbe.getLocalizedMessage(), jaxbe); + } + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException(path.toString() + " request returned " + code); } } - throw new IOException("get request to " + path.toString() - + " request timed out"); + throw new IOException("get request to " + path.toString() + " request timed out"); } /** @@ -254,19 +234,19 @@ public boolean isTableAvailable(byte[] tableName) throws IOException { Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - return true; - case 404: - return false; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() + " returned " + code); + case 200: + return true; + case 404: + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); } } throw new IOException("get request to " + path.toString() + " timed out"); @@ -277,8 +257,7 @@ public boolean isTableAvailable(byte[] tableName) throws IOException { * @param desc table descriptor for table * @throws IOException if a remote or network exception occurs */ - public void createTable(TableDescriptor desc) - throws IOException { + public void createTable(TableDescriptor desc) throws IOException { TableSchemaModel model = new TableSchemaModel(desc); StringBuilder path = new StringBuilder(); path.append('/'); @@ -291,21 +270,21 @@ public void createTable(TableDescriptor desc) path.append("schema"); int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput()); + Response response = + client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); code = response.getCode(); switch (code) { - case 201: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("create request to " + path.toString() + " returned " + code); + case 201: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("create request to " + path.toString() + " returned " + code); } } throw new IOException("create request to " + path.toString() + " timed out"); @@ -325,7 +304,7 @@ public void deleteTable(final String tableName) throws IOException { * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ - public void deleteTable(final byte [] tableName) throws IOException { + public void deleteTable(final byte[] tableName) throws IOException { StringBuilder 
path = new StringBuilder(); path.append('/'); if (accessToken != null) { @@ -340,27 +319,25 @@ public void deleteTable(final byte [] tableName) throws IOException { Response response = client.delete(path.toString()); code = response.getCode(); switch (code) { - case 200: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("delete request to " + path.toString() + " returned " + code); + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("delete request to " + path.toString() + " returned " + code); } } throw new IOException("delete request to " + path.toString() + " timed out"); } /** - * @return string representing the cluster's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @return string representing the cluster's version n * if the endpoint does not exist, there is + * a timeout, or some other general failure mode */ public TableListModel getTableList() throws IOException { @@ -375,34 +352,30 @@ public TableListModel getTableList() throws IOException { for (int i = 0; i < maxRetries; i++) { // Response response = client.get(path.toString(), // Constants.MIMETYPE_XML); - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - TableListModel t = new TableListModel(); - return (TableListModel) t.getObjectFromMessage(response.getBody()); - case 404: - throw new IOException("Table list not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() - + " request returned " + code); + case 200: + TableListModel t = new TableListModel(); + return (TableListModel) t.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Table list not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " request returned " + code); } } - throw new IOException("get request to " + path.toString() - + " request timed out"); + throw new IOException("get request to " + path.toString() + " request timed out"); } /** * Convert the REST server's response to an XML reader. - * * @param response The REST server's response. * @return A reader over the parsed XML document. * @throws IOException If the document fails to parse diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 4ae6d243752b..bb80996b3194 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -96,7 +95,7 @@ public class RemoteHTable implements Table { @SuppressWarnings("rawtypes") protected String buildRowSpec(final byte[] row, final Map familyMap, final long startTime, - final long endTime, final int maxVersions) { + final long endTime, final int maxVersions) { StringBuffer sb = new StringBuffer(); sb.append('/'); sb.append(Bytes.toString(name)); @@ -665,7 +664,7 @@ public boolean isAutoFlush() { } private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) - throws IOException { + throws IOException { // column to check-the-value put.add(new KeyValue(row, family, qualifier, value)); @@ -701,7 +700,7 @@ private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[ } private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, - Delete delete) throws IOException { + Delete delete) throws IOException { Put put = new Put(row, HConstants.LATEST_TIMESTAMP, delete.getFamilyCellMap()); // column to check-the-value put.add(new KeyValue(row, family, qualifier, value)); @@ -768,13 +767,13 @@ public Result append(Append append) throws IOException { @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) - throws IOException { + throws IOException { throw new IOException("incrementColumnValue not supported"); } @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, - Durability durability) throws IOException { + Durability durability) throws IOException { throw new IOException("incrementColumnValue not supported"); } @@ -785,7 +784,7 @@ public void batch(List actions, Object[] results) throws IOExcept @Override public void batchCallback(List actions, Object[] results, - Batch.Callback callback) throws IOException, InterruptedException { + Batch.Callback callback) throws IOException, InterruptedException { throw new IOException("batchCallback not supported"); } @@ -796,14 +795,14 @@ public CoprocessorRpcChannel coprocessorService(byte[] row) { @Override public Map coprocessorService(Class service, byte[] startKey, - byte[] endKey, Batch.Call callable) throws ServiceException, Throwable { + byte[] endKey, Batch.Call callable) throws ServiceException, Throwable { throw new UnsupportedOperationException("coprocessorService not implemented"); } @Override public void coprocessorService(Class service, byte[] startKey, - byte[] endKey, Batch.Call callable, Batch.Callback callback) - throws ServiceException, Throwable { + byte[] endKey, Batch.Call callable, Batch.Callback callback) + throws ServiceException, Throwable { throw new UnsupportedOperationException("coprocessorService not implemented"); } @@ -814,15 +813,15 @@ public Result mutateRow(RowMutations rm) throws IOException { @Override public Map batchCoprocessorService( - Descriptors.MethodDescriptor method, Message request, byte[] startKey, byte[] endKey, - R responsePrototype) throws ServiceException, Throwable { + Descriptors.MethodDescriptor method, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { throw new UnsupportedOperationException("batchCoprocessorService not implemented"); } @Override public void batchCoprocessorService(Descriptors.MethodDescriptor method, - Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) - throws ServiceException, Throwable { + Message request, byte[] 
startKey, byte[] endKey, R responsePrototype, Callback callback)
+    throws ServiceException, Throwable {
     throw new UnsupportedOperationException("batchCoprocessorService not implemented");
   }

@@ -873,8 +872,8 @@ private class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {

     @Override
     public CheckAndMutateBuilder qualifier(byte[] qualifier) {
-      this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" +
-        " an empty byte array, or just do not call this method if you want a null qualifier");
+      this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using"
+        + " an empty byte array, or just do not call this method if you want a null qualifier");
       return this;
     }

diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
index 8e8ba36c834d..0db1b4afa1dc 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -45,12 +45,12 @@
 /**
  * Tests {@link RemoteAdmin} retries.
  */
-@Category({RestTests.class, SmallTests.class})
+@Category({ RestTests.class, SmallTests.class })
 public class TestRemoteAdminRetries {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestRemoteAdminRetries.class);
+    HBaseClassTestRule.forClass(TestRemoteAdminRetries.class);

   private static final int SLEEP_TIME = 50;
   private static final int RETRIES = 3;
@@ -78,7 +78,7 @@ public void setup() throws Exception {
   }

   @Test
-  public void testFailingGetRestVersion() throws Exception {
+  public void testFailingGetRestVersion() throws Exception {
     testTimedOutGetCall(new CallExecutor() {
       @Override
       public void run() throws Exception {
@@ -88,7 +88,7 @@ public void run() throws Exception {
   }

   @Test
-  public void testFailingGetClusterStatus() throws Exception {
+  public void testFailingGetClusterStatus() throws Exception {
     testTimedOutGetCall(new CallExecutor() {
       @Override
       public void run() throws Exception {
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
index 55d6d904eedf..9e8c9c475d2b 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -49,11 +49,11 @@
 /**
  * Test RemoteHTable retries.
  */
-@Category({RestTests.class, SmallTests.class})
+@Category({ RestTests.class, SmallTests.class })
 public class TestRemoteHTableRetries {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestRemoteHTableRetries.class);
+    HBaseClassTestRule.forClass(TestRemoteHTableRetries.class);

   private static final int SLEEP_TIME = 50;
   private static final int RETRIES = 3;
@@ -75,17 +75,14 @@ public void setup() throws Exception {
     Response response = new Response(509);
     when(client.get(anyString(), anyString())).thenReturn(response);
     when(client.delete(anyString())).thenReturn(response);
-    when(client.put(anyString(), anyString(), any())).thenReturn(
-      response);
-    when(client.post(anyString(), anyString(), any())).thenReturn(
-      response);
+    when(client.put(anyString(), anyString(), any())).thenReturn(response);
+    when(client.post(anyString(), anyString(), any())).thenReturn(response);

     Configuration configuration = TEST_UTIL.getConfiguration();
     configuration.setInt("hbase.rest.client.max.retries", RETRIES);
     configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME);

-    remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(),
-      "MyTable");
+    remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(), "MyTable");
   }

   @After
@@ -156,8 +153,8 @@ public void testCheckAndPut() throws Exception {
       public void run() throws Exception {
         Put put = new Put(ROW_1);
         put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
-        remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1)
-          .ifEquals(VALUE_1).thenPut(put);
+        remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1)
+          .thenPut(put);
       }
     });
     verify(client, times(RETRIES)).put(anyString(), anyString(), any());
@@ -170,9 +167,9 @@ public void testCheckAndDelete() throws Exception {
       public void run() throws Exception {
         Put put = new Put(ROW_1);
         put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
-        Delete delete= new Delete(ROW_1);
-        remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1)
-          .ifEquals(VALUE_1).thenDelete(delete);
+        Delete delete = new Delete(ROW_1);
+        remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1)
+          .thenDelete(delete);
       }
     });
   }
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index d37c8113fbf8..021c03ce85b8 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -60,19 +60,19 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;

-@Category({RestTests.class, MediumTests.class})
+@Category({ RestTests.class, MediumTests.class })
 public class TestRemoteTable {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestRemoteTable.class);
+    HBaseClassTestRule.forClass(TestRemoteTable.class);

   // Verify that invalid URL characters and arbitrary bytes are escaped when
   // constructing REST URLs per HBASE-7621. RemoteHTable should support row keys
   // and qualifiers containing any byte for all table operations.
private static final String INVALID_URL_CHARS_1 = - "|\"\\^{}\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009\u000B\u000C"; + "|\"\\^{}\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009\u000B\u000C"; - // HColumnDescriptor prevents certain characters in column names. The following + // HColumnDescriptor prevents certain characters in column names. The following // are examples of characters are allowed in column names but are not valid in // URLs. private static final String INVALID_URL_CHARS_2 = "|^{}\u0242"; @@ -81,12 +81,12 @@ public class TestRemoteTable { private static final String VALID_TABLE_NAME_CHARS = "_-."; private static final TableName TABLE = - TableName.valueOf("TestRemoteTable" + VALID_TABLE_NAME_CHARS); + TableName.valueOf("TestRemoteTable" + VALID_TABLE_NAME_CHARS); private static final byte[] ROW_1 = Bytes.toBytes("testrow1" + INVALID_URL_CHARS_1); private static final byte[] ROW_2 = Bytes.toBytes("testrow2" + INVALID_URL_CHARS_1); private static final byte[] ROW_3 = Bytes.toBytes("testrow3" + INVALID_URL_CHARS_1); - private static final byte[] ROW_4 = Bytes.toBytes("testrow4"+ INVALID_URL_CHARS_1); + private static final byte[] ROW_4 = Bytes.toBytes("testrow4" + INVALID_URL_CHARS_1); private static final byte[] COLUMN_1 = Bytes.toBytes("a" + INVALID_URL_CHARS_2); private static final byte[] COLUMN_2 = Bytes.toBytes("b" + INVALID_URL_CHARS_2); @@ -102,8 +102,7 @@ public class TestRemoteTable { private static final long TS_1 = TS_2 - ONE_HOUR; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private RemoteHTable remoteTable; @BeforeClass @@ -113,7 +112,7 @@ public static void setUpBeforeClass() throws Exception { } @Before - public void before() throws Exception { + public void before() throws Exception { Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { if (admin.isTableEnabled(TABLE)) { @@ -139,9 +138,8 @@ public void before() throws Exception { put.addColumn(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2); table.put(put); } - remoteTable = new RemoteHTable( - new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())), + remoteTable = + new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration(), TABLE.toBytes()); } @@ -251,7 +249,7 @@ public void testGet() throws IOException { get.readVersions(2); result = remoteTable.get(get); int count = 0; - for (Cell kv: result.listCells()) { + for (Cell kv : result.listCells()) { if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) { assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1 count++; @@ -275,7 +273,7 @@ public void testMultiGet() throws Exception { assertEquals(1, results[0].size()); assertEquals(2, results[1].size()); - //Test Versions + // Test Versions gets = new ArrayList<>(2); Get g = new Get(ROW_1); g.readVersions(3); @@ -287,7 +285,7 @@ public void testMultiGet() throws Exception { assertEquals(1, results[0].size()); assertEquals(3, results[1].size()); - //404 + // 404 gets = new ArrayList<>(1); gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); results = remoteTable.get(gets); @@ -345,7 +343,7 @@ public void testPut() throws IOException { assertTrue(Bytes.equals(VALUE_2, value)); assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable" + VALID_TABLE_NAME_CHARS), - 
remoteTable.getTableName())); + remoteTable.getTableName())); } @Test @@ -481,7 +479,7 @@ public void testScanner() throws IOException { scanner.close(); - scanner = remoteTable.getScanner(COLUMN_1,QUALIFIER_1); + scanner = remoteTable.getScanner(COLUMN_1, QUALIFIER_1); results = scanner.next(4); assertNotNull(results); assertEquals(4, results.length); @@ -506,18 +504,18 @@ public void testCheckAndDelete() throws IOException { assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length); Delete delete = new Delete(ROW_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenDelete(delete); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenDelete(delete); assertFalse(remoteTable.exists(get)); Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); remoteTable.put(put); - assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenPut(put)); - assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_2).thenPut(put)); + assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenPut(put)); + assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_2) + .thenPut(put)); } /** @@ -555,7 +553,7 @@ public void testIteratorScaner() throws IOException { * Test a some methods of class Response. */ @Test - public void testResponse(){ + public void testResponse() { Response response = new Response(200); assertEquals(200, response.getCode()); Header[] headers = new Header[2]; @@ -576,10 +574,8 @@ public void testResponse(){ } /** - * Tests scanner with limitation - * limit the number of rows each scanner scan fetch at life time - * The number of rows returned should be equal to the limit - * @throws Exception + * Tests scanner with limitation limit the number of rows each scanner scan fetch at life time The + * number of rows returned should be equal to the limit n */ @Test public void testLimitedScan() throws Exception { @@ -621,7 +617,6 @@ public void testLimitedScan() throws Exception { /** * Tests keeping a HBase scanner alive for long periods of time. Each call to next() should reset * the ConnectionCache timeout for the scanner's connection. - * * @throws Exception if starting the servlet container or disabling or truncating the table fails */ @Test @@ -644,8 +639,8 @@ public void testLongLivedScan() throws Exception { TEST_UTIL.getAdmin().disableTable(TABLE); TEST_UTIL.getAdmin().truncateTable(TABLE, false); - remoteTable = new RemoteHTable( - new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), + remoteTable = + new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration(), TABLE.toBytes()); String row = "testrow"; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java index 26190f66f472..2618ff54180b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
@@ -46,14 +46,14 @@ public class TestXmlParsing {

   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestXmlParsing.class);
+    HBaseClassTestRule.forClass(TestXmlParsing.class);

   private static final Logger LOG = LoggerFactory.getLogger(TestXmlParsing.class);

   @Test
   public void testParsingClusterVersion() throws Exception {
     final String xml = ""
-        + "";
+      + "";
     Client client = mock(Client.class);
     RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create(), null);
     Response resp = new Response(200, null, Bytes.toBytes(xml));
@@ -66,10 +65,9 @@ public void testParsingClusterVersion() throws Exception {

   @Test
   public void testFailOnExternalEntities() throws Exception {
-    final String externalEntitiesXml =
-      ""
-      + " ] >"
-      + " &xee;";
+    final String externalEntitiesXml = ""
+      + " ] >"
+      + " &xee;";
     Client client = mock(Client.class);
     RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create(), null);
     Response resp = new Response(200, null, Bytes.toBytes(externalEntitiesXml));
@@ -80,9 +79,9 @@ public void testFailOnExternalEntities() throws Exception {
       admin.getClusterVersion();
       fail("Expected getClusterVersion() to throw an exception");
     } catch (IOException e) {
-      assertEquals("Cause of exception ought to be a failure to parse the stream due to our " +
-        "invalid external entity. Make sure this isn't just a false positive due to " +
-        "implementation. see HBASE-19020.", UnmarshalException.class, e.getCause().getClass());
+      assertEquals("Cause of exception ought to be a failure to parse the stream due to our "
+        + "invalid external entity. Make sure this isn't just a false positive due to "
+        + "implementation. see HBASE-19020.", UnmarshalException.class, e.getCause().getClass());
       final String exceptionText = StringUtils.stringifyException(e);
       final String expectedText = "\"xee\"";
       LOG.debug("exception text: '" + exceptionText + "'", e);
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
index b8305d56a180..829091708573 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -32,12 +32,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestCellModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellModel.class); + HBaseClassTestRule.forClass(TestCellModel.class); private static final long TIMESTAMP = 1245219839331L; private static final byte[] COLUMN = Bytes.toBytes("testcolumn"); @@ -45,11 +45,9 @@ public class TestCellModel extends TestModelBase { public TestCellModel() throws Exception { super(CellModel.class); - AS_XML = - "dGVzdHZhbHVl"; - AS_PB = - "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; + AS_XML = "dGVzdHZhbHVl"; + AS_PB = "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; AS_JSON = "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}"; @@ -108,4 +106,3 @@ public void testToString() throws Exception { assertTrue(StringUtils.contains(cellModel.toString(), expectedColumn)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java index 1d40effb47d0..38f0f43c0ceb 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestCellSetModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellSetModel.class); + HBaseClassTestRule.forClass(TestCellSetModel.class); private static final byte[] ROW1 = Bytes.toBytes("testrow1"); private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1"); @@ -50,41 +50,30 @@ public class TestCellSetModel extends TestModelBase { public TestCellSetModel() throws Exception { super(CellSetModel.class); - AS_XML = - "" + - "" + - "" + - "dGVzdHZhbHVlMQ==" + - "" + - "" + - "" + - "dGVzdHZhbHVlMg==" + - "" + - "dGVzdHZhbHVlMw==" + - "" + - ""; - - AS_PB = - "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + - "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + - "Igp0ZXN0dmFsdWUz"; - - AS_XML = - "" + - "" + - "dGVzdHZhbHVlMQ==" + - "" + - "dGVzdHZhbHVlMg==" + - "dGVzdHZhbHVlMw==" + - ""; - - AS_JSON = - "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + - "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + - "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + - "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + - "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + - "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; + AS_XML = "" + "" + + "" + "dGVzdHZhbHVlMQ==" + + "" + "" + + "" + "dGVzdHZhbHVlMg==" + + "" + "dGVzdHZhbHVlMw==" + + "" + ""; + + AS_PB = "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + + 
"MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + + "Igp0ZXN0dmFsdWUz"; + + AS_XML = "" + + "" + + "dGVzdHZhbHVlMQ==" + + "" + "dGVzdHZhbHVlMg==" + + "dGVzdHZhbHVlMw==" + + ""; + + AS_JSON = "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + + "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + + "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + + "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; } @Override @@ -147,4 +136,3 @@ public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java index a52358cbe525..0001abe02d0b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,12 +27,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestColumnSchemaModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestColumnSchemaModel.class); + HBaseClassTestRule.forClass(TestColumnSchemaModel.class); protected static final String COLUMN_NAME = "testcolumn"; protected static final boolean BLOCKCACHE = true; @@ -45,15 +45,13 @@ public class TestColumnSchemaModel extends TestModelBase { public TestColumnSchemaModel() throws Exception { super(ColumnSchemaModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_JSON = - "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + - "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + - "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; + AS_JSON = "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + + "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + + "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; } @Override @@ -87,4 +85,3 @@ protected void checkModel(ColumnSchemaModel model) { public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java index 63124113da51..4cfe70e06399 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
@@ -52,8 +52,7 @@ protected TestModelBase(Class clazz) throws Exception {
     super();
     this.clazz = clazz;
     context = new JAXBContextResolver().getContext(clazz);
-    mapper = new JacksonJaxbJsonProvider().locateMapper(clazz,
-        MediaType.APPLICATION_JSON_TYPE);
+    mapper = new JacksonJaxbJsonProvider().locateMapper(clazz, MediaType.APPLICATION_JSON_TYPE);
   }

   protected abstract T buildTestModel();
@@ -68,19 +67,17 @@ protected String toXML(T model) throws JAXBException {
   protected String toJSON(T model) throws JAXBException, IOException {
     StringWriter writer = new StringWriter();
     mapper.writeValue(writer, model);
-// original marshaller, uncomment this and comment mapper to verify backward compatibility
-// ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer);
+    // original marshaller, uncomment this and comment mapper to verify backward compatibility
+    // ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer);
     return writer.toString();
   }

   public T fromJSON(String json) throws JAXBException, IOException {
-    return (T)
-      mapper.readValue(json, clazz);
+    return (T) mapper.readValue(json, clazz);
   }

   public T fromXML(String xml) throws JAXBException {
-    return (T)
-      context.createUnmarshaller().unmarshal(new StringReader(xml));
+    return (T) context.createUnmarshaller().unmarshal(new StringReader(xml));
   }

   @SuppressWarnings("unused")
@@ -88,14 +85,12 @@ protected byte[] toPB(ProtobufMessageHandler model) {
     return model.createProtobufOutput();
   }

-  protected T fromPB(String pb) throws
-    Exception {
-    return (T)clazz.getMethod("getObjectFromMessage", byte[].class).invoke(
-      clazz.getDeclaredConstructor().newInstance(),
-      Base64.getDecoder().decode(AS_PB));
+  protected T fromPB(String pb) throws Exception {
+    return (T) clazz.getMethod("getObjectFromMessage", byte[].class)
+      .invoke(clazz.getDeclaredConstructor().newInstance(), Base64.getDecoder().decode(AS_PB));
   }

-  protected abstract void checkModel(T model);
+  protected abstract void checkModel(T model);

   @Test
   public void testBuildModel() throws Exception {
@@ -124,7 +119,7 @@ public void testToJSON() throws Exception {
       ObjectNode expObj = mapper.readValue(AS_JSON, ObjectNode.class);
       ObjectNode actObj = mapper.readValue(toJSON(buildTestModel()), ObjectNode.class);
       assertEquals(expObj, actObj);
-    } catch(Exception e) {
+    } catch (Exception e) {
       assertEquals(AS_JSON, toJSON(buildTestModel()));
     }
   }
@@ -134,4 +129,3 @@ public void testFromJSON() throws Exception {
     checkModel(fromJSON(AS_JSON));
   }
 }
-
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java
index e7a9188b5e35..831a5642fb64 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -29,34 +29,33 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestNamespacesInstanceModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesInstanceModel.class); + HBaseClassTestRule.forClass(TestNamespacesInstanceModel.class); - public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); + public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); public static final String NAMESPACE_NAME = "namespaceName"; public TestNamespacesInstanceModel() throws Exception { super(NamespacesInstanceModel.class); - NAMESPACE_PROPERTIES.put("KEY_1","VALUE_1"); - NAMESPACE_PROPERTIES.put("KEY_2","VALUE_2"); - NAMESPACE_PROPERTIES.put("NAME","testNamespace"); + NAMESPACE_PROPERTIES.put("KEY_1", "VALUE_1"); + NAMESPACE_PROPERTIES.put("KEY_2", "VALUE_2"); + NAMESPACE_PROPERTIES.put("NAME", "testNamespace"); - AS_XML = - "" + - "NAMEtestNamespace" + - "KEY_2VALUE_2" + - "KEY_1VALUE_1" + - ""; + AS_XML = "" + + "NAMEtestNamespace" + + "KEY_2VALUE_2" + + "KEY_1VALUE_1" + + ""; AS_PB = "ChUKBE5BTUUSDXRlc3ROYW1lc3BhY2UKEAoFS0VZXzESB1ZBTFVFXzEKEAoFS0VZXzISB1ZBTFVFXzI="; - AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + - "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; + AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + + "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; } @Override @@ -64,9 +63,9 @@ protected NamespacesInstanceModel buildTestModel() { return buildTestModel(NAMESPACE_NAME, NAMESPACE_PROPERTIES); } - public NamespacesInstanceModel buildTestModel(String namespace, Map properties) { + public NamespacesInstanceModel buildTestModel(String namespace, Map properties) { NamespacesInstanceModel model = new NamespacesInstanceModel(); - for(String key: properties.keySet()){ + for (String key : properties.keySet()) { model.addProperty(key, properties.get(key)); } return model; @@ -78,12 +77,12 @@ protected void checkModel(NamespacesInstanceModel model) { } public void checkModel(NamespacesInstanceModel model, String namespace, - Map properties) { - Map modProperties = model.getProperties(); + Map properties) { + Map modProperties = model.getProperties(); assertEquals(properties.size(), modProperties.size()); // Namespace name comes from REST URI, not properties. assertNotSame(namespace, model.getNamespaceName()); - for(String property: properties.keySet()){ + for (String property : properties.keySet()) { assertEquals(properties.get(property), modProperties.get(property)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java index 5da776ab7350..30e0c44bcd86 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestNamespacesModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespacesModel.class); + HBaseClassTestRule.forClass(TestNamespacesModel.class); public static final String NAMESPACE_NAME_1 = "testNamespace1"; public static final String NAMESPACE_NAME_2 = "testNamespace2"; @@ -42,10 +42,9 @@ public class TestNamespacesModel extends TestModelBase { public TestNamespacesModel() throws Exception { super(NamespacesModel.class); - AS_XML = - "" + - "testNamespace1" + - "testNamespace2"; + AS_XML = "" + + "testNamespace1" + + "testNamespace2"; AS_PB = "Cg50ZXN0TmFtZXNwYWNlMQoOdGVzdE5hbWVzcGFjZTI="; @@ -71,7 +70,7 @@ protected void checkModel(NamespacesModel model) { public void checkModel(NamespacesModel model, String... namespaceName) { List namespaces = model.getNamespaces(); assertEquals(namespaceName.length, namespaces.size()); - for(int i = 0; i < namespaceName.length; i++){ + for (int i = 0; i < namespaceName.length; i++) { assertTrue(namespaces.contains(namespaceName[i])); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java index 808b77bc9d6b..ad539e128481 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,12 +33,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRowModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowModel.class); + HBaseClassTestRule.forClass(TestRowModel.class); private static final byte[] ROW1 = Bytes.toBytes("testrow1"); private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1"); @@ -52,9 +52,8 @@ public TestRowModel() throws Exception { + "dGVzdHZhbHVlMQ==" + ""; - AS_JSON = - "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + - "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; + AS_JSON = "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + + "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; } @Override @@ -79,7 +78,7 @@ protected void checkModel(RowModel model) { @Override public void testFromPB() throws Exception { - //do nothing row model has no PB + // do nothing row model has no PB } @Test @@ -103,4 +102,3 @@ public void testToString() throws Exception { assertTrue(StringUtils.contains(rowModel.toString(), expectedRowKey)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java index 4835b7b0fc93..a2617c812d55 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.JsonMappingException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.rest.ScannerResultGenerator; import org.apache.hadoop.hbase.testclassification.RestTests; @@ -32,11 +31,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestScannerModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerModel.class); + HBaseClassTestRule.forClass(TestScannerModel.class); private static final String PRIVATE = "private"; private static final String PUBLIC = "public"; @@ -63,11 +62,10 @@ public TestScannerModel() throws Exception { AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\"," + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\"," + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"]," - +"\"labels\":[\"private\",\"public\"]," - +"\"limit\":10000}"; + + "\"labels\":[\"private\",\"public\"]," + "\"limit\":10000}"; AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mfJDj" - +"/////B0joB1IHcHJpdmF0ZVIGcHVibGljWABgkE4="; + + "/////B0joB1IHcHJpdmF0ZVIGcHVibGljWABgkE4="; } @Override diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java index 2611a067437a..8310232890dd 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,58 +30,54 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestStorageClusterStatusModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStorageClusterStatusModel.class); + HBaseClassTestRule.forClass(TestStorageClusterStatusModel.class); public TestStorageClusterStatusModel() throws Exception { super(StorageClusterStatusModel.class); - AS_XML = - "" + - "" + - "" + - "" + - "" + - "" + - ""; + AS_XML = "" + + "" + + "" + + "" + + "" + "" + + ""; - AS_PB = - "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + - "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + - "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; + AS_PB = "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + + "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + + "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; - - //Using jackson will break json backward compatibilty for this representation - //but the original one was broken as it would only print one Node element - //so the format itself was broken + // Using jackson will break json backward compatibilty for this representation + // but the original one was broken as it would only print one Node element + // so the format itself was broken AS_JSON = - "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + - "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + - "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + - "\"readRequestsCount\":1,\"cpRequestsCount\":1,\"writeRequestsCount\":2," + - "\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1," + - "\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}],\"requests\":0," + - "\"startCode\":1245219839331,\"heapSizeMB\":128,\"maxHeapSizeMB\":1024}," + - "{\"name\":\"test2\",\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\"," + - "\"stores\":1,\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0," + - "\"storefileIndexSizeKB\":0,\"readRequestsCount\":1,\"cpRequestsCount\":1," + - "\"writeRequestsCount\":2,\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1," + - "\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}]," + - "\"requests\":0,\"startCode\":1245239331198,\"heapSizeMB\":512," + - "\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; + "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + + "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + + "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + + "\"readRequestsCount\":1,\"cpRequestsCount\":1,\"writeRequestsCount\":2," + + "\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1," + + "\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}],\"requests\":0," + + "\"startCode\":1245219839331,\"heapSizeMB\":128,\"maxHeapSizeMB\":1024}," + + "{\"name\":\"test2\",\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\"," + + "\"stores\":1,\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0," + + "\"storefileIndexSizeKB\":0,\"readRequestsCount\":1,\"cpRequestsCount\":1," + 
+ "\"writeRequestsCount\":2,\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1," + + "\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}]," + + "\"requests\":0,\"startCode\":1245239331198,\"heapSizeMB\":512," + + "\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; } @Override @@ -90,11 +86,11 @@ protected StorageClusterStatusModel buildTestModel() { model.setRegions(2); model.setRequests(0); model.setAverageLoad(1.0); - model.addLiveNode("test1", 1245219839331L, 128, 1024) - .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1); - model.addLiveNode("test2", 1245239331198L, 512, 1024) - .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME+",,1246000043724"),1, 1, 0, 0, 0, - 1, 1, 2, 1, 1, 1, 1, 1); + model.addLiveNode("test1", 1245219839331L, 128, 1024).addRegion(Bytes.toBytes("hbase:root,,0"), + 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1); + model.addLiveNode("test2", 1245239331198L, 512, 1024).addRegion( + Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, + 1, 1); return model; } @@ -103,18 +99,15 @@ protected void checkModel(StorageClusterStatusModel model) { assertEquals(2, model.getRegions()); assertEquals(0, model.getRequests()); assertEquals(1.0, model.getAverageLoad(), 0.0); - Iterator nodes = - model.getLiveNodes().iterator(); + Iterator nodes = model.getLiveNodes().iterator(); StorageClusterStatusModel.Node node = nodes.next(); assertEquals("test1", node.getName()); assertEquals(1245219839331L, node.getStartCode()); assertEquals(128, node.getHeapSizeMB()); assertEquals(1024, node.getMaxHeapSizeMB()); - Iterator regions = - node.getRegions().iterator(); + Iterator regions = node.getRegions().iterator(); StorageClusterStatusModel.Node.Region region = regions.next(); - assertTrue(Bytes.toString(region.getName()).equals( - "hbase:root,,0")); + assertTrue(Bytes.toString(region.getName()).equals("hbase:root,,0")); assertEquals(1, region.getStores()); assertEquals(1, region.getStorefiles()); assertEquals(0, region.getStorefileSizeMB()); @@ -135,8 +128,7 @@ protected void checkModel(StorageClusterStatusModel model) { assertEquals(1024, node.getMaxHeapSizeMB()); regions = node.getRegions().iterator(); region = regions.next(); - assertEquals(Bytes.toString(region.getName()), - TableName.META_TABLE_NAME+",,1246000043724"); + assertEquals(Bytes.toString(region.getName()), TableName.META_TABLE_NAME + ",,1246000043724"); assertEquals(1, region.getStores()); assertEquals(1, region.getStorefiles()); assertEquals(0, region.getStorefileSizeMB()); @@ -154,4 +146,3 @@ protected void checkModel(StorageClusterStatusModel model) { assertFalse(nodes.hasNext()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java index b6101462aa09..6004cedcebcc 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,20 +25,19 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestStorageClusterVersionModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStorageClusterVersionModel.class); + HBaseClassTestRule.forClass(TestStorageClusterVersionModel.class); private static final String VERSION = "0.0.1-testing"; public TestStorageClusterVersionModel() throws Exception { super(StorageClusterVersionModel.class); - AS_XML = - ""+ - ""; + AS_XML = "" + + ""; AS_JSON = "{\"Version\": \"0.0.1-testing\"}"; } @@ -57,7 +56,6 @@ protected void checkModel(StorageClusterVersionModel model) { @Override public void testFromPB() throws Exception { - //ignore test no pb + // ignore test no pb } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java index 2ada01c58877..a47cee53b182 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableInfoModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInfoModel.class); + HBaseClassTestRule.forClass(TestTableInfoModel.class); private static final String TABLE = "testtable"; private static final byte[] START_KEY = Bytes.toBytes("abracadbra"); @@ -44,22 +44,19 @@ public class TestTableInfoModel extends TestModelBase { public TestTableInfoModel() throws Exception { super(TableInfoModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_PB = - "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + - "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; + AS_PB = "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + + "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; - AS_JSON = - "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + - "\"location\":\"testhost:9876\",\"" + - "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + - "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; + AS_JSON = "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + + "\"location\":\"testhost:9876\",\"" + + "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; } @Override @@ -98,4 +95,3 @@ public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java index eca14978c909..c034f0602dd1 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java +++ 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,12 +27,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableListModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableListModel.class); + HBaseClassTestRule.forClass(TestTableListModel.class); private static final String TABLE1 = "table1"; private static final String TABLE2 = "table2"; @@ -40,14 +40,12 @@ public class TestTableListModel extends TestModelBase { public TestTableListModel() throws Exception { super(TableListModel.class); - AS_XML = - "
    "; + AS_XML = "
    "; AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz"; - AS_JSON = - "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; + AS_JSON = "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; } @Override @@ -71,4 +69,3 @@ protected void checkModel(TableListModel model) { assertFalse(tables.hasNext()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java index 20577cfc536f..0fefbbc1e154 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableRegionModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableRegionModel.class); + HBaseClassTestRule.forClass(TestTableRegionModel.class); private static final String TABLE = "testtable"; private static final byte[] START_KEY = Bytes.toBytes("abracadbra"); @@ -48,21 +48,19 @@ public TestTableRegionModel() throws Exception { super(TableRegionModel.class); AS_XML = - ""; + ""; - AS_JSON = - "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + - "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + - "startKey\":\"YWJyYWNhZGJyYQ==\"}"; + AS_JSON = "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + + "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}"; } @Override protected TableRegionModel buildTestModel() { - TableRegionModel model = - new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); + TableRegionModel model = new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); return model; } @@ -72,9 +70,8 @@ protected void checkModel(TableRegionModel model) { assertTrue(Bytes.equals(model.getEndKey(), END_KEY)); assertEquals(ID, model.getId()); assertEquals(LOCATION, model.getLocation()); - assertEquals(model.getName(), - TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + - ".ad9860f031282c46ed431d7af8f94aca."); + assertEquals(model.getName(), TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + + ".ad9860f031282c46ed431d7af8f94aca."); } @Test @@ -96,7 +93,6 @@ public void testSetName() { @Override public void testFromPB() throws Exception { - //no pb ignore + // no pb ignore } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java index 6b50ab700489..c12288e02093 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,12 +31,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableSchemaModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSchemaModel.class); + HBaseClassTestRule.forClass(TestTableSchemaModel.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableSchemaModel.class); @@ -51,24 +51,21 @@ public TestTableSchemaModel() throws Exception { super(TableSchemaModel.class); testColumnSchemaModel = new TestColumnSchemaModel(); - AS_XML = - "" + - "" + - "" + - ""; - - AS_PB = - "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + - "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + - "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + - "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; - - AS_JSON = - "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + - "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + - "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + - "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; + AS_XML = "" + + "" + + "" + + ""; + + AS_PB = "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + + "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + + "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + + "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; + + AS_JSON = "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + + "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + + "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + + "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; } @Override @@ -122,4 +119,3 @@ public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java index b35295059cfa..166a68c5228b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,36 +25,31 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestVersionModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVersionModel.class); + HBaseClassTestRule.forClass(TestVersionModel.class); private static final String REST_VERSION = "0.0.1"; - private static final String OS_VERSION = - "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; - private static final String JVM_VERSION = - "Sun Microsystems Inc. 
1.6.0_13-11.3-b02"; + private static final String OS_VERSION = "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; + private static final String JVM_VERSION = "Sun Microsystems Inc. 1.6.0_13-11.3-b02"; private static final String JETTY_VERSION = "6.1.14"; private static final String JERSEY_VERSION = "1.1.0-ea"; public TestVersionModel() throws Exception { super(VersionModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_PB = - "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + - "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; + AS_PB = "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + + "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; - AS_JSON = - "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + - "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + - "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; + AS_JSON = "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + + "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + + "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; } @Override @@ -77,4 +72,3 @@ protected void checkModel(VersionModel model) { assertEquals(JERSEY_VERSION, model.getJerseyVersion()); } } - diff --git a/hbase-rest/src/test/resources/mapred-site.xml b/hbase-rest/src/test/resources/mapred-site.xml index 787ffb75511c..b8949fef6a01 100644 --- a/hbase-rest/src/test/resources/mapred-site.xml +++ b/hbase-rest/src/test/resources/mapred-site.xml @@ -31,4 +31,3 @@ -Djava.awt.headless=true - diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index e2f16df0809e..fbb7463cda75 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -36,203 +36,6 @@ true true - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - src/main/resources - - **/** - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - 2048 - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - org.apache.hbase - hbase-resource-bundle - ${project.version} - - - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - org/apache/jute/** - org/apache/zookeeper/** - **/*.jsp - hbase-site.xml - hdfs-site.xml - log4j.properties - mapred-queues.xml - mapred-site.xml - - - - - - maven-antrun-plugin - - - - generate - generate-sources - - - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-jamon - ${project.build.directory}/generated-sources/java - - - - - - - org.jamon - jamon-maven-plugin - - - generate-sources - - translate - - - src/main/jamon - target/generated-jamon - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - 
net.revelc.code - warbucks-maven-plugin - - - org.apache.hbase.thirdparty @@ -304,8 +107,8 @@ hbase-balancer - hbase-balancer org.apache.hbase + hbase-balancer test-jar test @@ -372,7 +175,7 @@ org.glassfish.web javax.servlet.jsp - + javax.servlet.jsp javax.servlet.jsp-api @@ -537,6 +340,203 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + src/main/resources + + **/** + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + 2048 + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + org.apache.hbase + hbase-resource-bundle + ${project.version} + + + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org/apache/jute/** + org/apache/zookeeper/** + **/*.jsp + hbase-site.xml + hdfs-site.xml + log4j.properties + mapred-queues.xml + mapred-site.xml + + + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-jamon + ${project.build.directory}/generated-sources/java + + + + + + + org.jamon + jamon-maven-plugin + + + + translate + + generate-sources + + src/main/jamon + target/generated-jamon + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -549,10 +549,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -600,15 +600,17 @@ make + + run + compile - run - + - + @@ -626,7 +628,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -705,10 +709,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -777,10 +755,10 @@ - - false - true - + + false + true + @@ -809,7 +787,7 @@ - + @@ -818,6 +796,31 @@ + + + org.apache.maven.plugins + maven-eclipse-plugin + + + org.jamon.project.jamonnature + + + org.jamon.project.templateBuilder + org.eclipse.jdt.core.javabuilder + org.jamon.project.markerUpdater + + + + .settings/org.jamon.prefs + # now + eclipse.preferences.version=1 + templateSourceDir=src/main/jamon + templateOutputDir=target/generated-jamon + + + + + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java index 1f351c52da29..52bca682c812 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Implementations of this interface will keep and return to clients - * implementations of classes providing API to execute - * coordinated operations. This interface is client-side, so it does NOT - * include methods to retrieve the particular interface implementations. - * - * For each coarse-grained area of operations there will be a separate - * interface with implementation, providing API for relevant operations - * requiring coordination. + * Implementations of this interface will keep and return to clients implementations of classes + * providing API to execute coordinated operations. This interface is client-side, so it does NOT + * include methods to retrieve the particular interface implementations. For each coarse-grained + * area of operations there will be a separate interface with implementation, providing API for + * relevant operations requiring coordination. */ @InterfaceAudience.Private public interface CoordinatedStateManager { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java index 4b4aef30bbc5..619da6980275 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.slf4j.LoggerFactory; /** - * The Class ExecutorStatusChore for collect Executor status info periodically - * and report to metrics system + * The Class ExecutorStatusChore for collect Executor status info periodically and report to metrics + * system */ @InterfaceAudience.Private public class ExecutorStatusChore extends ScheduledChore { @@ -43,7 +43,7 @@ public class ExecutorStatusChore extends ScheduledChore { private DynamicMetricsRegistry metricsRegistry; public ExecutorStatusChore(int sleepTime, Stoppable stopper, ExecutorService service, - MetricsRegionServerSource metrics) { + MetricsRegionServerSource metrics) { super("ExecutorStatusChore", stopper, sleepTime); LOG.info("ExecutorStatusChore runs every {} ", StringUtils.formatTime(sleepTime)); this.service = service; @@ -52,7 +52,7 @@ public ExecutorStatusChore(int sleepTime, Stoppable stopper, ExecutorService ser @Override protected void chore() { - try{ + try { // thread pool monitor Map statuses = service.getAllExecutorStatuses(); for (Map.Entry statusEntry : statuses.entrySet()) { @@ -71,7 +71,7 @@ protected void chore() { queued.set(queueSize); running.set(runningSize); } - } catch(Throwable e) { + } catch (Throwable e) { LOG.error(e.getMessage(), e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java index b5329b136293..291b38acb322 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
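The ExecutorStatusChore hunk above is formatting-only, but for orientation: the chore samples each executor pool's queued and running counts and publishes them through the region server metrics source. A hedged sketch of how a ScheduledChore like this is typically wired up, assuming a ChoreService and the usual collaborators already exist (the variable names are placeholders, not taken from the patch):

  // stopper, executorService and metricsSource come from the hosting server
  ExecutorStatusChore statusChore =
      new ExecutorStatusChore(60_000, stopper, executorService, metricsSource);
  choreService.scheduleChore(statusChore);  // runs every 60s until the stopper fires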
See the NOTICE file * distributed with this work for additional information @@ -136,8 +136,8 @@ protected HBaseRpcServicesBase(S server, String processName) throws IOException priority = createPriority(); // Using Address means we don't get the IP too. Shorten it more even to just the host name // w/o the domain. - final String name = processName + "/" + - Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain(); + final String name = processName + "/" + + Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain(); server.setName(name); // Set how many times to retry talking to another server over Connection. ConnectionUtils.setServerSideHConnectionRetriesConfig(conf, name, LOG); @@ -148,8 +148,8 @@ protected HBaseRpcServicesBase(S server, String processName) throws IOException rpcServer = RpcServerFactory.createRpcServer(server, name, getServices(), bindAddress, conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled); } catch (BindException be) { - throw new IOException(be.getMessage() + ". To switch ports use the '" + getPortConfigName() + - "' configuration property.", be.getCause() != null ? be.getCause() : be); + throw new IOException(be.getMessage() + ". To switch ports use the '" + getPortConfigName() + + "' configuration property.", be.getCause() != null ? be.getCause() : be); } final InetSocketAddress address = rpcServer.getListenerAddress(); if (address == null) { @@ -351,8 +351,9 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, namedQueueGetRequest.setSlowLogResponseRequest(request); NamedQueueGetResponse namedQueueGetResponse = namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); - slowLogPayloads = namedQueueGetResponse != null ? namedQueueGetResponse.getSlowLogPayloads() : - Collections.emptyList(); + slowLogPayloads = namedQueueGetResponse != null + ? namedQueueGetResponse.getSlowLogPayloads() + : Collections.emptyList(); return slowLogPayloads; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index c28ea29215a4..4a916dedfb77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -241,8 +241,9 @@ public HBaseServerBase(Configuration conf, String name) this.rpcServices = createRpcServices(); useThisHostnameInstead = getUseThisHostnameInstead(conf); InetSocketAddress addr = rpcServices.getSocketAddress(); - String hostName = StringUtils.isBlank(useThisHostnameInstead) ? addr.getHostName() : - this.useThisHostnameInstead; + String hostName = StringUtils.isBlank(useThisHostnameInstead) + ? 
addr.getHostName() + : this.useThisHostnameInstead; serverName = ServerName.valueOf(hostName, addr.getPort(), this.startcode); // login the zookeeper client principal (if using security) ZKAuthentication.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE, @@ -266,8 +267,9 @@ public HBaseServerBase(Configuration conf, String name) this.metaRegionLocationCache = new MetaRegionLocationCache(zooKeeper); if (clusterMode()) { - if (conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, - DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { + if ( + conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) + ) { csm = new ZkCoordinatedStateManager(this); } else { csm = null; @@ -299,9 +301,9 @@ private void putUpWebUI() throws IOException { } if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { - String msg = "Failed to start http info server. Address " + addr + - " does not belong to this host. Correct configuration parameter: " + - "hbase.regionserver.info.bindAddress"; + String msg = "Failed to start http info server. Address " + addr + + " does not belong to this host. Correct configuration parameter: " + + "hbase.regionserver.info.bindAddress"; LOG.error(msg); throw new IOException(msg); } @@ -383,8 +385,9 @@ protected final void initializeMemStoreChunkCreator(HeapMemoryManager hMemManage long globalMemStoreSize = pair.getFirst(); boolean offheap = pair.getSecond() == MemoryType.NON_HEAP; // When off heap memstore in use, take full area for chunk pool. - float poolSizePercentage = offheap ? 1.0F : - conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); + float poolSizePercentage = offheap + ? 1.0F + : conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT); int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT); @@ -536,8 +539,7 @@ public int getMsgInterval() { } /** - * get NamedQueue Provider to add different logs to ringbuffer - * @return NamedQueueRecorder + * get NamedQueue Provider to add different logs to ringbuffer n */ public NamedQueueRecorder getNamedQueueRecorder() { return this.namedQueueRecorder; @@ -556,7 +558,7 @@ public R getRpcServices() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public MetaRegionLocationCache getMetaRegionLocationCache() { return this.metaRegionLocationCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java index 8db0ca272d8a..fce64aad2e35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java @@ -49,7 +49,7 @@ public HealthCheckChore(int sleepTime, Stoppable stopper, Configuration conf) { healthChecker.init(healthCheckScript, scriptTimeout); this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD, HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD); - this.failureWindow = (long)this.threshold * (long)sleepTime; + this.failureWindow = (long) this.threshold * (long) sleepTime; } @Override @@ -59,13 +59,12 @@ protected void chore() { if (!isHealthy) { boolean needToStop = decideToStop(); if (needToStop) { - this.getStopper().stop("The node reported unhealthy " + threshold - + " 
number of times consecutively."); + this.getStopper() + .stop("The node reported unhealthy " + threshold + " number of times consecutively."); } // Always log health report. - LOG.info("Health status at " + - StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " + - report.getHealthReport()); + LOG.info("Health status at " + StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + + " : " + report.getHealthReport()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java index e47afd58d68b..5e8473d1a3cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java @@ -19,17 +19,15 @@ import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A utility for executing an external script that checks the health of - * the node. An example script can be found at - * src/main/sh/healthcheck/healthcheck.sh in the - * hbase-examples module. + * A utility for executing an external script that checks the health of the node. An example script + * can be found at src/main/sh/healthcheck/healthcheck.sh in the hbase-examples + * module. */ class HealthChecker { @@ -53,9 +51,8 @@ enum HealthCheckerExitStatus { /** * Initialize. - * * @param location the location of the health script - * @param timeout the timeout to be used for the health script + * @param timeout the timeout to be used for the health script */ public void init(String location, long timeout) { this.healthCheckScript = location; @@ -63,9 +60,9 @@ public void init(String location, long timeout) { ArrayList execScript = new ArrayList<>(); execScript.add(healthCheckScript); this.shexec = new ShellCommandExecutor(execScript.toArray(new String[execScript.size()]), null, - null, scriptTimeout); - LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + - ", timeout=" + timeout); + null, scriptTimeout); + LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + ", timeout=" + + timeout); } public HealthReport checkHealth() { @@ -104,24 +101,24 @@ private boolean hasErrors(String output) { return false; } - private String getHealthReport(HealthCheckerExitStatus status){ + private String getHealthReport(HealthCheckerExitStatus status) { String healthReport = null; switch (status) { - case SUCCESS: - healthReport = "Server is healthy."; - break; - case TIMED_OUT: - healthReport = "Health script timed out"; - break; - case FAILED_WITH_EXCEPTION: - healthReport = exceptionStackTrace; - break; - case FAILED_WITH_EXIT_CODE: - healthReport = "Health script failed with exit code."; - break; - case FAILED: - healthReport = shexec.getOutput(); - break; + case SUCCESS: + healthReport = "Server is healthy."; + break; + case TIMED_OUT: + healthReport = "Health script timed out"; + break; + case FAILED_WITH_EXCEPTION: + healthReport = exceptionStackTrace; + break; + case FAILED_WITH_EXIT_CODE: + healthReport = "Health script failed with exit code."; + break; + case FAILED: + healthReport = shexec.getOutput(); + break; } return healthReport; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java index 83882b0cdcca..444982009913 
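A note on the HealthCheckChore/HealthChecker hunks above: the window arithmetic comes straight from the constructor, failureWindow = threshold * sleepTime, so with a threshold of 3 (the shipped default, if memory serves) and a 10 s chore period the chore only stops the server after 3 failures inside a 30 000 ms window. A hedged sketch of the checker itself; the script path, timeout and LOG are illustrative, and since the class is package-private this would have to live in org.apache.hadoop.hbase:

  HealthChecker checker = new HealthChecker();
  checker.init("/etc/hbase/healthcheck.sh", 60_000);  // hypothetical script, 60s timeout
  HealthReport report = checker.checkHealth();
  LOG.info("health status={} report={}", report.getStatus(), report.getHealthReport());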
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,7 @@ class HealthReport { } /** - * Gets the status of the region server. - * - * @return HealthCheckerExitStatus + * Gets the status of the region server. n */ HealthCheckerExitStatus getStatus() { return status; @@ -48,9 +46,7 @@ public String toString() { } /** - * Gets the health report of the region server. - * - * @return String + * Gets the health report of the region server. n */ String getHealthReport() { return healthReport; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java index f8fb4bd9a426..7f86d4de0b1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,11 +41,9 @@ import org.slf4j.LoggerFactory; /** - * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue - * of the out-of-the-box JMX Agent): - * 1)connector port can share with the registry port if SSL is OFF - * 2)support password authentication - * 3)support subset of SSL (with default configuration) + * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue of the out-of-the-box JMX + * Agent): 1)connector port can share with the registry port if SSL is OFF 2)support password + * authentication 3)support subset of SSL (with default configuration) */ @InterfaceAudience.Private public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { @@ -57,16 +54,15 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { public static final int defRegionserverRMIRegistryPort = 10102; /** - * workaround for HBASE-11146 - * master and regionserver are in 1 JVM in standalone mode - * only 1 JMX instance is allowed, otherwise there is port conflict even if - * we only load regionserver coprocessor on master + * workaround for HBASE-11146 master and regionserver are in 1 JVM in standalone mode only 1 JMX + * instance is allowed, otherwise there is port conflict even if we only load regionserver + * coprocessor on master */ private static JMXConnectorServer JMX_CS = null; private Registry rmiRegistry = null; - public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, - int rmiConnectorPort) throws IOException { + public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, int rmiConnectorPort) + throws IOException { // Build jmxURL StringBuilder url = new StringBuilder(); url.append("service:jmx:rmi://localhost:"); @@ -79,8 +75,7 @@ public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, } - public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) - throws IOException { + public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) throws IOException { boolean rmiSSL = false; boolean authenticate = true; String passwordFile = null; @@ -88,19 +83,18 @@ public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) 
System.setProperty("java.rmi.server.randomIDs", "true"); - String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", - "false"); + String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", "false"); rmiSSL = Boolean.parseBoolean(rmiSSLValue); String authenticateValue = - System.getProperty("com.sun.management.jmxremote.authenticate", "false"); + System.getProperty("com.sun.management.jmxremote.authenticate", "false"); authenticate = Boolean.parseBoolean(authenticateValue); passwordFile = System.getProperty("com.sun.management.jmxremote.password.file"); accessFile = System.getProperty("com.sun.management.jmxremote.access.file"); - LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue - + ",passwordFile:" + passwordFile + ",accessFile:" + accessFile); + LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue + ",passwordFile:" + + passwordFile + ",accessFile:" + accessFile); // Environment map HashMap jmxEnv = new HashMap<>(); @@ -110,8 +104,8 @@ public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) if (rmiSSL) { if (rmiRegistryPort == rmiConnectorPort) { - throw new IOException("SSL is enabled. " + - "rmiConnectorPort cannot share with the rmiRegistryPort!"); + throw new IOException( + "SSL is enabled. " + "rmiConnectorPort cannot share with the rmiRegistryPort!"); } csf = new SslRMIClientSocketFactorySecure(); ssf = new SslRMIServerSocketFactorySecure(); @@ -140,7 +134,7 @@ public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) try { // Start the JMXListener with the connection string - synchronized(JMXListener.class) { + synchronized (JMXListener.class) { if (JMX_CS != null) { throw new RuntimeException("Started by another thread?"); } @@ -172,7 +166,6 @@ public void stopConnectorServer() throws IOException { } } - @Override public void start(CoprocessorEnvironment env) throws IOException { int rmiRegistryPort = -1; @@ -182,30 +175,27 @@ public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof MasterCoprocessorEnvironment) { // running on Master rmiRegistryPort = - conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort); + conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort); rmiConnectorPort = conf.getInt("master" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); LOG.info("Master rmiRegistryPort:" + rmiRegistryPort + ",Master rmiConnectorPort:" - + rmiConnectorPort); + + rmiConnectorPort); } else if (env instanceof RegionServerCoprocessorEnvironment) { // running on RegionServer rmiRegistryPort = - conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, - defRegionserverRMIRegistryPort); - rmiConnectorPort = - conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); - LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort - + ",RegionServer rmiConnectorPort:" + rmiConnectorPort); + conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRegionserverRMIRegistryPort); + rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); + LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort + ",RegionServer rmiConnectorPort:" + + rmiConnectorPort); } else if (env instanceof RegionCoprocessorEnvironment) { LOG.error("JMXListener should not be loaded in Region Environment!"); return; } - synchronized(JMXListener.class) { + synchronized (JMXListener.class) { if (JMX_CS != null) { LOG.info("JMXListener has been started at Registry port " + 
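For readers tracing the JMXListener changes: buildJMXServiceURL stitches the connector and registry ports into a single RMI service URL, and the hunk above is also where SSL forces the two ports to differ. A hedged example using the default region server registry port named in this file; the URL layout past the visible "service:jmx:rmi://localhost:" prefix is recalled rather than shown in the diff, so treat it as approximate:

  javax.management.remote.JMXServiceURL url = JMXListener.buildJMXServiceURL(10102, 10102);
  // roughly: service:jmx:rmi://localhost:10102/jndi/rmi://localhost:10102/jmxrmi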
rmiRegistryPort); - } - else { + } else { startConnectorServer(rmiRegistryPort, rmiConnectorPort); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 2dca089f0bd3..8fceff2ff4dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,26 +40,22 @@ import org.slf4j.LoggerFactory; /** - * This class creates a single process HBase cluster. One thread is created for - * a master and one per region server. - * - * Call {@link #startup()} to start the cluster running and {@link #shutdown()} - * to close it all down. {@link #join} the cluster is you want to wait on - * shutdown completion. - * - *
    Runs master on port 16000 by default. Because we can't just kill the - * process -- not till HADOOP-1700 gets fixed and even then.... -- we need to - * be able to find the master with a remote client to run shutdown. To use a - * port other than 16000, set the hbase.master to a value of 'local:PORT': - * that is 'local', not 'localhost', and the port number the master should use - * instead of 16000. - * + * This class creates a single process HBase cluster. One thread is created for a master and one per + * region server. Call {@link #startup()} to start the cluster running and {@link #shutdown()} to + * close it all down. {@link #join} the cluster is you want to wait on shutdown completion. + *
    + * Runs master on port 16000 by default. Because we can't just kill the process -- not till + * HADOOP-1700 gets fixed and even then.... -- we need to be able to find the master with a remote + * client to run shutdown. To use a port other than 16000, set the hbase.master to a value of + * 'local:PORT': that is 'local', not 'localhost', and the port number the master should use instead + * of 16000. */ @InterfaceAudience.Private public class LocalHBaseCluster { private static final Logger LOG = LoggerFactory.getLogger(LocalHBaseCluster.class); private final List masterThreads = new CopyOnWriteArrayList<>(); - private final List regionThreads = new CopyOnWriteArrayList<>(); + private final List regionThreads = + new CopyOnWriteArrayList<>(); private final static int DEFAULT_NO = 1; /** local mode */ public static final String LOCAL = "local"; @@ -73,108 +68,103 @@ public class LocalHBaseCluster { private final Class regionServerClass; /** - * Constructor. - * @param conf - * @throws IOException + * Constructor. nn */ - public LocalHBaseCluster(final Configuration conf) - throws IOException { + public LocalHBaseCluster(final Configuration conf) throws IOException { this(conf, DEFAULT_NO); } /** * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. - * @param noRegionServers Count of regionservers to start. - * @throws IOException + * @param conf Configuration to use. Post construction has the master's address. + * @param noRegionServers Count of regionservers to start. n */ - public LocalHBaseCluster(final Configuration conf, final int noRegionServers) - throws IOException { + public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException { this(conf, 1, 0, noRegionServers, getMasterImplementation(conf), - getRegionServerImplementation(conf)); + getRegionServerImplementation(conf)); } /** * Constructor. - * @param conf Configuration to use. Post construction has the active master - * address. - * @param noMasters Count of masters to start. - * @param noRegionServers Count of regionservers to start. - * @throws IOException + * @param conf Configuration to use. Post construction has the active master address. + * @param noMasters Count of masters to start. + * @param noRegionServers Count of regionservers to start. 
n */ - public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noRegionServers) - throws IOException { + public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers) + throws IOException { this(conf, noMasters, 0, noRegionServers, getMasterImplementation(conf), - getRegionServerImplementation(conf)); + getRegionServerImplementation(conf)); } @SuppressWarnings("unchecked") - private static Class getRegionServerImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - HRegionServer.class); + private static Class + getRegionServerImplementation(final Configuration conf) { + return (Class) conf.getClass(HConstants.REGION_SERVER_IMPL, + HRegionServer.class); } @SuppressWarnings("unchecked") private static Class getMasterImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.MASTER_IMPL, - HMaster.class); + return (Class) conf.getClass(HConstants.MASTER_IMPL, HMaster.class); } public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers, - final Class masterClass, - final Class regionServerClass) throws IOException { + final Class masterClass, + final Class regionServerClass) throws IOException { this(conf, noMasters, 0, noRegionServers, masterClass, regionServerClass); } /** * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. - * @param noMasters Count of masters to start. - * @param noRegionServers Count of regionservers to start. - * @param masterClass - * @param regionServerClass - * @throws IOException + * @param conf Configuration to use. Post construction has the master's address. + * @param noMasters Count of masters to start. + * @param noRegionServers Count of regionservers to start. nnn */ @SuppressWarnings("unchecked") public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noAlwaysStandByMasters, final int noRegionServers, - final Class masterClass, - final Class regionServerClass) throws IOException { + final int noAlwaysStandByMasters, final int noRegionServers, + final Class masterClass, + final Class regionServerClass) throws IOException { this.conf = conf; // When active, if a port selection is default then we switch to random if (conf.getBoolean(ASSIGN_RANDOM_PORTS, false)) { - if (conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT) - == HConstants.DEFAULT_MASTER_PORT) { + if ( + conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT) + == HConstants.DEFAULT_MASTER_PORT + ) { LOG.debug("Setting Master Port to random."); conf.set(HConstants.MASTER_PORT, "0"); } - if (conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT) - == HConstants.DEFAULT_REGIONSERVER_PORT) { + if ( + conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT) + == HConstants.DEFAULT_REGIONSERVER_PORT + ) { LOG.debug("Setting RegionServer Port to random."); conf.set(HConstants.REGIONSERVER_PORT, "0"); } // treat info ports special; expressly don't change '-1' (keep off) // in case we make that the default behavior. 
- if (conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 && - conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) - == HConstants.DEFAULT_REGIONSERVER_INFOPORT) { + if ( + conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 + && conf.getInt(HConstants.REGIONSERVER_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT) == HConstants.DEFAULT_REGIONSERVER_INFOPORT + ) { LOG.debug("Setting RS InfoServer Port to random."); conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); } - if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 && - conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) - == HConstants.DEFAULT_MASTER_INFOPORT) { + if ( + conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 + && conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) + == HConstants.DEFAULT_MASTER_INFOPORT + ) { LOG.debug("Setting Master InfoServer Port to random."); conf.set(HConstants.MASTER_INFO_PORT, "0"); } } - this.masterClass = (Class) - conf.getClass(HConstants.MASTER_IMPL, masterClass); + this.masterClass = + (Class) conf.getClass(HConstants.MASTER_IMPL, masterClass); // Start the HMasters. int i; for (i = 0; i < noMasters; i++) { @@ -186,45 +176,40 @@ public LocalHBaseCluster(final Configuration conf, final int noMasters, addMaster(c, i + j); } // Start the HRegionServers. - this.regionServerClass = - (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - regionServerClass); + this.regionServerClass = (Class) conf + .getClass(HConstants.REGION_SERVER_IMPL, regionServerClass); for (int j = 0; j < noRegionServers; j++) { addRegionServer(new Configuration(conf), j); } } - public JVMClusterUtil.RegionServerThread addRegionServer() - throws IOException { + public JVMClusterUtil.RegionServerThread addRegionServer() throws IOException { return addRegionServer(new Configuration(conf), this.regionThreads.size()); } @SuppressWarnings("unchecked") - public JVMClusterUtil.RegionServerThread addRegionServer( - Configuration config, final int index) - throws IOException { + public JVMClusterUtil.RegionServerThread addRegionServer(Configuration config, final int index) + throws IOException { // Create each regionserver with its own Configuration instance so each has // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager). 
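Pulling the LocalHBaseCluster javadoc above together, here is a minimal lifecycle sketch; it is illustrative only, and the client work inside the try block is a placeholder:

  Configuration conf = HBaseConfiguration.create();
  LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1);  // one master, one region server
  cluster.startup();
  try {
    // talk to cluster.getActiveMaster(), run client code against the in-process cluster...
  } finally {
    cluster.shutdown();
    cluster.join();  // wait for the master and region server threads to exit
  }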
JVMClusterUtil.RegionServerThread rst = - JVMClusterUtil.createRegionServerThread(config, (Class) conf - .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), index); + JVMClusterUtil.createRegionServerThread(config, (Class) conf + .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), index); this.regionThreads.add(rst); return rst; } - public JVMClusterUtil.RegionServerThread addRegionServer( - final Configuration config, final int index, User user) - throws IOException, InterruptedException { - return user.runAs( - new PrivilegedExceptionAction() { - @Override - public JVMClusterUtil.RegionServerThread run() throws Exception { - return addRegionServer(config, index); - } - }); + public JVMClusterUtil.RegionServerThread addRegionServer(final Configuration config, + final int index, User user) throws IOException, InterruptedException { + return user.runAs(new PrivilegedExceptionAction() { + @Override + public JVMClusterUtil.RegionServerThread run() throws Exception { + return addRegionServer(config, index); + } + }); } public JVMClusterUtil.MasterThread addMaster() throws IOException { @@ -232,36 +217,33 @@ public JVMClusterUtil.MasterThread addMaster() throws IOException { } public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index) - throws IOException { + throws IOException { // Create each master with its own Configuration instance so each has // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager. JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c, - (Class) c.getClass(HConstants.MASTER_IMPL, this.masterClass), index); + (Class) c.getClass(HConstants.MASTER_IMPL, this.masterClass), index); this.masterThreads.add(mt); // Refresh the master address config. List masterHostPorts = new ArrayList<>(); - getMasters().forEach(masterThread -> - masterHostPorts.add(masterThread.getMaster().getServerName().getAddress().toString())); + getMasters().forEach(masterThread -> masterHostPorts + .add(masterThread.getMaster().getServerName().getAddress().toString())); conf.set(HConstants.MASTER_ADDRS_KEY, String.join(",", masterHostPorts)); return mt; } - public JVMClusterUtil.MasterThread addMaster( - final Configuration c, final int index, User user) - throws IOException, InterruptedException { - return user.runAs( - new PrivilegedExceptionAction() { - @Override - public JVMClusterUtil.MasterThread run() throws Exception { - return addMaster(c, index); - } - }); + public JVMClusterUtil.MasterThread addMaster(final Configuration c, final int index, User user) + throws IOException, InterruptedException { + return user.runAs(new PrivilegedExceptionAction() { + @Override + public JVMClusterUtil.MasterThread run() throws Exception { + return addMaster(c, index); + } + }); } /** - * @param serverNumber - * @return region server + * n * @return region server */ public HRegionServer getRegionServer(int serverNumber) { return regionThreads.get(serverNumber).getRegionServer(); @@ -275,14 +257,13 @@ public List getRegionServers() { } /** - * @return List of running servers (Some servers may have been killed or - * aborted during lifetime of cluster; these servers are not included in this - * list). + * @return List of running servers (Some servers may have been killed or aborted during lifetime + * of cluster; these servers are not included in this list). 
*/ public List getLiveRegionServers() { List liveServers = new ArrayList<>(); List list = getRegionServers(); - for (JVMClusterUtil.RegionServerThread rst: list) { + for (JVMClusterUtil.RegionServerThread rst : list) { if (rst.isAlive()) liveServers.add(rst); else LOG.info("Not alive " + rst.getName()); } @@ -335,15 +316,14 @@ public HMaster getMaster(int serverNumber) { } /** - * Gets the current active master, if available. If no active master, returns - * null. + * Gets the current active master, if available. If no active master, returns null. * @return the HMaster for the active master */ public HMaster getActiveMaster() { for (JVMClusterUtil.MasterThread mt : masterThreads) { // Ensure that the current active master is not stopped. // We don't want to return a stopping master as an active master. - if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { + if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { return mt.getMaster(); } } @@ -358,14 +338,13 @@ public List getMasters() { } /** - * @return List of running master servers (Some servers may have been killed - * or aborted during lifetime of cluster; these servers are not included in - * this list). + * @return List of running master servers (Some servers may have been killed or aborted during + * lifetime of cluster; these servers are not included in this list). */ public List getLiveMasters() { List liveServers = new ArrayList<>(); List list = getMasters(); - for (JVMClusterUtil.MasterThread mt: list) { + for (JVMClusterUtil.MasterThread mt : list) { if (mt.isAlive()) { liveServers.add(mt); } @@ -394,7 +373,7 @@ public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) { masterThread.join(); } catch (InterruptedException e) { LOG.error("Interrupted while waiting for {} to finish. Retrying join", - masterThread.getName(), e); + masterThread.getName(), e); interrupted = true; } } @@ -406,12 +385,11 @@ public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) { } /** - * Wait for Mini HBase Cluster to shut down. - * Presumes you've already called {@link #shutdown()}. + * Wait for Mini HBase Cluster to shut down. Presumes you've already called {@link #shutdown()}. */ public void join() { if (this.regionThreads != null) { - for(Thread t: this.regionThreads) { + for (Thread t : this.regionThreads) { if (t.isAlive()) { try { Threads.threadDumpingIsAlive(t); @@ -453,8 +431,9 @@ public void shutdown() { * @return True if a 'local' address in hbase.master value. */ public static boolean isLocal(final Configuration c) { - boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); - return(mode == HConstants.CLUSTER_IS_LOCAL); + boolean mode = + c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); + return (mode == HConstants.CLUSTER_IS_LOCAL); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java index 2e0f21379c7a..b4ab1f28944f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java @@ -41,9 +41,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A cache of meta region location metadata. Registers a listener on ZK to track changes to the - * meta table znodes. Clients are expected to retry if the meta information is stale. 
This class - * is thread-safe (a single instance of this class can be shared by multiple threads without race + * A cache of meta region location metadata. Registers a listener on ZK to track changes to the meta + * table znodes. Clients are expected to retry if the meta information is stale. This class is + * thread-safe (a single instance of this class can be shared by multiple threads without race * conditions). */ @InterfaceAudience.Private @@ -61,14 +61,14 @@ public class MetaRegionLocationCache extends ZKListener { private static final int SLEEP_INTERVAL_MS_BETWEEN_RETRIES = 1000; private static final int SLEEP_INTERVAL_MS_MAX = 10000; private final RetryCounterFactory retryCounterFactory = - new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES); + new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES); /** - * Cached meta region locations indexed by replica ID. - * CopyOnWriteArrayMap ensures synchronization during updates and a consistent snapshot during - * client requests. Even though CopyOnWriteArrayMap copies the data structure for every write, - * that should be OK since the size of the list is often small and mutations are not too often - * and we do not need to block client requests while mutations are in progress. + * Cached meta region locations indexed by replica ID. CopyOnWriteArrayMap ensures synchronization + * during updates and a consistent snapshot during client requests. Even though + * CopyOnWriteArrayMap copies the data structure for every write, that should be OK since the size + * of the list is often small and mutations are not too often and we do not need to block client + * requests while mutations are in progress. */ private final CopyOnWriteArrayMap cachedMetaLocations; @@ -132,25 +132,24 @@ private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opTy // No new meta znodes got added. return; } - for (String znode: znodes) { + for (String znode : znodes) { String path = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode); updateMetaLocation(path, opType); } } /** - * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for - * future updates. + * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for future + * updates. * @param replicaId ReplicaID of the region. * @return HRegionLocation for the meta replica. * @throws KeeperException if there is any issue fetching/parsing the serialized data. */ - private HRegionLocation getMetaRegionLocation(int replicaId) - throws KeeperException { + private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException { RegionState metaRegionState; try { - byte[] data = ZKUtil.getDataAndWatch(watcher, - watcher.getZNodePaths().getZNodeForReplica(replicaId)); + byte[] data = + ZKUtil.getDataAndWatch(watcher, watcher.getZNodePaths().getZNodeForReplica(replicaId)); metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId); } catch (DeserializationException e) { throw ZKUtil.convert(e); @@ -201,11 +200,10 @@ private void updateMetaLocation(String path, ZNodeOpType opType) { /** * @return Optional list of HRegionLocations for meta replica(s), null if the cache is empty. 
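Editor's note: MetaRegionLocationCache loads and re-loads meta locations from ZooKeeper inside a bounded retry loop built from RetryCounterFactory (MAX_ZK_META_FETCH_RETRIES attempts, SLEEP_INTERVAL_MS_BETWEEN_RETRIES between them). A stripped-down sketch of that pattern, with illustrative retry values and a hypothetical fetchZNode standing in for ZKUtil.getDataAndWatch, is:

import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

public class RetryFetchSketch {
  // Illustrative values; the real constants are defined on the class above.
  private static final RetryCounterFactory RETRIES = new RetryCounterFactory(10, 1000);

  // Stand-in for the real ZooKeeper read.
  static byte[] fetchZNode() throws Exception {
    throw new Exception("transient ZK failure");
  }

  static byte[] fetchWithRetries() throws InterruptedException {
    RetryCounter retryCounter = RETRIES.create();
    while (retryCounter.shouldRetry()) {
      try {
        return fetchZNode();
      } catch (Exception e) {
        retryCounter.sleepUntilNextRetry(); // back off before the next attempt
      }
    }
    return null; // retries exhausted; callers are expected to retry later, as the javadoc says
  }
}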
- * */ public List getMetaRegionLocations() { ConcurrentNavigableMap snapshot = - cachedMetaLocations.tailMap(cachedMetaLocations.firstKey()); + cachedMetaLocations.tailMap(cachedMetaLocations.firstKey()); if (snapshot.isEmpty()) { // This could be possible if the master has not successfully initialized yet or meta region // is stuck in some weird state. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 512916f483e0..87ec4908cae7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -101,7 +101,7 @@ private MetaTableAccessor() { /** * Performs a full scan of hbase:meta for regions. * @param connection connection we're using - * @param visitor Visitor invoked against each row in regions family. + * @param visitor Visitor invoked against each row in regions family. */ public static void fullScanRegions(Connection connection, final ClientMetaTableAccessor.Visitor visitor) throws IOException { @@ -119,7 +119,7 @@ public static List fullScanRegions(Connection connection) throws IOExcep /** * Performs a full scan of hbase:meta for tables. * @param connection connection we're using - * @param visitor Visitor invoked against each row in tables family. + * @param visitor Visitor invoked against each row in tables family. */ public static void fullScanTables(Connection connection, final ClientMetaTableAccessor.Visitor visitor) throws IOException { @@ -129,7 +129,7 @@ public static void fullScanTables(Connection connection, /** * Performs a full scan of hbase:meta. * @param connection connection we're using - * @param type scanned part of meta + * @param type scanned part of meta * @return List of {@link Result} */ private static List fullScan(Connection connection, QueryType type) throws IOException { @@ -190,8 +190,9 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re r = t.get(get); } RegionLocations locations = CatalogFamilyFormat.getRegionLocations(r); - return locations == null ? null : - locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId()); + return locations == null + ? null + : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId()); } /** @@ -236,28 +237,28 @@ public static Result getRegionResult(Connection connection, byte[] regionName) /** * Scans META table for a row whose key contains the specified regionEncodedName, returning * a single related Result instance if any row is found, null otherwise. - * @param connection the connection to query META table. + * @param connection the connection to query META table. * @param regionEncodedName the region encoded name to look for at META. * @return Result instance with the row related info in META, null otherwise. * @throws IOException if any errors occur while querying META. 
*/ public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName) - throws IOException { + throws IOException { RowFilter rowFilter = - new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); + new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); - ResultScanner resultScanner = table.getScanner(scan)) { + ResultScanner resultScanner = table.getScanner(scan)) { return resultScanner.next(); } } /** * Lists all of the regions currently in META. - * @param connection to connect with + * @param connection to connect with * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions, - * true and we'll leave out offlined regions from returned list + * true and we'll leave out offlined regions from returned list * @return List of all user-space regions. */ public static List getAllRegions(Connection connection, @@ -274,7 +275,7 @@ public static List getAllRegions(Connection connection, * Gets all of the regions of the specified table. Do not use this method to get meta table * regions, use methods in MetaTableLocator instead. * @param connection connection we're using - * @param tableName table we're looking for + * @param tableName table we're looking for * @return Ordered list of {@link RegionInfo}. */ public static List getTableRegions(Connection connection, TableName tableName) @@ -285,10 +286,10 @@ public static List getTableRegions(Connection connection, TableName /** * Gets all of the regions of the specified table. Do not use this method to get meta table * regions, use methods in MetaTableLocator instead. - * @param connection connection we're using - * @param tableName table we're looking for + * @param connection connection we're using + * @param tableName table we're looking for * @param excludeOfflinedSplitParents If true, do not include offlined split parents in the - * return. + * return. * @return Ordered list of {@link RegionInfo}. */ public static List getTableRegions(Connection connection, TableName tableName, @@ -347,7 +348,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { /** * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. * @param connection connection we're using - * @param tableName table we're looking for + * @param tableName table we're looking for * @return Return list of regioninfos and server. */ public static List> @@ -357,8 +358,8 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { /** * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. - * @param connection connection we're using - * @param tableName table to work with, can be null for getting all regions + * @param connection connection we're using + * @param tableName table to work with, can be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return Return list of regioninfos and server addresses. */ @@ -425,10 +426,10 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR /** * Performs a scan of META table for given table starting from given row. 
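Editor's note: most of the MetaTableAccessor changes in this patch are javadoc re-alignment and line re-wrapping; the lookup methods keep their signatures. As a hedged usage sketch (the table name is illustrative), listing a table's regions straight from hbase:meta with the getTableRegions method shown above looks like:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // As the javadoc above notes, do not use this for hbase:meta itself;
      // meta regions are resolved through MetaTableLocator instead.
      List<RegionInfo> regions =
        MetaTableAccessor.getTableRegions(connection, TableName.valueOf("demo_table"));
      for (RegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}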
* @param connection connection we're using - * @param visitor visitor to call - * @param tableName table withing we scan - * @param row start scan from this row - * @param rowLimit max number of rows to return + * @param visitor visitor to call + * @param tableName table withing we scan + * @param row start scan from this row + * @param rowLimit max number of rows to return */ public static void scanMeta(Connection connection, final ClientMetaTableAccessor.Visitor visitor, final TableName tableName, final byte[] row, final int rowLimit) throws IOException { @@ -449,11 +450,11 @@ public static void scanMeta(Connection connection, final ClientMetaTableAccessor /** * Performs a scan of META table. * @param connection connection we're using - * @param startRow Where to start the scan. Pass null if want to begin scan at first row. - * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one - * @param type scanned part of meta - * @param maxRows maximum rows to return - * @param visitor Visitor invoked against each row. + * @param startRow Where to start the scan. Pass null if want to begin scan at first row. + * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one + * @param type scanned part of meta + * @param maxRows maximum rows to return + * @param visitor Visitor invoked against each row. */ public static void scanMeta(Connection connection, @Nullable final byte[] startRow, @Nullable final byte[] stopRow, QueryType type, int maxRows, @@ -481,9 +482,9 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR } if (LOG.isTraceEnabled()) { - LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + - " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + - " with caching=" + scan.getCaching()); + LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + + " with caching=" + scan.getCaching()); } int currentRow = 0; @@ -527,13 +528,13 @@ private static RegionInfo getClosestRegionInfo(Connection connection, try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { Result result = resultScanner.next(); if (result == null) { - throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName + - ", row=" + Bytes.toStringBinary(row)); + throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName + + ", row=" + Bytes.toStringBinary(row)); } RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(result); if (regionInfo == null) { - throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row=" + - Bytes.toStringBinary(row)); + throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row=" + + Bytes.toStringBinary(row)); } return regionInfo; } @@ -578,7 +579,7 @@ public static PairOfSameType getDaughterRegions(Result data) { /** * Fetch table state for given table from META table - * @param conn connection to use + * @param conn connection to use * @param tableName table to fetch state for */ @Nullable @@ -613,7 +614,7 @@ public static Map getTableStates(Connection conn) throws /** * Updates state in META Do not use. For internal use only. 
- * @param conn connection to use + * @param conn connection to use * @param tableName table to look for */ public static void updateTableState(Connection conn, TableName tableName, TableState.State actual) @@ -666,7 +667,7 @@ public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo split /** * Put the passed p to the hbase:meta table. * @param connection connection we're using - * @param p Put to add to hbase:meta + * @param p Put to add to hbase:meta */ private static void putToMetaTable(Connection connection, Put p) throws IOException { try (Table table = getMetaHTable(connection)) { @@ -686,7 +687,7 @@ private static void put(Table t, Put p) throws IOException { /** * Put the passed ps to the hbase:meta table. * @param connection connection we're using - * @param ps Put to add to hbase:meta + * @param ps Put to add to hbase:meta */ public static void putsToMetaTable(final Connection connection, final List ps) throws IOException { @@ -707,7 +708,7 @@ public static void putsToMetaTable(final Connection connection, final List /** * Delete the passed d from the hbase:meta table. * @param connection connection we're using - * @param d Delete to add to hbase:meta + * @param d Delete to add to hbase:meta */ private static void deleteFromMetaTable(final Connection connection, final Delete d) throws IOException { @@ -719,7 +720,7 @@ private static void deleteFromMetaTable(final Connection connection, final Delet /** * Delete the passed deletes from the hbase:meta table. * @param connection connection we're using - * @param deletes Deletes to add to hbase:meta This list should support #remove. + * @param deletes Deletes to add to hbase:meta This list should support #remove. */ private static void deleteFromMetaTable(final Connection connection, final List deletes) throws IOException { @@ -755,8 +756,8 @@ public static void updateRegionState(Connection connection, RegionInfo ri, * region. * @param connection connection we're using * @param regionInfo RegionInfo of parent region - * @param splitA first split daughter of the parent regionInfo - * @param splitB second split daughter of the parent regionInfo + * @param splitA first split daughter of the parent regionInfo + * @param splitB second split daughter of the parent regionInfo * @throws IOException if problem connecting or updating meta */ public static void addSplitsToParent(Connection connection, RegionInfo regionInfo, @@ -773,7 +774,7 @@ public static void addSplitsToParent(Connection connection, RegionInfo regionInf /** * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is * CLOSED. - * @param connection connection we're using + * @param connection connection we're using * @param regionInfos region information list * @throws IOException if problem connecting or updating meta */ @@ -786,9 +787,9 @@ public static void addRegionsToMeta(Connection connection, List regi /** * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is * CLOSED. - * @param connection connection we're using + * @param connection connection we're using * @param regionInfos region information list - * @param ts desired timestamp + * @param ts desired timestamp * @throws IOException if problem connecting or updating meta */ public static void addRegionsToMeta(Connection connection, List regionInfos, @@ -815,7 +816,7 @@ public static void addRegionsToMeta(Connection connection, List regi /** * Update state of the table in meta. 
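Editor's note: the table-state helpers touched in this hunk are likewise only reflowed. A small sketch of reading the state that hbase:meta records for a table, assuming the getTableState accessor documented above keeps its (Connection, TableName) signature and using an illustrative table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableState;

public class TableStateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      TableState state = MetaTableAccessor.getTableState(conn, TableName.valueOf("demo_table"));
      if (state == null) {
        // The accessor is @Nullable: no row in meta means no recorded state.
        System.out.println("No state recorded in meta (table may not exist)");
      } else {
        System.out.println("State: " + state.getState());
      }
    }
  }
}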
* @param connection what we use for update - * @param state new state + * @param state new state */ private static void updateTableState(Connection connection, TableState state) throws IOException { Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime()); @@ -837,7 +838,7 @@ public static Put makePutFromTableState(TableState state, long ts) { /** * Remove state for table from meta * @param connection to use for deletion - * @param table to delete state for + * @param table to delete state for */ public static void deleteTableState(Connection connection, TableName table) throws IOException { long time = EnvironmentEdgeManager.currentTime(); @@ -853,10 +854,10 @@ public static void deleteTableState(Connection connection, TableName table) thro *

    * Uses passed catalog tracker to get a connection to the server hosting hbase:meta and makes * edits to that region. - * @param connection connection we're using - * @param regionInfo region to update location of - * @param openSeqNum the latest sequence number obtained when the region was open - * @param sn Server name + * @param connection connection we're using + * @param regionInfo region to update location of + * @param openSeqNum the latest sequence number obtained when the region was open + * @param sn Server name * @param masterSystemTime wall clock time from master if passed in the open region RPC */ public static void updateRegionLocation(Connection connection, RegionInfo regionInfo, @@ -869,13 +870,13 @@ public static void updateRegionLocation(Connection connection, RegionInfo region *
    * Connects to the specified server which should be hosting the specified catalog region name to * perform the edit. - * @param connection connection we're using - * @param regionInfo region to update location of - * @param sn Server name - * @param openSeqNum the latest sequence number obtained when the region was open + * @param connection connection we're using + * @param regionInfo region to update location of + * @param sn Server name + * @param openSeqNum the latest sequence number obtained when the region was open * @param masterSystemTime wall clock time from master if passed in the open region RPC * @throws IOException In particular could throw {@link java.net.ConnectException} if the server - * is down on other end. + * is down on other end. */ private static void updateLocation(Connection connection, RegionInfo regionInfo, ServerName sn, long openSeqNum, long masterSystemTime) throws IOException { @@ -929,7 +930,6 @@ public static Put addEmptyLocation(Put p, int replicaId) throws IOException { .setType(Cell.Type.Put).build()); } - private static void debugLogMutations(List mutations) throws IOException { if (!METALOG.isDebugEnabled()) { return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java index e57471a778f7..47f6938652d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -27,27 +26,23 @@ */ @InterfaceAudience.Private public interface RegionStateListener { -// TODO: Get rid of this!!!! Ain't there a better way to watch region -// state than introduce a whole new listening mechanism? St.Ack + // TODO: Get rid of this!!!! Ain't there a better way to watch region + // state than introduce a whole new listening mechanism? St.Ack /** * Process region split event. - * - * @param hri An instance of RegionInfo - * @throws IOException + * @param hri An instance of RegionInfo n */ void onRegionSplit(RegionInfo hri) throws IOException; /** * Process region split reverted event. - * * @param hri An instance of RegionInfo * @throws IOException Signals that an I/O exception has occurred. */ void onRegionSplitReverted(RegionInfo hri) throws IOException; /** - * Process region merge event. - * @throws IOException + * Process region merge event. n */ void onRegionMerged(RegionInfo mergedRegion) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index 31b8226cd4a9..b5f025f44a26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,9 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Defines a curated set of shared functions implemented by HBase servers (Masters - * and RegionServers). For use internally only. Be judicious adding API. Changes cause ripples - * through the code base. + * Defines a curated set of shared functions implemented by HBase servers (Masters and + * RegionServers). For use internally only. Be judicious adding API. Changes cause ripples through + * the code base. */ @InterfaceAudience.Private public interface Server extends Abortable, Stoppable { @@ -44,10 +44,9 @@ public interface Server extends Abortable, Stoppable { ZKWatcher getZooKeeper(); /** - * Returns a reference to the servers' connection. - * - * Important note: this method returns a reference to Connection which is managed - * by Server itself, so callers must NOT attempt to close connection obtained. + * Returns a reference to the servers' connection. Important note: this method returns a reference + * to Connection which is managed by Server itself, so callers must NOT attempt to close + * connection obtained. */ default Connection getConnection() { return getAsyncConnection().toConnection(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java index dfe8780ee20f..7aaadc4018c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase; /** @@ -19,21 +36,20 @@ */ import java.lang.reflect.Field; import java.util.concurrent.atomic.LongAdder; - import org.apache.yetus.audience.InterfaceAudience; /** - * Counters kept by the distributed WAL split log process. - * Used by master and regionserver packages. + * Counters kept by the distributed WAL split log process. Used by master and regionserver packages. 
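Editor's note: SplitLogCounters keeps one static LongAdder per event and resets them all reflectively in resetCounters(), as the hunk below shows. The pattern in isolation, with made-up field names, is:

import java.lang.reflect.Field;
import java.util.concurrent.atomic.LongAdder;

public class CountersSketch {
  // Cheap concurrent counters, bumped from many threads without contention on a single lock.
  public static final LongAdder tot_example_success = new LongAdder();
  public static final LongAdder tot_example_err = new LongAdder();

  // Bulk reset via reflection over every LongAdder field declared on this class.
  public static void resetCounters() throws IllegalAccessException {
    for (Field fld : CountersSketch.class.getDeclaredFields()) {
      if (!fld.isSynthetic() && LongAdder.class.isAssignableFrom(fld.getType())) {
        ((LongAdder) fld.get(null)).reset();
      }
    }
  }
}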
* @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private public class SplitLogCounters { - private SplitLogCounters() {} + private SplitLogCounters() { + } - //Spnager counters + // Spnager counters public final static LongAdder tot_mgr_log_split_batch_start = new LongAdder(); public final static LongAdder tot_mgr_log_split_batch_success = new LongAdder(); public final static LongAdder tot_mgr_log_split_batch_err = new LongAdder(); @@ -92,7 +108,7 @@ public static void resetCounters() throws Exception { for (Field fld : cl.getDeclaredFields()) { /* Guard against source instrumentation. */ if ((!fld.isSynthetic()) && (LongAdder.class.isAssignableFrom(fld.getType()))) { - ((LongAdder)fld.get(null)).reset(); + ((LongAdder) fld.get(null)).reset(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index ca07fcb1ee33..280ad3b7c47e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,22 @@ package org.apache.hadoop.hbase; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * State of a WAL log split during distributed splitting. State is kept up in zookeeper. - * Encapsulates protobuf serialization/deserialization so we don't leak generated pb outside of - * this class. Used by regionserver and master packages. - *
    Immutable + * State of a WAL log split during distributed splitting. State is kept up in zookeeper. + * Encapsulates protobuf serialization/deserialization so we don't leak generated pb outside of this + * class. Used by regionserver and master packages. + *
    + * Immutable * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private @@ -132,7 +133,7 @@ public String toString() { @Override public boolean equals(Object obj) { if (!(obj instanceof SplitLogTask)) return false; - SplitLogTask other = (SplitLogTask)obj; + SplitLogTask other = (SplitLogTask) obj; return other.state.equals(this.state) && other.originServer.equals(this.originServer); } @@ -145,11 +146,10 @@ public int hashCode() { /** * @param data Serialized date to parse. - * @return An SplitLogTaskState instance made of the passed data - * @throws DeserializationException - * @see #toByteArray() + * @return An SplitLogTaskState instance made of the passed data n * @see + * #toByteArray() */ - public static SplitLogTask parseFrom(final byte [] data) throws DeserializationException { + public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(data); try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); @@ -165,9 +165,9 @@ public static SplitLogTask parseFrom(final byte [] data) throws DeserializationE * @return This instance serialized into a byte array * @see #parseFrom(byte[]) */ - public byte [] toByteArray() { - // First create a pb ServerName. Then create a ByteString w/ the TaskState - // bytes in it. Finally create a SplitLogTaskState passing in the two + public byte[] toByteArray() { + // First create a pb ServerName. Then create a ByteString w/ the TaskState + // bytes in it. Finally create a SplitLogTaskState passing in the two // pbs just created. HBaseProtos.ServerName snpb = ProtobufUtil.toServerName(this.originServer); ZooKeeperProtos.SplitLogTask slts = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java index 5dffb73d3ed4..5b089d1f2921 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; @@ -32,8 +39,7 @@ public Socket createSocket(String host, int port) throws IOException { secureProtocols.add(p); } } - socket.setEnabledProtocols(secureProtocols.toArray( - new String[secureProtocols.size()])); + socket.setEnabledProtocols(secureProtocols.toArray(new String[secureProtocols.size()])); return socket; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java index 8a9223675a70..9e4a22cb84b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase; @@ -38,9 +45,8 @@ public ServerSocket createServerSocket(int port) throws IOException { public Socket accept() throws IOException { Socket socket = super.accept(); SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); - SSLSocket sslSocket = - (SSLSocket) sslSocketFactory.createSocket(socket, - socket.getInetAddress().getHostName(), socket.getPort(), true); + SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socket, + socket.getInetAddress().getHostName(), socket.getPort(), true); sslSocket.setUseClientMode(false); sslSocket.setNeedClientAuth(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index f7e07045f4c5..1dc17eff0d02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +19,8 @@ import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.yetus.audience.InterfaceAudience; /** * Get, remove and modify table descriptors. @@ -66,10 +65,10 @@ default void update(TableDescriptor htd) throws IOException { /** * Add or update descriptor - * @param htd Descriptor to set into TableDescriptors + * @param htd Descriptor to set into TableDescriptors * @param cacheOnly only add the given {@code htd} to cache, without updating the storage. For - * example, when creating table, we will write the descriptor to fs when creating the fs - * layout, so we do not need to update the fs again. + * example, when creating table, we will write the descriptor to fs when creating + * the fs layout, so we do not need to update the fs again. */ void update(TableDescriptor htd, boolean cacheOnly) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 6ba719a4acb1..5dec53e27a32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index b884669fe645..630a1c0fd127 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.BufferedReader; @@ -33,22 +32,29 @@ import org.slf4j.LoggerFactory; /** - *
Contains a set of methods for the collaboration between the start/stop scripts and the
- * servers. It allows to delete immediately the znode when the master or the regions server crashes.
- * The region server / master writes a specific file when it starts / becomes main master. When they
- * end properly, they delete the file.
- *
- * In the script, we check for the existence of these files when the program ends. If they still
+ *
+ * Contains a set of methods for the collaboration between the start/stop scripts and the servers.
+ * It allows to delete immediately the znode when the master or the regions server crashes. The
+ * region server / master writes a specific file when it starts / becomes main master. When they end
+ * properly, they delete the file.
+ *
+ *
+ * In the script, we check for the existence of these files when the program ends. If they still
 * exist we conclude that the server crashed, likely without deleting their znode. To have a faster
- * recovery we delete immediately the znode.
- *
- * The strategy depends on the server type. For a region server we store the znode path in the
- * file, and use it to delete it. for a master, as the znode path constant whatever the server, we
- * check its content to make sure that the backup server is not now in charge.
+ * recovery we delete immediately the znode.
+ *
+ *
+ * The strategy depends on the server type. For a region server we store the znode path in the file,
+ * and use it to delete it. for a master, as the znode path constant whatever the server, we check
+ * its content to make sure that the backup server is not now in charge.
+ *
    */ @InterfaceAudience.Private public final class ZNodeClearer { private static final Logger LOG = LoggerFactory.getLogger(ZNodeClearer.class); - private ZNodeClearer() {} + private ZNodeClearer() { + } /** * Logs the errors without failing on exception. @@ -56,8 +62,8 @@ private ZNodeClearer() {} public static void writeMyEphemeralNodeOnDisk(String fileContent) { String fileName = ZNodeClearer.getMyEphemeralNodeFileName(); if (fileName == null) { - LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + - "on crash by start scripts (Longer MTTR!)"); + LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + + "on crash by start scripts (Longer MTTR!)"); return; } @@ -65,7 +71,7 @@ public static void writeMyEphemeralNodeOnDisk(String fileContent) { try { fstream = new FileWriter(fileName); } catch (IOException e) { - LOG.warn("Can't write znode file "+fileName, e); + LOG.warn("Can't write znode file " + fileName, e); return; } @@ -82,7 +88,7 @@ public static void writeMyEphemeralNodeOnDisk(String fileContent) { } } } catch (IOException e) { - LOG.warn("Can't write znode file "+fileName, e); + LOG.warn("Can't write znode file " + fileName, e); } } @@ -91,7 +97,7 @@ public static void writeMyEphemeralNodeOnDisk(String fileContent) { */ public static String readMyEphemeralNodeOnDisk() throws IOException { String fileName = getMyEphemeralNodeFileName(); - if (fileName == null){ + if (fileName == null) { throw new FileNotFoundException("No filename; set environment variable HBASE_ZNODE_FILE"); } FileReader znodeFile = new FileReader(fileName); @@ -113,7 +119,7 @@ public static String getMyEphemeralNodeFileName() { } /** - * delete the znode file + * delete the znode file */ public static void deleteMyEphemeralNodeOnDisk() { String fileName = getMyEphemeralNodeFileName(); @@ -124,8 +130,8 @@ public static void deleteMyEphemeralNodeOnDisk() { } /** - * See HBASE-14861. We are extracting master ServerName from rsZnodePath - * example: "/hbase/rs/server.example.com,16020,1448266496481" + * See HBASE-14861. We are extracting master ServerName from rsZnodePath example: + * "/hbase/rs/server.example.com,16020,1448266496481" * @param rsZnodePath from HBASE_ZNODE_FILE * @return String representation of ServerName or null if fails */ @@ -134,7 +140,7 @@ public static String parseMasterServerName(String rsZnodePath) { String masterServerName = null; try { String[] rsZnodeParts = rsZnodePath.split("/"); - masterServerName = rsZnodeParts[rsZnodeParts.length -1]; + masterServerName = rsZnodeParts[rsZnodeParts.length - 1]; } catch (IndexOutOfBoundsException e) { LOG.warn("String " + rsZnodePath + " has wrong format", e); } @@ -142,9 +148,9 @@ public static String parseMasterServerName(String rsZnodePath) { } /** - * Delete the master znode if its content (ServerName string) is the same - * as the one in the znode file. (env: HBASE_ZNODE_FILE). I case of master-rs - * colloaction we extract ServerName string from rsZnode path.(HBASE-14861) + * Delete the master znode if its content (ServerName string) is the same as the one in the znode + * file. (env: HBASE_ZNODE_FILE). I case of master-rs colloaction we extract ServerName string + * from rsZnode path.(HBASE-14861) * @return true on successful deletion, false otherwise. 
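Editor's note: for HBASE-14861, ZNodeClearer extracts the ServerName string from the region-server znode path before comparing it with the master znode content. The extraction in parseMasterServerName above is simply the last path segment; a standalone illustration using the example path from the javadoc:

public class ParseServerNameSketch {
  public static void main(String[] args) {
    // Path of the shape stored in HBASE_ZNODE_FILE for a region server.
    String rsZnodePath = "/hbase/rs/server.example.com,16020,1448266496481";
    String[] parts = rsZnodePath.split("/");
    String serverName = parts[parts.length - 1];
    System.out.println(serverName); // server.example.com,16020,1448266496481
  }
}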
*/ public static boolean clear(Configuration conf) { @@ -153,11 +159,16 @@ public static boolean clear(Configuration conf) { ZKWatcher zkw; try { - zkw = new ZKWatcher(tempConf, "clean znode for master", - new Abortable() { - @Override public void abort(String why, Throwable e) {} - @Override public boolean isAborted() { return false; } - }); + zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() { + @Override + public void abort(String why, Throwable e) { + } + + @Override + public boolean isAborted() { + return false; + } + }); } catch (IOException e) { LOG.warn("Can't connect to zookeeper to read the master znode", e); return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java index fa081948f3f8..51aeabb7b457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; -import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.Collection; +import org.apache.hadoop.fs.Path; +import org.apache.yetus.audience.InterfaceAudience; /** * Exception indicating that some files in the requested set could not be archived. @@ -42,9 +40,6 @@ public Collection getFailedFiles() { @Override public String getMessage() { - return new StringBuilder(super.getMessage()) - .append("; files=") - .append(failedFiles) - .toString(); + return new StringBuilder(super.getMessage()).append("; files=").append(failedFiles).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index 6400976bf43e..8e666308d4fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,13 +67,12 @@ public class HFileArchiver { /** Number of retries in case of fs operation failure */ private static final int DEFAULT_RETRIES_NUMBER = 3; - private static final Function FUNC_FILE_TO_PATH = - new Function() { - @Override - public Path apply(File file) { - return file == null ? null : file.getPath(); - } - }; + private static final Function FUNC_FILE_TO_PATH = new Function() { + @Override + public Path apply(File file) { + return file == null ? null : file.getPath(); + } + }; private static ThreadPoolExecutor archiveExecutor; @@ -85,7 +84,7 @@ private HFileArchiver() { * @return True if the Region exits in the filesystem. 
*/ public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, info); return fs.exists(regionDir); @@ -94,11 +93,11 @@ public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info) /** * Cleans up all the files for a HRegion by archiving the HFiles to the archive directory * @param conf the configuration to use - * @param fs the file system object + * @param fs the file system object * @param info RegionInfo for region to be deleted */ public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo info) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); archiveRegion(fs, rootDir, CommonFSUtils.getTableDir(rootDir, info.getTable()), FSUtils.getRegionDirFromRootDir(rootDir, info)); @@ -106,22 +105,23 @@ public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo i /** * Remove an entire region from the table directory via archiving the region's hfiles. - * @param fs {@link FileSystem} from which to remove the region - * @param rootdir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) - * @param tableDir {@link Path} to where the table is being stored (for building the archive path) + * @param fs {@link FileSystem} from which to remove the region + * @param rootdir {@link Path} to the root directory where hbase files are stored (for building + * the archive path) + * @param tableDir {@link Path} to where the table is being stored (for building the archive + * path) * @param regionDir {@link Path} to where a region is being stored (for building the archive path) * @return true if the region was successfully deleted. false if the filesystem * operations could not complete. * @throws IOException if the request cannot be completed */ public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir) - throws IOException { + throws IOException { // otherwise, we archive the files // make sure we can archive if (tableDir == null || regionDir == null) { LOG.error("No archive directory could be found because tabledir (" + tableDir - + ") or regiondir (" + regionDir + "was null. Deleting files instead."); + + ") or regiondir (" + regionDir + "was null. Deleting files instead."); if (regionDir != null) { deleteRegionWithoutArchiving(fs, regionDir); } @@ -159,12 +159,12 @@ public boolean accept(Path file) { // convert the files in the region to a File Stream.of(storeDirs).map(getAsFile).forEachOrdered(toArchive::add); LOG.debug("Archiving " + toArchive); - List failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive, - EnvironmentEdgeManager.currentTime()); + List failedArchive = + resolveAndArchive(fs, regionArchiveDir, toArchive, EnvironmentEdgeManager.currentTime()); if (!failedArchive.isEmpty()) { throw new FailedArchiveException( - "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " + - regionArchiveDir + ". Something is probably awry on the filesystem.", + "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " + + regionArchiveDir + ". 
Something is probably awry on the filesystem.", failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } // if that was successful, then we delete the region @@ -173,20 +173,20 @@ public boolean accept(Path file) { /** * Archive the specified regions in parallel. - * @param conf the configuration to use - * @param fs {@link FileSystem} from which to remove the region - * @param rootDir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) - * @param tableDir {@link Path} to where the table is being stored (for building the archive - * path) + * @param conf the configuration to use + * @param fs {@link FileSystem} from which to remove the region + * @param rootDir {@link Path} to the root directory where hbase files are stored (for + * building the archive path) + * @param tableDir {@link Path} to where the table is being stored (for building the archive + * path) * @param regionDirList {@link Path} to where regions are being stored (for building the archive - * path) + * path) * @throws IOException if the request cannot be completed */ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDir, Path tableDir, List regionDirList) throws IOException { List> futures = new ArrayList<>(regionDirList.size()); - for (Path regionDir: regionDirList) { + for (Path regionDir : regionDirList) { Future future = getArchiveExecutor(conf).submit(() -> { archiveRegion(fs, rootDir, tableDir, regionDir); return null; @@ -194,7 +194,7 @@ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDi futures.add(future); } try { - for (Future future: futures) { + for (Future future : futures) { future.get(); } } catch (InterruptedException e) { @@ -207,8 +207,8 @@ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDi private static synchronized ThreadPoolExecutor getArchiveExecutor(final Configuration conf) { if (archiveExecutor == null) { int maxThreads = conf.getInt("hbase.hfilearchiver.thread.pool.max", 8); - archiveExecutor = Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, - getThreadFactory()); + archiveExecutor = + Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, getThreadFactory()); // Shutdown this ThreadPool in a shutdown hook Runtime.getRuntime().addShutdownHook(new Thread(() -> archiveExecutor.shutdown())); @@ -235,37 +235,37 @@ public Thread newThread(Runnable r) { } /** - * Remove from the specified region the store files of the specified column family, - * either by archiving them or outright deletion - * @param fs the filesystem where the store files live - * @param conf {@link Configuration} to examine to determine the archive directory - * @param parent Parent region hosting the store files + * Remove from the specified region the store files of the specified column family, either by + * archiving them or outright deletion + * @param fs the filesystem where the store files live + * @param conf {@link Configuration} to examine to determine the archive directory + * @param parent Parent region hosting the store files * @param tableDir {@link Path} to where the table is being stored (for building the archive path) - * @param family the family hosting the store files + * @param family the family hosting the store files * @throws IOException if the files could not be correctly disposed. 
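Editor's note: archiveRegions fans each region directory out to a shared, bounded thread pool (sized by hbase.hfilearchiver.thread.pool.max) and then joins on every future so that a single failed archive fails the whole call. A generic sketch of the same submit-then-join pattern, where archiveOne is a hypothetical stand-in for the per-region archiveRegion call:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelArchiveSketch {
  // Hypothetical per-region work; the real code archives one region directory here.
  static void archiveOne(String regionDir) {
    System.out.println("archiving " + regionDir);
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(8);
    List<Future<Void>> futures = new ArrayList<>();
    for (String regionDir : Arrays.asList("region-a", "region-b", "region-c")) {
      Callable<Void> task = () -> {
        archiveOne(regionDir);
        return null;
      };
      futures.add(pool.submit(task));
    }
    for (Future<Void> future : futures) {
      future.get(); // the first failure surfaces here as an ExecutionException
    }
    pool.shutdown();
  }
}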
*/ - public static void archiveFamily(FileSystem fs, Configuration conf, - RegionInfo parent, Path tableDir, byte[] family) throws IOException { + public static void archiveFamily(FileSystem fs, Configuration conf, RegionInfo parent, + Path tableDir, byte[] family) throws IOException { Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family))); archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family); } /** - * Removes from the specified region the store files of the specified column family, - * either by archiving them or outright deletion - * @param fs the filesystem where the store files live - * @param conf {@link Configuration} to examine to determine the archive directory - * @param parent Parent region hosting the store files + * Removes from the specified region the store files of the specified column family, either by + * archiving them or outright deletion + * @param fs the filesystem where the store files live + * @param conf {@link Configuration} to examine to determine the archive directory + * @param parent Parent region hosting the store files * @param familyDir {@link Path} to where the family is being stored - * @param family the family hosting the store files + * @param family the family hosting the store files * @throws IOException if the files could not be correctly disposed. */ - public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, - RegionInfo parent, Path familyDir, byte[] family) throws IOException { + public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, RegionInfo parent, + Path familyDir, byte[] family) throws IOException { FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, familyDir); if (storeFiles == null) { LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(), - Bytes.toString(family)); + Bytes.toString(family)); return; } @@ -274,29 +274,29 @@ public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family); // do the actual archive - List failedArchive = resolveAndArchive(fs, storeArchiveDir, toArchive, - EnvironmentEdgeManager.currentTime()); - if (!failedArchive.isEmpty()){ - throw new FailedArchiveException("Failed to archive/delete all the files for region:" - + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", - failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); + List failedArchive = + resolveAndArchive(fs, storeArchiveDir, toArchive, EnvironmentEdgeManager.currentTime()); + if (!failedArchive.isEmpty()) { + throw new FailedArchiveException( + "Failed to archive/delete all the files for region:" + + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) + " into " + + storeArchiveDir + ". 
Something is probably awry on the filesystem.", + failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } } /** * Remove the store files, either by archiving them or outright deletion - * @param conf {@link Configuration} to examine to determine the archive directory - * @param fs the filesystem where the store files live - * @param regionInfo {@link RegionInfo} of the region hosting the store files - * @param family the family hosting the store files + * @param conf {@link Configuration} to examine to determine the archive directory + * @param fs the filesystem where the store files live + * @param regionInfo {@link RegionInfo} of the region hosting the store files + * @param family the family hosting the store files * @param compactedFiles files to be disposed of. No further reading of these files should be - * attempted; otherwise likely to cause an {@link IOException} + * attempted; otherwise likely to cause an {@link IOException} * @throws IOException if the files could not be correctly disposed. */ public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo, - Path tableDir, byte[] family, Collection compactedFiles) - throws IOException { + Path tableDir, byte[] family, Collection compactedFiles) throws IOException { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); archive(fs, regionInfo, family, compactedFiles, storeArchiveDir); } @@ -304,30 +304,29 @@ public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionIn /** * Archive recovered edits using existing logic for archiving store files. This is currently only * relevant when hbase.region.archive.recovered.edits is true, as recovered edits shouldn't - * be kept after replay. In theory, we could use very same method available for archiving - * store files, but supporting WAL dir and store files on different FileSystems added the need for - * extra validation of the passed FileSystem instance and the path where the archiving edits - * should be placed. - * @param conf {@link Configuration} to determine the archive directory. - * @param fs the filesystem used for storing WAL files. - * @param regionInfo {@link RegionInfo} a pseudo region representation for the archiving logic. - * @param family a pseudo familiy representation for the archiving logic. + * be kept after replay. In theory, we could use very same method available for archiving store + * files, but supporting WAL dir and store files on different FileSystems added the need for extra + * validation of the passed FileSystem instance and the path where the archiving edits should be + * placed. + * @param conf {@link Configuration} to determine the archive directory. + * @param fs the filesystem used for storing WAL files. + * @param regionInfo {@link RegionInfo} a pseudo region representation for the archiving logic. + * @param family a pseudo familiy representation for the archiving logic. * @param replayedEdits the recovered edits to be archived. * @throws IOException if files can't be achived due to some internal error. 
*/ public static void archiveRecoveredEdits(Configuration conf, FileSystem fs, RegionInfo regionInfo, - byte[] family, Collection replayedEdits) - throws IOException { + byte[] family, Collection replayedEdits) throws IOException { String workingDir = conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)); - //extra sanity checks for the right FS + // extra sanity checks for the right FS Path path = new Path(workingDir); - if(path.isAbsoluteAndSchemeAuthorityNull()){ - //no schema specified on wal dir value, so it's on same FS as StoreFiles + if (path.isAbsoluteAndSchemeAuthorityNull()) { + // no schema specified on wal dir value, so it's on same FS as StoreFiles path = new Path(conf.get(HConstants.HBASE_DIR)); } - if(path.toUri().getScheme()!=null && !path.toUri().getScheme().equals(fs.getScheme())){ - throw new IOException("Wrong file system! Should be " + path.toUri().getScheme() + - ", but got " + fs.getScheme()); + if (path.toUri().getScheme() != null && !path.toUri().getScheme().equals(fs.getScheme())) { + throw new IOException( + "Wrong file system! Should be " + path.toUri().getScheme() + ", but got " + fs.getScheme()); } path = HFileArchiveUtil.getStoreArchivePathForRootDir(path, regionInfo, family); archive(fs, regionInfo, family, replayedEdits, path); @@ -337,8 +336,9 @@ private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family, Collection compactedFiles, Path storeArchiveDir) throws IOException { // sometimes in testing, we don't have rss, so we need to check for that if (fs == null) { - LOG.warn("Passed filesystem is null, so just deleting files without archiving for {}," + - "family={}", Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); + LOG.warn( + "Passed filesystem is null, so just deleting files without archiving for {}," + "family={}", + Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); deleteStoreFilesWithoutArchiving(compactedFiles); return; } @@ -350,12 +350,12 @@ private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family, } // build the archive path - if (regionInfo == null || family == null) throw new IOException( - "Need to have a region and a family to archive from."); + if (regionInfo == null || family == null) + throw new IOException("Need to have a region and a family to archive from."); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" - + Bytes.toString(family) + ", deleting compacted files instead."); + + Bytes.toString(family) + ", deleting compacted files instead."); } // otherwise we attempt to archive the store files @@ -370,31 +370,33 @@ private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family, List failedArchive = resolveAndArchive(fs, storeArchiveDir, storeFiles, EnvironmentEdgeManager.currentTime()); - if (!failedArchive.isEmpty()){ - throw new FailedArchiveException("Failed to archive/delete all the files for region:" + if (!failedArchive.isEmpty()) { + throw new FailedArchiveException( + "Failed to archive/delete all the files for region:" + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family) + " into " + storeArchiveDir + ". 
Something is probably awry on the filesystem.", - failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); + failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } } /** * Archive the store file - * @param fs the filesystem where the store files live + * @param fs the filesystem where the store files live * @param regionInfo region hosting the store files - * @param conf {@link Configuration} to examine to determine the archive directory - * @param tableDir {@link Path} to where the table is being stored (for building the archive path) - * @param family the family hosting the store files - * @param storeFile file to be archived + * @param conf {@link Configuration} to examine to determine the archive directory + * @param tableDir {@link Path} to where the table is being stored (for building the archive + * path) + * @param family the family hosting the store files + * @param storeFile file to be archived * @throws IOException if the files could not be correctly disposed. */ public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInfo regionInfo, - Path tableDir, byte[] family, Path storeFile) throws IOException { + Path tableDir, byte[] family, Path storeFile) throws IOException { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" - + Bytes.toString(family) + ", deleting compacted files instead."); + + Bytes.toString(family) + ", deleting compacted files instead."); } // do the actual archive @@ -402,27 +404,25 @@ public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInf File file = new FileablePath(fs, storeFile); if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { throw new IOException("Failed to archive/delete the file for region:" - + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem."); + + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into " + + storeArchiveDir + ". Something is probably awry on the filesystem."); } } /** - * Resolve any conflict with an existing archive file via timestamp-append - * renaming of the existing file and then archive the passed in files. - * @param fs {@link FileSystem} on which to archive the files - * @param baseArchiveDir base archive directory to store the files. If any of - * the files to archive are directories, will append the name of the - * directory to the base archive directory name, creating a parallel - * structure. - * @param toArchive files/directories that need to be archvied - * @param start time the archiving started - used for resolving archive - * conflicts. + * Resolve any conflict with an existing archive file via timestamp-append renaming of the + * existing file and then archive the passed in files. + * @param fs {@link FileSystem} on which to archive the files + * @param baseArchiveDir base archive directory to store the files. If any of the files to archive + * are directories, will append the name of the directory to the base + * archive directory name, creating a parallel structure. + * @param toArchive files/directories that need to be archvied + * @param start time the archiving started - used for resolving archive conflicts. 
* @return the list of failed to archive files. * @throws IOException if an unexpected file operation exception occurred */ private static List resolveAndArchive(FileSystem fs, Path baseArchiveDir, - Collection toArchive, long start) throws IOException { + Collection toArchive, long start) throws IOException { // short circuit if no files to move if (toArchive.isEmpty()) { return Collections.emptyList(); @@ -434,7 +434,7 @@ private static List resolveAndArchive(FileSystem fs, Path baseArchiveDir, if (!fs.exists(baseArchiveDir)) { if (!fs.mkdirs(baseArchiveDir)) { throw new IOException("Failed to create the archive directory:" + baseArchiveDir - + ", quitting archive attempt."); + + ", quitting archive attempt."); } LOG.trace("Created archive directory {}", baseArchiveDir); } @@ -474,15 +474,15 @@ private static List resolveAndArchive(FileSystem fs, Path baseArchiveDir, *
    * If the same file already exists in the archive, it is moved to a timestamped directory under * the archive directory and the new file is put in its place. - * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles - * @param currentFile {@link Path} to the original HFile that will be archived + * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles + * @param currentFile {@link Path} to the original HFile that will be archived * @param archiveStartTime time the archiving started, to resolve naming conflicts * @return true if the file is successfully archived. false if there was a * problem, but the operation still completed. * @throws IOException on failure to complete {@link FileSystem} operations. */ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, - String archiveStartTime) throws IOException { + String archiveStartTime) throws IOException { // build path as it should be in the archive String filename = currentFile.getName(); Path archiveFile = new Path(archiveDir, filename); @@ -514,14 +514,16 @@ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, long curMtime = curStatus.getModificationTime(); long archiveMtime = archiveStatus.getModificationTime(); if (curLen != archiveLen) { - LOG.error("{} already exists in archive with different size than current {}." + LOG.error( + "{} already exists in archive with different size than current {}." + " archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime); - throw new IOException(archiveFile + " already exists in archive with different size" + - " than " + currentFile); + throw new IOException( + archiveFile + " already exists in archive with different size" + " than " + currentFile); } - LOG.error("{} already exists in archive, moving to timestamped backup and overwriting" + LOG.error( + "{} already exists in archive, moving to timestamped backup and overwriting" + " current {}. archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime); @@ -529,12 +531,12 @@ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime); if (!fs.rename(archiveFile, backedupArchiveFile)) { LOG.error("Could not rename archive file to backup: " + backedupArchiveFile - + ", deleting existing file in favor of newer."); + + ", deleting existing file in favor of newer."); // try to delete the existing file, if we can't rename it if (!fs.delete(archiveFile, false)) { throw new IOException("Couldn't delete existing archive file (" + archiveFile - + ") or rename it to the backup file (" + backedupArchiveFile - + ") to make room for similarly named file."); + + ") or rename it to the backup file (" + backedupArchiveFile + + ") to make room for similarly named file."); } } else { LOG.info("Backed up archive file from {} to {}.", archiveFile, backedupArchiveFile); @@ -565,8 +567,8 @@ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, try { success = currentFile.moveAndClose(archiveFile); } catch (FileNotFoundException fnfe) { - LOG.warn("Failed to archive " + currentFile + - " because it does not exist! Skipping and continuing on.", fnfe); + LOG.warn("Failed to archive " + currentFile + + " because it does not exist! 
Skipping and continuing on.", fnfe); success = true; } catch (IOException e) { LOG.warn("Failed to archive " + currentFile + " on try #" + i, e); @@ -586,12 +588,12 @@ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, /** * Without regard for backup, delete a region. Should be used with caution. * @param regionDir {@link Path} to the region to be deleted. - * @param fs FileSystem from which to delete the region + * @param fs FileSystem from which to delete the region * @return true on successful deletion, false otherwise * @throws IOException on filesystem operation failure */ private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir) - throws IOException { + throws IOException { if (fs.delete(regionDir, true)) { LOG.debug("Deleted {}", regionDir); return true; @@ -607,10 +609,10 @@ private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDi *
    * @param compactedFiles store files to delete from the file system. * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before - * throwing the exception, rather than failing at the first file. + * throwing the exception, rather than failing at the first file. */ private static void deleteStoreFilesWithoutArchiving(Collection compactedFiles) - throws IOException { + throws IOException { LOG.debug("Deleting files without archiving."); List errors = new ArrayList<>(0); for (HStoreFile hsf : compactedFiles) { @@ -654,8 +656,7 @@ public File apply(FileStatus input) { } /** - * Convert the {@link HStoreFile} into something we can manage in the archive - * methods + * Convert the {@link HStoreFile} into something we can manage in the archive methods */ private static class StoreToFile extends FileConverter { public StoreToFile(FileSystem fs) { @@ -692,21 +693,18 @@ public File(FileSystem fs) { abstract boolean isFile() throws IOException; /** - * @return if this is a directory, returns all the children in the - * directory, otherwise returns an empty list - * @throws IOException + * @return if this is a directory, returns all the children in the directory, otherwise returns + * an empty list n */ abstract Collection getChildren() throws IOException; /** - * close any outside readers of the file - * @throws IOException + * close any outside readers of the file n */ abstract void close() throws IOException; /** - * @return the name of the file (not the full fs path, just the individual - * file name) + * @return the name of the file (not the full fs path, just the individual file name) */ abstract String getName(); @@ -716,10 +714,7 @@ public File(FileSystem fs) { abstract Path getPath(); /** - * Move the file to the given destination - * @param dest - * @return true on success - * @throws IOException + * Move the file to the given destination n * @return true on success n */ public boolean moveAndClose(Path dest) throws IOException { this.close(); @@ -788,8 +783,7 @@ Path getPath() { } /** - * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} - * . + * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} . */ private static class FileableStoreFile extends File { HStoreFile file; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index 9978f4a67d80..17ecdf1a83ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Connection; @@ -46,11 +45,11 @@ class HFileArchiveManager { private volatile boolean stopped = false; public HFileArchiveManager(Connection connection, Configuration conf) - throws ZooKeeperConnectionException, IOException { - this.zooKeeper = new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), - connection); - this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), - this.zooKeeper); + throws ZooKeeperConnectionException, IOException { + this.zooKeeper = + new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), connection); + this.archiveZnode = + ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), this.zooKeeper); } /** @@ -101,7 +100,7 @@ public HFileArchiveManager disableHFileBackup() throws IOException { * No attempt is made to make sure that backups are successfully created - it is inherently an * asynchronous operation. * @param zooKeeper watcher connection to zk cluster - * @param table table name on which to enable archiving + * @param table table name on which to enable archiving * @throws KeeperException if a ZooKeeper operation fails */ private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { @@ -119,7 +118,7 @@ private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { *
    * Inherently an asynchronous operation. * @param zooKeeper watcher for the ZK cluster - * @param table name of the table to disable + * @param table name of the table to disable * @throws KeeperException if an unexpected ZK connection issues occurs */ private void disable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java index a4daaf011391..80cdc7587a15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +36,7 @@ public class HFileArchiveTableMonitor { private final Set archivedTables = new TreeSet<>(); /** - * Set the tables to be archived. Internally adds each table and attempts to - * register it. + * Set the tables to be archived. Internally adds each table and attempts to register it. *
    * Note: All previous tables will be removed in favor of these tables. * @param tables add each of the tables to be archived. @@ -48,8 +47,7 @@ public synchronized void setArchiveTables(List tables) { } /** - * Add the named table to be those being archived. Attempts to register the - * table + * Add the named table to be those being archived. Attempts to register the table * @param table name of the table to be registered */ public synchronized void addTable(String table) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java index 946f7593d43f..b26b6bc4ef92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,10 +35,9 @@ * currently being archived. *
    * This only works properly if the - * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} - * is also enabled (it always should be), since it may take a little time - * for the ZK notification to propagate, in which case we may accidentally - * delete some files. + * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} is also enabled (it always + * should be), since it may take a little time for the ZK notification to propagate, in which case + * we may accidentally delete some files. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { @@ -55,7 +54,7 @@ public boolean isFileDeletable(FileStatus fStat) { if (fStat.isDirectory()) { return true; } - + Path file = fStat.getPath(); // check to see if FileStatus[] deleteStatus = CommonFSUtils.listStatus(this.fs, file, null); @@ -72,8 +71,8 @@ public boolean isFileDeletable(FileStatus fStat) { String tableName = table.getName(); boolean ret = !archiveTracker.keepHFiles(tableName); - LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + - tableName); + LOG + .debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName); return ret; } catch (IOException e) { LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java index 49b0e827758b..1896199ede2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.zookeeper.ZKListener; @@ -49,8 +48,8 @@ private TableHFileArchiveTracker(ZKWatcher watcher, HFileArchiveTableMonitor mon super(watcher); watcher.registerListener(this); this.monitor = monitor; - this.archiveHFileZNode = ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(), - watcher); + this.archiveHFileZNode = + ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(), watcher); } /** @@ -84,8 +83,8 @@ public void nodeCreated(String path) { try { addAndReWatchTable(path); } catch (KeeperException e) { - LOG.warn("Couldn't read zookeeper data for table for path:" + path - + ", not preserving a table.", e); + LOG.warn( + "Couldn't read zookeeper data for table for path:" + path + ", not preserving a table.", e); } } @@ -235,11 +234,11 @@ public final HFileArchiveTableMonitor getMonitor() { * @param conf to read for zookeeper connection information * @return ZooKeeper tracker to monitor for this server if this server should archive hfiles for a * given table - * @throws IOException If a unexpected exception occurs + * @throws IOException If a unexpected exception occurs * @throws ZooKeeperConnectionException if we can't reach zookeeper */ public static TableHFileArchiveTracker create(Configuration conf) - throws ZooKeeperConnectionException, IOException { + throws ZooKeeperConnectionException, IOException { ZKWatcher zkw = new ZKWatcher(conf, "hfileArchiveCleaner", null); return create(zkw, new HFileArchiveTableMonitor()); } @@ -247,13 +246,12 @@ public static TableHFileArchiveTracker create(Configuration conf) /** * Create an archive tracker with the special passed in table monitor. Should only be used in * special cases (e.g. testing) - * @param zkw Watcher for the ZooKeeper cluster that we should track + * @param zkw Watcher for the ZooKeeper cluster that we should track * @param monitor Monitor for which tables need hfile archiving * @return ZooKeeper tracker to monitor for this server if this server should archive hfiles for a * given table */ - private static TableHFileArchiveTracker create(ZKWatcher zkw, - HFileArchiveTableMonitor monitor) { + private static TableHFileArchiveTracker create(ZKWatcher zkw, HFileArchiveTableMonitor monitor) { return new TableHFileArchiveTracker(zkw, monitor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index 59c7537c84a0..2d33d9aaa20c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.client.Connection; @@ -54,7 +53,7 @@ public ZKTableArchiveClient(Configuration conf, Connection connection) { * If the table does not exist, the archiving the table's hfiles is still enabled as a future * table with that name may be created shortly. * @param table name of the table to start backing up - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void enableHFileBackupAsync(final byte[] table) throws IOException, KeeperException { @@ -69,7 +68,7 @@ public void enableHFileBackupAsync(final byte[] table) throws IOException, Keepe * Asynchronous operation - some extra HFiles may be retained, in the archive directory after * disable is called, dependent on the latency in zookeeper to the servers. * @param table name of the table stop backing up - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void disableHFileBackup(String table) throws IOException, KeeperException { @@ -84,7 +83,7 @@ public void disableHFileBackup(String table) throws IOException, KeeperException * Asynchronous operation - some extra HFiles may be retained, in the archive directory after * disable is called, dependent on the latency in zookeeper to the servers. * @param table name of the table stop backing up - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void disableHFileBackup(final byte[] table) throws IOException, KeeperException { @@ -98,7 +97,7 @@ public void disableHFileBackup(final byte[] table) throws IOException, KeeperExc *
    * Asynchronous operation - some extra HFiles may be retained, in the archive directory after * disable is called, dependent on the latency in zookeeper to the servers. - * @throws IOException if an unexpected exception occurs + * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void disableHFileBackup() throws IOException, KeeperException { @@ -109,7 +108,7 @@ public void disableHFileBackup() throws IOException, KeeperException { * Determine if archiving is enabled (but not necessarily fully propagated) for a table * @param table name of the table to check * @return true if it is, false otherwise - * @throws IOException if a connection to ZooKeeper cannot be established + * @throws IOException if a connection to ZooKeeper cannot be established * @throws KeeperException if a ZooKeeper operation fails */ public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException { @@ -125,7 +124,7 @@ public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperExcep * Determine if archiving is enabled (but not necessarily fully propagated) for a table * @param table name of the table to check * @return true if it is, false otherwise - * @throws IOException if an unexpected network issue occurs + * @throws IOException if an unexpected network issue occurs * @throws KeeperException if zookeeper can't be reached */ public boolean getArchivingEnabled(String table) throws IOException, KeeperException { @@ -136,20 +135,20 @@ public boolean getArchivingEnabled(String table) throws IOException, KeeperExcep * @return A new {@link HFileArchiveManager} to manage which tables' hfiles should be archived * rather than deleted. * @throws KeeperException if we can't reach zookeeper - * @throws IOException if an unexpected network issue occurs + * @throws IOException if an unexpected network issue occurs */ - private synchronized HFileArchiveManager createHFileArchiveManager() throws KeeperException, - IOException { + private synchronized HFileArchiveManager createHFileArchiveManager() + throws KeeperException, IOException { return new HFileArchiveManager(this.connection, this.getConf()); } /** - * @param conf conf to read for the base archive node + * @param conf conf to read for the base archive node * @param zooKeeper zookeeper to used for building the full path * @return get the znode for long-term archival of a table for */ public static String getArchiveZNode(Configuration conf, ZKWatcher zooKeeper) { - return ZNodePaths.joinZNode(zooKeeper.getZNodePaths().baseZNode, conf.get( - ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT)); + return ZNodePaths.joinZNode(zooKeeper.getZNodePaths().baseZNode, conf + .get(ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java index 11c4f4f359cd..9a03db6cc45b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public interface AsyncClusterConnection extends AsyncConnection { * Return all the replicas for a region. Used for region replica replication. */ CompletableFuture getRegionLocations(TableName tableName, byte[] row, - boolean reload); + boolean reload); /** * Return the token for this bulk load. @@ -74,15 +74,15 @@ CompletableFuture getRegionLocations(TableName tableName, byte[ *
    * Defined as default here to avoid breaking callers who rely on the bulkLoad version that does * not expect additional clusterIds param. - * @param tableName the target table - * @param familyPaths hdfs path for the the table family dirs containg files to be loaded. - * @param row row key. + * @param tableName the target table + * @param familyPaths hdfs path for the the table family dirs containg files to be loaded. + * @param row row key. * @param assignSeqNum seq num for the event on WAL. - * @param userToken user token. - * @param bulkToken bulk load token. - * @param copyFiles flag for copying the loaded hfiles. - * @param clusterIds list of cluster ids where the given bulk load has already been processed. - * @param replicate flags if the bulkload is targeted for replication. + * @param userToken user token. + * @param bulkToken bulk load token. + * @param copyFiles flag for copying the loaded hfiles. + * @param clusterIds list of cluster ids where the given bulk load has already been processed. + * @param replicate flags if the bulkload is targeted for replication. */ CompletableFuture bulkLoad(TableName tableName, List> familyPaths, byte[] row, boolean assignSeqNum, Token userToken, String bulkToken, boolean copyFiles, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java index 789d6162988f..1dda6c32ca04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection { public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry, - String clusterId, SocketAddress localAddress, User user) { + String clusterId, SocketAddress localAddress, User user) { super(conf, registry, clusterId, localAddress, user); } @@ -79,29 +79,27 @@ public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) { @Override public CompletableFuture flush(byte[] regionName, - boolean writeFlushWALMarker) { + boolean writeFlushWALMarker) { RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin(); return admin.flushRegionInternal(regionName, null, writeFlushWALMarker); } @Override public CompletableFuture getRegionLocations(TableName tableName, byte[] row, - boolean reload) { + boolean reload) { return getLocator().getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L); } @Override public CompletableFuture prepareBulkLoad(TableName tableName) { return callerFactory. single().table(tableName).row(HConstants.EMPTY_START_ROW) - .action((controller, loc, stub) -> ConnectionUtils - . call(controller, loc, - stub, tableName, (rn, tn) -> { - RegionSpecifier region = - RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); - return PrepareBulkLoadRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tn)).setRegion(region).build(); - }, (s, c, req, done) -> s.prepareBulkLoad(c, req, done), - (c, resp) -> resp.getBulkToken())) + .action((controller, loc, stub) -> ConnectionUtils. 
call(controller, loc, stub, tableName, (rn, tn) -> { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); + return PrepareBulkLoadRequest.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tn)) + .setRegion(region).build(); + }, (s, c, req, done) -> s.prepareBulkLoad(c, req, done), (c, resp) -> resp.getBulkToken())) .call(); } @@ -110,9 +108,8 @@ public CompletableFuture bulkLoad(TableName tableName, List> familyPaths, byte[] row, boolean assignSeqNum, Token userToken, String bulkToken, boolean copyFiles, List clusterIds, boolean replicate) { return callerFactory. single().table(tableName).row(row) - .action((controller, loc, stub) -> ConnectionUtils - . call(controller, loc, stub, - null, + .action((controller, loc, stub) -> ConnectionUtils. call(controller, loc, stub, null, (rn, nil) -> RequestConverter.buildBulkLoadHFileRequest(familyPaths, rn, assignSeqNum, userToken, bulkToken, copyFiles, clusterIds, replicate), (s, c, req, done) -> s.bulkLoadHFile(c, req, done), (c, resp) -> resp.getLoaded())) @@ -122,13 +119,12 @@ public CompletableFuture bulkLoad(TableName tableName, @Override public CompletableFuture cleanupBulkLoad(TableName tableName, String bulkToken) { return callerFactory. single().table(tableName).row(HConstants.EMPTY_START_ROW) - .action((controller, loc, stub) -> ConnectionUtils - . call(controller, loc, stub, - bulkToken, (rn, bt) -> { - RegionSpecifier region = - RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); - return CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bt).build(); - }, (s, c, req, done) -> s.cleanupBulkLoad(c, req, done), (c, resp) -> null)) + .action((controller, loc, stub) -> ConnectionUtils. call(controller, loc, stub, bulkToken, (rn, bt) -> { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); + return CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bt).build(); + }, (s, c, req, done) -> s.cleanupBulkLoad(c, req, done), (c, resp) -> null)) .call(); } @@ -170,9 +166,8 @@ public CompletableFuture> getAllBootstrapNodes(ServerName regio } @Override - public CompletableFuture replicate(RegionInfo replica, - List entries, int retries, long rpcTimeoutNs, - long operationTimeoutNs) { + public CompletableFuture replicate(RegionInfo replica, List entries, int retries, + long rpcTimeoutNs, long operationTimeoutNs) { return new AsyncRegionReplicationRetryingCaller(RETRY_TIMER, this, ConnectionUtils.retries2Attempts(retries), rpcTimeoutNs, operationTimeoutNs, replica, entries) .call(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java index 734baad1e0a3..02718145c9b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,8 +61,10 @@ public AsyncRegionReplicationRetryingCaller(HashedWheelTimer retryTimer, @Override protected Throwable preProcessError(Throwable error) { - if (error instanceof DoNotRetryIOException && - error.getCause() instanceof UnsupportedOperationException) { + if ( + error instanceof DoNotRetryIOException + && error.getCause() instanceof UnsupportedOperationException + ) { // fallback to use replay, and also return the cause to let the upper retry useReplay = true; return error.getCause(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java index 8ff869fcdb94..f5fcc02e9186 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -128,8 +128,8 @@ public CompletableFuture getStoreFile(GetStoreFileRequest return call((stub, controller, done) -> stub.getStoreFile(controller, request, done)); } - public CompletableFuture getOnlineRegion( - GetOnlineRegionRequest request) { + public CompletableFuture + getOnlineRegion(GetOnlineRegionRequest request) { return call((stub, controller, done) -> stub.getOnlineRegion(controller, request, done)); } @@ -149,8 +149,8 @@ public CompletableFuture flushRegion(FlushRegionRequest req return call((stub, controller, done) -> stub.flushRegion(controller, request, done)); } - public CompletableFuture compactionSwitch( - CompactionSwitchRequest request) { + public CompletableFuture + compactionSwitch(CompactionSwitchRequest request) { return call((stub, controller, done) -> stub.compactionSwitch(controller, request, done)); } @@ -158,8 +158,8 @@ public CompletableFuture compactRegion(CompactRegionReque return call((stub, controller, done) -> stub.compactRegion(controller, request, done)); } - public CompletableFuture replicateWALEntry( - ReplicateWALEntryRequest request, CellScanner cellScanner, int timeout) { + public CompletableFuture + replicateWALEntry(ReplicateWALEntryRequest request, CellScanner cellScanner, int timeout) { return call((stub, controller, done) -> { controller.setCallTimeout(timeout); stub.replicateWALEntry(controller, request, done); @@ -167,7 +167,7 @@ public CompletableFuture replicateWALEntry( } public CompletableFuture replay(ReplicateWALEntryRequest request, - CellScanner cellScanner) { + CellScanner cellScanner) { return call((stub, controller, done) -> stub.replay(controller, request, done), cellScanner); } @@ -183,13 +183,13 @@ public CompletableFuture stopServer(StopServerRequest reques return call((stub, controller, done) -> stub.stopServer(controller, request, done)); } - public CompletableFuture updateFavoredNodes( - UpdateFavoredNodesRequest request) { + public CompletableFuture + updateFavoredNodes(UpdateFavoredNodesRequest request) { return call((stub, controller, done) -> stub.updateFavoredNodes(controller, request, done)); } - public CompletableFuture updateConfiguration( - UpdateConfigurationRequest request) { + public CompletableFuture + updateConfiguration(UpdateConfigurationRequest request) { return call((stub, controller, done) -> stub.updateConfiguration(controller, 
request, done)); } @@ -197,23 +197,23 @@ public CompletableFuture getRegionLoad(GetRegionLoadReque return call((stub, controller, done) -> stub.getRegionLoad(controller, request, done)); } - public CompletableFuture clearCompactionQueues( - ClearCompactionQueuesRequest request) { + public CompletableFuture + clearCompactionQueues(ClearCompactionQueuesRequest request) { return call((stub, controller, done) -> stub.clearCompactionQueues(controller, request, done)); } - public CompletableFuture clearRegionBlockCache( - ClearRegionBlockCacheRequest request) { + public CompletableFuture + clearRegionBlockCache(ClearRegionBlockCacheRequest request) { return call((stub, controller, done) -> stub.clearRegionBlockCache(controller, request, done)); } - public CompletableFuture getSpaceQuotaSnapshots( - GetSpaceQuotaSnapshotsRequest request) { + public CompletableFuture + getSpaceQuotaSnapshots(GetSpaceQuotaSnapshotsRequest request) { return call((stub, controller, done) -> stub.getSpaceQuotaSnapshots(controller, request, done)); } - public CompletableFuture executeProcedures( - ExecuteProceduresRequest request) { + public CompletableFuture + executeProcedures(ExecuteProceduresRequest request) { return call((stub, controller, done) -> stub.executeProcedures(controller, request, done)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 1feafc18993f..4a8dd1d3ac86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -38,8 +37,8 @@ import org.slf4j.LoggerFactory; /** - * A client scanner for a region opened for read-only on the client side. Assumes region data - * is not changing. + * A client scanner for a region opened for read-only on the client side. Assumes region data is not + * changing. 
*/ @InterfaceAudience.Private public class ClientSideRegionScanner extends AbstractClientScanner { @@ -50,9 +49,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner { RegionScanner scanner; List values; - public ClientSideRegionScanner(Configuration conf, FileSystem fs, - Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) - throws IOException { + public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir, + TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) throws IOException { // region is immutable, set isolation level scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -67,7 +65,7 @@ public ClientSideRegionScanner(Configuration conf, FileSystem fs, // IndexOnlyLruBlockCache and set a value to HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU"); conf.setIfUnset(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, - String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT)); + String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT)); // don't allow L2 bucket cache for non RS process to avoid unexpected disk usage. conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); @@ -95,7 +93,7 @@ public Result next() throws IOException { values.clear(); scanner.nextRaw(values); if (values.isEmpty()) { - //we are done + // we are done return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java index 0c216c6daa38..579da46af1c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java index c8b0a26e7878..a75faf3db75b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java index 0f05b21c05b6..ebffc7ee5111 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,7 @@ public AsyncTableBuilder getTableBuilder(TableName t @Override public AsyncTableBuilder getTableBuilder(TableName tableName, - ExecutorService pool) { + ExecutorService pool) { return conn.getTableBuilder(tableName, pool); } @@ -90,7 +90,7 @@ public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName @Override public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName, - ExecutorService pool) { + ExecutorService pool) { return conn.getBufferedMutatorBuilder(tableName, pool); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java index a6efc1134a77..1eb4e2d08ea8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index dd03ab26675d..72242c47558c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -42,18 +41,14 @@ * A Scanner which performs a scan over snapshot files. Using this class requires copying the * snapshot to a temporary empty directory, which will copy the snapshot reference files into that * directory. Actual data files are not copied. - * *
    - * This also allows one to run the scan from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, - * to a pure-hdfs cluster, and this scanner can be used to - * run the scan directly over the snapshot files. The snapshot should not be deleted while there - * are open scanners reading from snapshot files. - * + * This also allows one to run the scan from an online or offline hbase cluster. The snapshot files + * can be exported by using the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, to a pure-hdfs + * cluster, and this scanner can be used to run the scan directly over the snapshot files. The + * snapshot should not be deleted while there are open scanners reading from snapshot files. *
    - * An internal RegionScanner is used to execute the {@link Scan} obtained - * from the user for each region in the snapshot. + * An internal RegionScanner is used to execute the {@link Scan} obtained from the user for each + * region in the snapshot. *
    * HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from * snapshot files and data files. HBase also enforces security because all the requests are handled @@ -62,8 +57,8 @@ * permissions to access snapshot and reference files. This means that to run mapreduce over * snapshot files, the job has to be run as the HBase user or the user must have group or other * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from - * snapshot/data files will completely circumvent the access control enforced by HBase. - * See org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. + * snapshot/data files will completely circumvent the access control enforced by HBase. See + * org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. */ @InterfaceAudience.Private public class TableSnapshotScanner extends AbstractClientScanner { @@ -80,44 +75,47 @@ public class TableSnapshotScanner extends AbstractClientScanner { private TableDescriptor htd; private final boolean snapshotAlreadyRestored; - private ClientSideRegionScanner currentRegionScanner = null; + private ClientSideRegionScanner currentRegionScanner = null; private int currentRegion = -1; private int numOfCompleteRows = 0; + /** * Creates a TableSnapshotScanner. - * @param conf the configuration - * @param restoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of - * rootDir. The scanner deletes the contents of the directory once the scanner is closed. + * @param conf the configuration + * @param restoreDir a temporary directory to copy the snapshot files into. Current user should + * have write permissions to this directory, and this should not be a + * subdirectory of rootDir. The scanner deletes the contents of the directory + * once the scanner is closed. * @param snapshotName the name of the snapshot to read from - * @param scan a Scan representing scan parameters + * @param scan a Scan representing scan parameters * @throws IOException in case of error */ public TableSnapshotScanner(Configuration conf, Path restoreDir, String snapshotName, Scan scan) - throws IOException { + throws IOException { this(conf, CommonFSUtils.getRootDir(conf), restoreDir, snapshotName, scan); } public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, - String snapshotName, Scan scan) throws IOException { + String snapshotName, Scan scan) throws IOException { this(conf, rootDir, restoreDir, snapshotName, scan, false); } /** * Creates a TableSnapshotScanner. - * @param conf the configuration - * @param rootDir root directory for HBase. - * @param restoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of - * rootdir. The scanner deletes the contents of the directory once the scanner is closed. - * @param snapshotName the name of the snapshot to read from - * @param scan a Scan representing scan parameters + * @param conf the configuration + * @param rootDir root directory for HBase. + * @param restoreDir a temporary directory to copy the snapshot files into. Current + * user should have write permissions to this directory, and this + * should not be a subdirectory of rootdir. The scanner deletes the + * contents of the directory once the scanner is closed. 
+ * @param snapshotName the name of the snapshot to read from + * @param scan a Scan representing scan parameters * @param snapshotAlreadyRestored true to indicate that snapshot has been restored. * @throws IOException in case of error */ public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, - String snapshotName, Scan scan, boolean snapshotAlreadyRestored) throws IOException { + String snapshotName, Scan scan, boolean snapshotAlreadyRestored) throws IOException { this.conf = conf; this.snapshotName = snapshotName; this.rootDir = rootDir; @@ -140,7 +138,7 @@ public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, private void openWithoutRestoringSnapshot() throws IOException { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); List regionManifests = manifest.getRegionManifests(); @@ -165,7 +163,7 @@ private boolean isValidRegion(RegionInfo hri) { private void openWithRestoringSnapshot() throws IOException { final RestoreSnapshotHelper.RestoreMetaChanges meta = - RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); + RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); final List restoredRegions = meta.getRegionsToAdd(); htd = meta.getTableDescriptor(); @@ -184,8 +182,8 @@ public Result next() throws IOException { } RegionInfo hri = regions.get(currentRegion); - currentRegionScanner = new ClientSideRegionScanner(conf, fs, - restoreDir, htd, hri, scan, scanMetrics); + currentRegionScanner = + new ClientSideRegionScanner(conf, fs, restoreDir, htd, hri, scan, scanMetrics); if (this.scanMetrics != null) { this.scanMetrics.countOfRegions.incrementAndGet(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index 8418161048f6..dadb0998ed71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; - /** * Class to help with parsing the version info. 
*/ @@ -40,9 +39,8 @@ public static boolean currentClientHasMinimumVersion(int major, int minor) { return hasMinimumVersion(getCurrentClientVersionInfo(), major, minor); } - public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, - int major, - int minor) { + public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, int major, + int minor) { if (versionInfo != null) { if (versionInfo.hasVersionMajor() && versionInfo.hasVersionMinor()) { int clientMajor = versionInfo.getVersionMajor(); @@ -70,15 +68,15 @@ public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, } /** - * We intend to use the local version for service call shortcut(s), so we use an interface - * compatible with a typical service call, with 2 args, return type, and an exception type. + * We intend to use the local version for service call shortcut(s), so we use an interface + * compatible with a typical service call, with 2 args, return type, and an exception type. */ public interface ServiceCallFunction { R apply(T1 t1, T2 t2) throws E; } - public static R callWithVersion( - ServiceCallFunction f, T1 t1, T2 t2) throws E { + public static R + callWithVersion(ServiceCallFunction f, T1 t1, T2 t2) throws E { // Note: just as RpcServer.CurCall, this will only apply on the current thread. NonCallVersion.set(ProtobufUtil.getVersionInfo()); try { @@ -92,27 +90,22 @@ public static R callWithVersion( * @return the versionInfo extracted from the current RpcCallContext */ public static HBaseProtos.VersionInfo getCurrentClientVersionInfo() { - return RpcServer.getCurrentCall().map( - RpcCallContext::getClientVersionInfo).orElse(NonCallVersion.get()); + return RpcServer.getCurrentCall().map(RpcCallContext::getClientVersionInfo) + .orElse(NonCallVersion.get()); } - /** - * @param version - * @return the passed-in version int as a version String - * (e.g. 0x0103004 is 1.3.4) + * n * @return the passed-in version int as a version String (e.g. 0x0103004 is + * 1.3.4) */ public static String versionNumberToString(final int version) { - return String.format("%d.%d.%d", - ((version >> 20) & 0xff), - ((version >> 12) & 0xff), - (version & 0xfff)); + return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff), + (version & 0xfff)); } /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * Pack the full number version in a int. by shifting each component by 8bit, except the dot + * release which has 12bit. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) * @param versionInfo the VersionInfo object to pack * @return the version number as int. (e.g. 0x0103004 is 1.3.4) */ @@ -130,13 +123,12 @@ public static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) { return buildVersionNumber(clientMajor, clientMinor, 0); } } - return(0); // no version + return (0); // no version } /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * Pack the full number version in a int. by shifting each component by 8bit, except the dot + * release which has 12bit. 
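A self-contained sketch of the packing scheme described in the surrounding javadoc, 8 bits each for major and minor and 12 bits for the dot release; the class and method names below are illustrative and not part of VersionInfoUtil.

public class VersionPackingSketch {
  // Pack major/minor/patch into the layout that versionNumberToString above decodes.
  static int pack(int major, int minor, int patch) {
    return (major << 20) | (minor << 12) | patch;
  }

  // Mirror of versionNumberToString above.
  static String unpack(int version) {
    return String.format("%d.%d.%d", (version >> 20) & 0xff, (version >> 12) & 0xff,
      version & 0xfff);
  }

  public static void main(String[] args) {
    System.out.printf("1.3.4 -> 0x%07X%n", pack(1, 3, 4)); // 0x0103004
    System.out.printf("2.1.0 -> 0x%07X%n", pack(2, 1, 0)); // 0x0201000
    System.out.println(unpack(0x0103004));                 // 1.3.4
  }
}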
Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) * @param major version major number * @param minor version minor number * @param patch version patch number @@ -147,8 +139,8 @@ private static int buildVersionNumber(int major, int minor, int patch) { } /** - * Returns the version components - * Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"] + * Returns the version components Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns + * [4, 5, 6, "SNAPSHOT"] * @return the components of the version string */ private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java index e27574a0f924..50c6dc276579 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java @@ -15,57 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.locking; import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; -import org.apache.hadoop.hbase.util.Threads; /** - * Lock for HBase Entity either a Table, a Namespace, or Regions. - * - * These are remote locks which live on master, and need periodic heartbeats to keep them alive. - * (Once we request the lock, internally an heartbeat thread will be started on the client). - * If master does not receive the heartbeat in time, it'll release the lock and make it available - * to other users. - * - *

<p>Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}.
- * {@link #requestLock} will contact master to queue the lock and start the heartbeat thread
- * which will check lock's status periodically and once the lock is acquired, it will send the
- * heartbeats to the master.
- *
- * <p>Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired.
- * Always call {@link #unlock()} irrespective of whether lock was acquired or not. If the lock
- * was acquired, it'll be released. If it was not acquired, it is possible that master grants the
- * lock in future and the heartbeat thread keeps it alive forever by sending heartbeats.
- * Calling {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master.
- *
- * <p>There are 4 ways in which these remote locks may be released/can be lost:
- * <ul>
- * <li>Call {@link #unlock}.</li>
- * <li>Lock times out on master: Can happen because of network issues, GC pauses, etc.
- * Worker thread will call the given abortable as soon as it detects such a situation.</li>
+ * Lock for HBase Entity either a Table, a Namespace, or Regions. These are remote locks which live
+ * on master, and need periodic heartbeats to keep them alive. (Once we request the lock, internally
+ * an heartbeat thread will be started on the client). If master does not receive the heartbeat in
+ * time, it'll release the lock and make it available to other users.
+ * <p/>
+ * Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}.
+ * {@link #requestLock} will contact master to queue the lock and start the heartbeat thread which
+ * will check lock's status periodically and once the lock is acquired, it will send the heartbeats
+ * to the master.
+ * <p/>
+ * Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired. Always
+ * call {@link #unlock()} irrespective of whether lock was acquired or not. If the lock was
+ * acquired, it'll be released. If it was not acquired, it is possible that master grants the lock
+ * in future and the heartbeat thread keeps it alive forever by sending heartbeats. Calling
+ * {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master.
+ * <p/>
+ * There are 4 ways in which these remote locks may be released/can be lost:
+ * <ul>
+ * <li>Call {@link #unlock}.</li>
+ * <li>Lock times out on master: Can happen because of network issues, GC pauses, etc. Worker thread
+ * will call the given abortable as soon as it detects such a situation.</li>
 * <li>Fail to contact master: If worker thread can not contact mater and thus fails to send
- * heartbeat before the timeout expires, it assumes that lock is lost and calls the
- * abortable.</li>
+ * heartbeat before the timeout expires, it assumes that lock is lost and calls the abortable.</li>
 * <li>Worker thread is interrupted.</li>
 * </ul>
      - * - * Use example: - * + * Use example: * EntityLock lock = lockServiceClient.*Lock(...., "exampled lock", abortable); * lock.requestLock(); * .... @@ -81,8 +76,7 @@ public class EntityLock { private static final Logger LOG = LoggerFactory.getLogger(EntityLock.class); - public static final String HEARTBEAT_TIME_BUFFER = - "hbase.client.locks.heartbeat.time.buffer.ms"; + public static final String HEARTBEAT_TIME_BUFFER = "hbase.client.locks.heartbeat.time.buffer.ms"; private final AtomicBoolean locked = new AtomicBoolean(false); private final CountDownLatch latch = new CountDownLatch(1); @@ -102,12 +96,12 @@ public class EntityLock { private Long procId = null; /** - * Abortable.abort() is called when the lease of the lock will expire. - * It's up to the user decide if simply abort the process or handle the loss of the lock - * by aborting the operation that was supposed to be under lock. + * Abortable.abort() is called when the lease of the lock will expire. It's up to the user decide + * if simply abort the process or handle the loss of the lock by aborting the operation that was + * supposed to be under lock. */ - EntityLock(Configuration conf, LockService.BlockingInterface stub, - LockRequest request, Abortable abort) { + EntityLock(Configuration conf, LockService.BlockingInterface stub, LockRequest request, + Abortable abort) { this.stub = stub; this.lockRequest = request; this.abort = abort; @@ -158,10 +152,9 @@ public boolean isLocked() { } /** - * Sends rpc to the master to request lock. - * The lock request is queued with other lock requests. - * Call {@link #await()} to wait on lock. - * Always call {@link #unlock()} after calling the below, even after error. + * Sends rpc to the master to request lock. The lock request is queued with other lock requests. + * Call {@link #await()} to wait on lock. Always call {@link #unlock()} after calling the below, + * even after error. */ public void requestLock() throws IOException { if (procId == null) { @@ -179,7 +172,7 @@ public void requestLock() throws IOException { /** * @param timeout in milliseconds. If set to 0, waits indefinitely. * @return true if lock was acquired; and false if waiting time elapsed before lock could be - * acquired. + * acquired. */ public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedException { final boolean result = latch.await(timeout, timeUnit); @@ -188,7 +181,7 @@ public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedExceptio LOG.info("Acquired " + lockRequestStr); } else { LOG.info(String.format("Failed acquire in %s %s of %s", timeout, timeUnit.toString(), - lockRequestStr)); + lockRequestStr)); } return result; } @@ -227,7 +220,7 @@ Thread shutdown() { @Override public void run() { final LockHeartbeatRequest lockHeartbeatRequest = - LockHeartbeatRequest.newBuilder().setProcId(procId).build(); + LockHeartbeatRequest.newBuilder().setProcId(procId).build(); LockHeartbeatResponse response; while (true) { @@ -243,12 +236,13 @@ public void run() { if (!isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { locked.set(true); latch.countDown(); - } else if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { - // Lock timed out. - locked.set(false); - abort.abort("Lock timed out.", null); - return; - } + } else + if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { + // Lock timed out. 
+ locked.set(false); + abort.abort("Lock timed out.", null); + return; + } try { // If lock not acquired yet, poll faster so we can notify faster. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java index 24f2835af8b2..8f3705bab012 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.locking; import java.util.List; @@ -35,12 +33,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType; /** - * Helper class to create "master locks" for namespaces, tables and regions. - * DEV-NOTE: At the moment this class is used only by the RS for MOB, - * to prevent other MOB compaction to conflict. - * The RS has already the stub of the LockService, so we have only one constructor that - * takes the LockService stub. If in the future we are going to use this in other places - * we should add a constructor that from conf or connection, creates the stub. + * Helper class to create "master locks" for namespaces, tables and regions. DEV-NOTE: At the moment + * this class is used only by the RS for MOB, to prevent other MOB compaction to conflict. The RS + * has already the stub of the LockService, so we have only one constructor that takes the + * LockService stub. If in the future we are going to use this in other places we should add a + * constructor that from conf or connection, creates the stub. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -50,30 +47,30 @@ public class LockServiceClient { private final NonceGenerator ng; public LockServiceClient(final Configuration conf, final LockService.BlockingInterface stub, - final NonceGenerator ng) { + final NonceGenerator ng) { this.conf = conf; this.stub = stub; this.ng = ng; } /** - * Create a new EntityLock object to acquire an exclusive or shared lock on a table. - * Internally, the table namespace will also be locked in shared mode. + * Create a new EntityLock object to acquire an exclusive or shared lock on a table. Internally, + * the table namespace will also be locked in shared mode. */ public EntityLock tableLock(final TableName tableName, final boolean exclusive, - final String description, final Abortable abort) { + final String description, final Abortable abort) { LockRequest lockRequest = buildLockRequest(exclusive ? LockType.EXCLUSIVE : LockType.SHARED, - tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce()); + tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } /** - * LocCreate a new EntityLock object to acquire exclusive lock on a namespace. - * Clients can not acquire shared locks on namespace. + * LocCreate a new EntityLock object to acquire exclusive lock on a namespace. Clients can not + * acquire shared locks on namespace. 
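A sketch of the lock API touched above, following the use example in the EntityLock javadoc; the table name, description and timeout are illustrative, and the LockServiceClient and Abortable instances are assumed to be supplied by the caller.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;

public class TableLockSketch {
  static void runUnderTableLock(LockServiceClient lockServiceClient, Abortable abortable)
    throws Exception {
    EntityLock lock = lockServiceClient.tableLock(TableName.valueOf("demo_table"), true,
      "example exclusive table lock", abortable);
    lock.requestLock(); // queue the lock on the master and start the heartbeat thread
    try {
      if (lock.await(30, TimeUnit.SECONDS)) {
        // ... work that requires mutual exclusion on the table ...
      }
    } finally {
      lock.unlock(); // always unlock, even if the lock was never acquired
    }
  }
}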
*/ public EntityLock namespaceLock(String namespace, String description, Abortable abort) { - LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, - namespace, null, null, description, ng.getNonceGroup(), ng.newNonce()); + LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, namespace, null, null, + description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } @@ -82,21 +79,19 @@ public EntityLock namespaceLock(String namespace, String description, Abortable * Internally, the table and its namespace will also be locked in shared mode. */ public EntityLock regionLock(List regionInfos, String description, Abortable abort) { - LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, - null, null, regionInfos, description, ng.getNonceGroup(), ng.newNonce()); + LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, null, null, regionInfos, + description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } @InterfaceAudience.Private - public static LockRequest buildLockRequest(final LockType type, - final String namespace, final TableName tableName, final List regionInfos, - final String description, final long nonceGroup, final long nonce) { - final LockRequest.Builder builder = LockRequest.newBuilder() - .setLockType(type) - .setNonceGroup(nonceGroup) - .setNonce(nonce); + public static LockRequest buildLockRequest(final LockType type, final String namespace, + final TableName tableName, final List regionInfos, final String description, + final long nonceGroup, final long nonce) { + final LockRequest.Builder builder = + LockRequest.newBuilder().setLockType(type).setNonceGroup(nonceGroup).setNonce(nonce); if (regionInfos != null) { - for (RegionInfo hri: regionInfos) { + for (RegionInfo hri : regionInfos) { builder.addRegionInfo(ProtobufUtil.toRegionInfo(hri)); } } else if (namespace != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index ddbbb5fc8bdc..a6d6940e1e4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,22 +20,23 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.ByteBuffInputStream; import org.apache.hadoop.hbase.nio.ByteBuff; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.ExtendedCellBuilder; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos; /** - * Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp. 
- * Use a different codec if you want that in the stream. + * Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp. Use a + * different codec if you want that in the stream. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class MessageCodec implements Codec { @@ -48,26 +49,27 @@ static class MessageEncoder extends BaseEncoder { public void write(Cell cell) throws IOException { checkFlushed(); CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); - // This copies bytes from Cell to ByteString. I don't see anyway around the copy. + // This copies bytes from Cell to ByteString. I don't see anyway around the copy. // ByteString is final. builder.setRow(UnsafeByteOperations.unsafeWrap(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength())); + cell.getRowLength())); builder.setFamily(UnsafeByteOperations.unsafeWrap(cell.getFamilyArray(), - cell.getFamilyOffset(), - cell.getFamilyLength())); + cell.getFamilyOffset(), cell.getFamilyLength())); builder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength())); + cell.getQualifierOffset(), cell.getQualifierLength())); builder.setTimestamp(cell.getTimestamp()); builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte())); builder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); CellProtos.Cell pbcell = builder.build(); pbcell.writeDelimitedTo(this.out); } } static class MessageDecoder extends BaseDecoder { - private final ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + private final ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + MessageDecoder(final InputStream in) { super(in); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java index e5b6f4a166fb..de2a470bd5ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hbase.constraint; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; +import org.apache.yetus.audience.InterfaceAudience; /** - * Base class to use when actually implementing a {@link Constraint}. It takes - * care of getting and setting of configuration for the constraint. + * Base class to use when actually implementing a {@link Constraint}. It takes care of getting and + * setting of configuration for the constraint. 
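To illustrate the MessageCodec hunk above, a round-trip sketch that encodes one cell and reads it back; the cell contents are placeholders, and the encoder/decoder accessors are assumed from the org.apache.hadoop.hbase.codec.Codec interface that MessageCodec implements.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.MessageCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class MessageCodecRoundTrip {
  public static void main(String[] args) throws Exception {
    Codec codec = new MessageCodec();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(out);
    // Each cell is written as a delimited protobuf Cell message; the mvcc stamp is not written.
    encoder.write(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"), Bytes.toBytes("q"),
      Bytes.toBytes("value")));
    encoder.flush();

    Codec.Decoder decoder = codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    while (decoder.advance()) {
      Cell cell = decoder.current();
      System.out
        .println(Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
    }
  }
}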
*/ @InterfaceAudience.Private public abstract class BaseConstraint extends Configured implements Constraint { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java index c0c4b6063f99..6c99e6425351 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.constraint; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.client.Put; +import org.apache.yetus.audience.InterfaceAudience; /** * Apply a {@link Constraint} (in traditional database terminology) to a Table. Any number of @@ -68,7 +68,7 @@ public interface Constraint extends Configurable { * to fail. * @param p {@link Put} to check * @throws org.apache.hadoop.hbase.constraint.ConstraintException when the {@link Put} does not - * match the constraint. + * match the constraint. */ void check(Put p) throws ConstraintException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java index 51641b91ce67..cb7af0f9d3b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,11 @@ /** * Exception that a user defined constraint throws on failure of a - * {@link org.apache.hadoop.hbase.client.Put}. - *

<p>Does NOT attempt the
- * {@link org.apache.hadoop.hbase.client.Put} multiple times,
- * since the constraint should fail every time for
- * the same {@link org.apache.hadoop.hbase.client.Put} (it should be
- * idempotent).
+ * {@link org.apache.hadoop.hbase.client.Put}.
+ * <p/>
      + * Does NOT attempt the {@link org.apache.hadoop.hbase.client.Put} multiple times, since the + * constraint should fail every time for the same {@link org.apache.hadoop.hbase.client.Put} + * (it should be idempotent). */ @InterfaceAudience.Private public class ConstraintException extends org.apache.hadoop.hbase.DoNotRetryIOException { @@ -36,12 +35,10 @@ public ConstraintException() { super(); } - public ConstraintException(String msg) - { + public ConstraintException(String msg) { super(msg); } - - + public ConstraintException(String msg, Throwable cause) { super(msg, cause); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index b0a04c5044ac..81ed9592d278 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -32,15 +31,14 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /*** * Processes multiple {@link Constraint Constraints} on a given table. *
<p>
      - * This is an ease of use mechanism - all the functionality here could be - * implemented on any given system by a coprocessor. + * This is an ease of use mechanism - all the functionality here could be implemented on any given + * system by a coprocessor. */ @InterfaceAudience.Private public class ConstraintProcessor implements RegionCoprocessor, RegionObserver { @@ -70,7 +68,7 @@ public void start(CoprocessorEnvironment environment) { // make sure we are on a region server if (!(environment instanceof RegionCoprocessorEnvironment)) { throw new IllegalArgumentException( - "Constraints only act on regions - started in an environment that was not a region"); + "Constraints only act on regions - started in an environment that was not a region"); } RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment; TableDescriptor desc = env.getRegion().getTableDescriptor(); @@ -82,15 +80,15 @@ public void start(CoprocessorEnvironment environment) { } if (LOG.isInfoEnabled()) { - LOG.info("Finished loading " + constraints.size() - + " user Constraints on table: " + desc.getTableName()); + LOG.info("Finished loading " + constraints.size() + " user Constraints on table: " + + desc.getTableName()); } } @Override - public void prePut(ObserverContext e, Put put, - WALEdit edit, Durability durability) throws IOException { + public void prePut(ObserverContext e, Put put, WALEdit edit, + Durability durability) throws IOException { // check the put against the stored constraints for (Constraint c : constraints) { c.check(put); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java index a9438e3f25e7..29b13e1b8766 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -110,7 +110,7 @@ public static TableDescriptorBuilder remove(TableDescriptorBuilder builder) thro /** * Check to see if the Constraint is currently set. - * @param desc {@link TableDescriptor} to check + * @param desc {@link TableDescriptor} to check * @param clazz {@link Constraint} class to check for. * @return true if the {@link Constraint} is present, even if it is disabled. * false otherwise. @@ -121,7 +121,7 @@ public static boolean has(TableDescriptor desc, Class claz /** * Get the kv {@link Entry} in the descriptor for the specified class - * @param desc {@link TableDescriptor} to read + * @param desc {@link TableDescriptor} to read * @param clazz To search for * @return The {@link Pair} of {@literal } in the table, if that class is present. * {@code null} otherwise. @@ -138,7 +138,7 @@ private static Pair getKeyValueForClass(TableDescriptor desc, /** * Get the kv {@link Entry} in the descriptor builder for the specified class * @param builder {@link TableDescriptorBuilder} to read - * @param clazz To search for + * @param clazz To search for * @return The {@link Pair} of {@literal } in the table, if that class is present. * {@code null} otherwise. */ @@ -161,9 +161,9 @@ private static Pair getKeyValueForClass(TableDescriptorBuilder b * which the {@link Constraint} will be run. A {@link Constraint} earlier in the list will be run * before those later in the list. 
The same logic applies between two Constraints over time * (earlier added is run first on the regionserver). - * @param builder {@link TableDescriptorBuilder} to add a {@link Constraint} + * @param builder {@link TableDescriptorBuilder} to add a {@link Constraint} * @param constraints {@link Constraint Constraints} to add. All constraints are considered - * automatically enabled on add + * automatically enabled on add * @throws IOException If constraint could not be serialized/added to table */ @SafeVarargs @@ -189,13 +189,14 @@ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, * which the {@link Constraint} will be run. A {@link Constraint} earlier in the list will be run * before those later in the list. The same logic applies between two Constraints over time * (earlier added is run first on the regionserver). - * @param builder {@link TableDescriptorBuilder} to add a {@link Constraint} + * @param builder {@link TableDescriptorBuilder} to add a {@link Constraint} * @param constraints {@link Pair} of a {@link Constraint} and its associated - * {@link Configuration}. The Constraint will be configured on load with the specified - * configuration.All constraints are considered automatically enabled on add + * {@link Configuration}. The Constraint will be configured on load with the + * specified configuration.All constraints are considered automatically enabled + * on add * @throws IOException if any constraint could not be deserialized. Assumes if 1 constraint is not - * loaded properly, something has gone terribly wrong and that all constraints need to - * be enforced. + * loaded properly, something has gone terribly wrong and that all constraints + * need to be enforced. */ @SafeVarargs public static TableDescriptorBuilder add(TableDescriptorBuilder builder, @@ -214,12 +215,12 @@ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, * Each constraint, when added to the table, will have a specific priority, dictating the order in * which the {@link Constraint} will be run. A {@link Constraint} added will run on the * regionserver before those added to the {@link TableDescriptorBuilder} later. - * @param builder {@link TableDescriptorBuilder} to add a {@link Constraint} + * @param builder {@link TableDescriptorBuilder} to add a {@link Constraint} * @param constraint to be added - * @param conf configuration associated with the constraint + * @param conf configuration associated with the constraint * @throws IOException if any constraint could not be deserialized. Assumes if 1 constraint is not - * loaded properly, something has gone terribly wrong and that all constraints need to - * be enforced. + * loaded properly, something has gone terribly wrong and that all constraints + * need to be enforced. 
*/ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, Class constraint, Configuration conf) throws IOException { @@ -246,8 +247,8 @@ private static TableDescriptorBuilder addConstraint(TableDescriptorBuilder build /** * Setup the configuration for a constraint as to whether it is enabled and its priority - * @param conf on which to base the new configuration - * @param enabled true if it should be run + * @param conf on which to base the new configuration + * @param enabled true if it should be run * @param priority relative to other constraints * @return a new configuration, storable in the {@link TableDescriptor} */ @@ -347,10 +348,10 @@ private static TableDescriptorBuilder updateLatestPriority(TableDescriptorBuilde /** * Update the configuration for the {@link Constraint}; does not change the order in which the * constraint is run. - * @param builder {@link TableDescriptorBuilder} to update - * @param clazz {@link Constraint} to update + * @param builder {@link TableDescriptorBuilder} to update + * @param clazz {@link Constraint} to update * @param configuration to update the {@link Constraint} with. - * @throws IOException if the Constraint was not stored correctly + * @throws IOException if the Constraint was not stored correctly * @throws IllegalArgumentException if the Constraint was not present on this table. */ public static TableDescriptorBuilder setConfiguration(TableDescriptorBuilder builder, @@ -381,7 +382,7 @@ public static TableDescriptorBuilder setConfiguration(TableDescriptorBuilder bui /** * Remove the constraint (and associated information) for the table descriptor. * @param builder {@link TableDescriptorBuilder} to modify - * @param clazz {@link Constraint} class to remove + * @param clazz {@link Constraint} class to remove */ public static TableDescriptorBuilder remove(TableDescriptorBuilder builder, Class clazz) { @@ -393,7 +394,7 @@ public static TableDescriptorBuilder remove(TableDescriptorBuilder builder, * Enable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the * {@link Constraint}, but makes sure that it gets loaded on the table. * @param builder {@link TableDescriptorBuilder} to modify - * @param clazz {@link Constraint} to enable + * @param clazz {@link Constraint} to enable * @throws IOException If the constraint cannot be properly deserialized */ public static void enableConstraint(TableDescriptorBuilder builder, @@ -405,7 +406,7 @@ public static void enableConstraint(TableDescriptorBuilder builder, * Disable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the * {@link Constraint}, but it just doesn't load the {@link Constraint} on the table. * @param builder {@link TableDescriptorBuilder} to modify - * @param clazz {@link Constraint} to disable. + * @param clazz {@link Constraint} to disable. * @throws IOException if the constraint cannot be found */ public static void disableConstraint(TableDescriptorBuilder builder, @@ -421,8 +422,8 @@ private static TableDescriptorBuilder changeConstraintEnabled(TableDescriptorBui // get the original constraint Pair entry = getKeyValueForClass(builder, clazz); if (entry == null) { - throw new IllegalArgumentException("Constraint: " + clazz.getName() + - " is not associated with this table. You can't enable it!"); + throw new IllegalArgumentException("Constraint: " + clazz.getName() + + " is not associated with this table. 
You can't enable it!"); } // create a new configuration from that conf @@ -437,7 +438,7 @@ private static TableDescriptorBuilder changeConstraintEnabled(TableDescriptorBui /** * Check to see if the given constraint is enabled. - * @param desc {@link TableDescriptor} to check. + * @param desc {@link TableDescriptor} to check. * @param clazz {@link Constraint} to check for * @return true if the {@link Constraint} is present and enabled. false * otherwise. @@ -460,11 +461,11 @@ public static boolean enabled(TableDescriptor desc, Class /** * Get the constraints stored in the table descriptor - * @param desc To read from + * @param desc To read from * @param classloader To use when loading classes. If a special classloader is used on a region, - * for instance, then that should be the classloader used to load the constraints. This - * could also apply to unit-testing situation, where want to ensure that class is - * reloaded or not. + * for instance, then that should be the classloader used to load the + * constraints. This could also apply to unit-testing situation, where want to + * ensure that class is reloaded or not. * @return List of configured {@link Constraint Constraints} * @throws IOException if any part of reading/arguments fails */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index 9508321a625a..324a6f37b99b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coordination; import java.io.IOException; import java.util.Set; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective; @@ -39,7 +36,7 @@ * {@link #checkTaskStillAvailable(String)} Check that task is still there
      * {@link #checkTasks()} check for unassigned tasks and resubmit them * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @InterfaceAudience.Private @Deprecated @@ -53,7 +50,7 @@ class SplitLogManagerDetails { final private Set failedDeletions; public SplitLogManagerDetails(ConcurrentMap tasks, MasterServices master, - Set failedDeletions) { + Set failedDeletions) { this.tasks = tasks; this.master = master; this.failedDeletions = failedDeletions; @@ -124,8 +121,8 @@ public ServerName getServerName() { /** * Resubmit the task in case if found unassigned or failed * @param taskName path related to task - * @param task to resubmit - * @param force whether it should be forced + * @param task to resubmit + * @param force whether it should be forced * @return whether it was successful */ @@ -142,8 +139,7 @@ public ServerName getServerName() { void deleteTask(String taskName); /** - * Support method to init constants such as timeout. Mostly required for UTs. - * @throws IOException + * Support method to init constants such as timeout. Mostly required for UTs. n */ void init() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java index 5452578a2c26..ff2fbfbe7e72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java @@ -1,21 +1,22 @@ - /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.hbase.coordination; + import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -28,8 +29,8 @@ /** * Coordinated operations for {@link SplitLogWorker} and - * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important - * methods for SplitLogWorker:
      + * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important methods for + * SplitLogWorker:
      * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is * ready to supply the tasks
      * {@link #taskLoop()} loop for new tasks until the worker is stopped
      @@ -41,7 +42,7 @@ * Important methods for WALSplitterHandler:
      * splitting task has completed. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private @@ -50,16 +51,16 @@ public interface SplitLogWorkerCoordination { /** * Initialize internal values. This method should be used when corresponding SplitLogWorker * instance is created - * @param server instance of RegionServerServices to work with - * @param conf is current configuration. + * @param server instance of RegionServerServices to work with + * @param conf is current configuration. * @param splitTaskExecutor split executor from SplitLogWorker - * @param worker instance of SplitLogWorker + * @param worker instance of SplitLogWorker */ - void init(RegionServerServices server, Configuration conf, - TaskExecutor splitTaskExecutor, SplitLogWorker worker); + void init(RegionServerServices server, Configuration conf, TaskExecutor splitTaskExecutor, + SplitLogWorker worker); /** - * called when Coordination should stop processing tasks and exit + * called when Coordination should stop processing tasks and exit */ void stopProcessingTasks(); @@ -77,8 +78,8 @@ void init(RegionServerServices server, Configuration conf, /** * marks log file as corrupted * @param rootDir where to find the log - * @param name of the log - * @param fs file system + * @param name of the log + * @param fs file system */ void markCorrupted(Path rootDir, String name, FileSystem fs); @@ -109,10 +110,10 @@ void init(RegionServerServices server, Configuration conf, /** * Notify coordination engine that splitting task has completed. - * @param slt See {@link SplitLogTask} - * @param ctr counter to be updated + * @param slt See {@link SplitLogTask} + * @param ctr counter to be updated * @param splitTaskDetails details about log split task (specific to coordination engine being - * used). + * used). */ void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails splitTaskDetails); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index dee94be9fad3..fcf103c82e21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -1,5 +1,5 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one +/* + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coordination; import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK; @@ -28,7 +27,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -60,12 +58,11 @@ import org.slf4j.LoggerFactory; /** - * ZooKeeper based implementation of - * {@link SplitLogManagerCoordination} + * ZooKeeper based implementation of {@link SplitLogManagerCoordination} */ @InterfaceAudience.Private -public class ZKSplitLogManagerCoordination extends ZKListener implements - SplitLogManagerCoordination { +public class ZKSplitLogManagerCoordination extends ZKListener + implements SplitLogManagerCoordination { public static final int DEFAULT_TIMEOUT = 120000; public static final int DEFAULT_ZK_RETRIES = 3; @@ -121,8 +118,8 @@ public String prepareTask(String taskname) { public int remainingTasksInCoordination() { int count = 0; try { - List tasks = ZKUtil.listChildrenNoWatch(watcher, - watcher.getZNodePaths().splitLogZNode); + List tasks = + ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().splitLogZNode); if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { @@ -142,8 +139,7 @@ public int remainingTasksInCoordination() { * It is possible for a task to stay in UNASSIGNED state indefinitely - say SplitLogManager wants * to resubmit a task. It forces the task to UNASSIGNED state but it dies before it could create * the RESCAN task node to signal the SplitLogWorkers to pick up the task. To prevent this - * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. - * @param path + * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. n */ private void handleUnassignedTask(String path) { if (ZKSplitLog.isRescanNode(watcher, path)) { @@ -177,13 +173,13 @@ public boolean resubmitTask(String path, Task task, ResubmitDirective directive) // finished the task. This allows to continue if the worker cannot actually handle it, // for any reason. final long time = EnvironmentEdgeManager.currentTime() - task.last_update; - final boolean alive = - details.getMaster().getServerManager() != null ? details.getMaster().getServerManager() - .isServerOnline(task.cur_worker_name) : true; + final boolean alive = details.getMaster().getServerManager() != null + ? 
details.getMaster().getServerManager().isServerOnline(task.cur_worker_name) + : true; if (alive && time < timeout) { LOG.trace("Skipping the resubmit of " + task.toString() + " because the server " - + task.cur_worker_name + " is not marked as dead, we waited for " + time - + " while the timeout is " + timeout); + + task.cur_worker_name + " is not marked as dead, we waited for " + time + + " while the timeout is " + timeout); return false; } @@ -192,7 +188,7 @@ public boolean resubmitTask(String path, Task task, ResubmitDirective directive) task.resubmitThresholdReached = true; SplitLogCounters.tot_mgr_resubmit_threshold_reached.increment(); LOG.info("Skipping resubmissions of task " + path + " because threshold " - + resubmitThreshold + " reached"); + + resubmitThreshold + " reached"); } return false; } @@ -219,7 +215,6 @@ public boolean resubmitTask(String path, Task task, ResubmitDirective directive) return true; } - @Override public void checkTasks() { rescan(Long.MAX_VALUE); @@ -237,11 +232,9 @@ private void rescan(long retries) { // Since the TimeoutMonitor will keep resubmitting UNASSIGNED tasks // therefore this behavior is safe. SplitLogTask slt = new SplitLogTask.Done(this.details.getServerName()); - this.watcher - .getRecoverableZooKeeper() - .getZooKeeper() - .create(ZKSplitLog.getRescanNode(watcher), slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL_SEQUENTIAL, new CreateRescanAsyncCallback(), Long.valueOf(retries)); + this.watcher.getRecoverableZooKeeper().getZooKeeper().create(ZKSplitLog.getRescanNode(watcher), + slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, + new CreateRescanAsyncCallback(), Long.valueOf(retries)); } @Override @@ -252,11 +245,8 @@ public void submitTask(String path) { @Override public void checkTaskStillAvailable(String path) { // A negative retry count will lead to ignoring all error processing. - this.watcher - .getRecoverableZooKeeper() - .getZooKeeper() - .getData(path, this.watcher, new GetDataAsyncCallback(), - Long.valueOf(-1) /* retry count */); + this.watcher.getRecoverableZooKeeper().getZooKeeper().getData(path, this.watcher, + new GetDataAsyncCallback(), Long.valueOf(-1) /* retry count */); SplitLogCounters.tot_mgr_get_data_queued.increment(); } @@ -265,8 +255,8 @@ private void deleteNode(String path, Long retries) { // Once a task znode is ready for delete, that is it is in the TASK_DONE // state, then no one should be writing to it anymore. That is no one // will be updating the znode version any more. - this.watcher.getRecoverableZooKeeper().getZooKeeper() - .delete(path, -1, new DeleteAsyncCallback(), retries); + this.watcher.getRecoverableZooKeeper().getZooKeeper().delete(path, -1, + new DeleteAsyncCallback(), retries); } private void deleteNodeSuccess(String path) { @@ -307,13 +297,13 @@ private void createRescanFailure() { /** * Helper function to check whether to abandon retries in ZooKeeper AsyncCallback functions * @param statusCode integer value of a ZooKeeper exception code - * @param action description message about the retried action + * @param action description message about the retried action * @return true when need to abandon retries otherwise false */ private boolean needAbandonRetries(int statusCode, String action) { if (statusCode == KeeperException.Code.SESSIONEXPIRED.intValue()) { LOG.error("ZK session expired. Master is expected to shut down. 
Abandoning retries for " - + "action=" + action); + + "action=" + action); return true; } return false; @@ -339,13 +329,13 @@ private void createNodeFailure(String path) { } private void getDataSetWatch(String path, Long retry_count) { - this.watcher.getRecoverableZooKeeper().getZooKeeper() - .getData(path, this.watcher, new GetDataAsyncCallback(), retry_count); + this.watcher.getRecoverableZooKeeper().getZooKeeper().getData(path, this.watcher, + new GetDataAsyncCallback(), retry_count); SplitLogCounters.tot_mgr_get_data_queued.increment(); } private void getDataSetWatchSuccess(String path, byte[] data, int version) - throws DeserializationException { + throws DeserializationException { if (data == null) { if (version == Integer.MIN_VALUE) { // assume all done. The task znode suddenly disappeared. @@ -382,8 +372,8 @@ private void getDataSetWatchSuccess(String path, byte[] data, int version) LOG.info("Task " + path + " entered state=" + slt.toString()); resubmitOrFail(path, CHECK); } else { - LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = " - + path + " data = " + slt.toString()); + LOG.error(HBaseMarkers.FATAL, + "logic error - unexpected zk state for path = " + path + " data = " + slt.toString()); setDone(path, FAILURE); } } @@ -466,15 +456,15 @@ private void heartbeat(String path, int new_version, ServerName workerName) { private void lookForOrphans() { List orphans; try { - orphans = ZKUtil.listChildrenNoWatch(this.watcher, - this.watcher.getZNodePaths().splitLogZNode); + orphans = + ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.getZNodePaths().splitLogZNode); if (orphans == null) { LOG.warn("Could not get children of " + this.watcher.getZNodePaths().splitLogZNode); return; } } catch (KeeperException e) { LOG.warn("Could not get children of " + this.watcher.getZNodePaths().splitLogZNode + " " - + StringUtils.stringifyException(e)); + + StringUtils.stringifyException(e)); return; } int rescan_nodes = 0; @@ -491,7 +481,7 @@ private void lookForOrphans() { getDataSetWatch(nodepath, zkretries); } LOG.info("Found " + (orphans.size() - rescan_nodes) + " orphan tasks and " + rescan_nodes - + " rescan nodes"); + + " rescan nodes"); } @Override @@ -509,15 +499,14 @@ public void nodeDataChanged(String path) { private boolean resubmit(String path, int version) { try { // blocking zk call but this is done from the timeout thread - SplitLogTask slt = - new SplitLogTask.Unassigned(this.details.getServerName()); + SplitLogTask slt = new SplitLogTask.Unassigned(this.details.getServerName()); if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { LOG.debug("Failed to resubmit task " + path + " version changed"); return false; } } catch (NoNodeException e) { LOG.warn("Failed to resubmit because znode doesn't exist " + path - + " task done (or forced done by removing the znode)"); + + " task done (or forced done by removing the znode)"); try { getDataSetWatchSuccess(path, null, Integer.MIN_VALUE); } catch (DeserializationException e1) { @@ -536,12 +525,11 @@ private boolean resubmit(String path, int version) { return true; } - /** * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use objects implementing this * interface to finish off a partially done task by - * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a - * serialization point at the end of the task processing. Must be restartable and idempotent. + * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. 
This provides a serialization + * point at the end of the task processing. Must be restartable and idempotent. */ public interface TaskFinisher { /** @@ -563,9 +551,7 @@ enum Status { * partially done tasks are present. taskname is the name of the task that was put up in * zookeeper. *
<p>
      - * @param workerName - * @param taskname - * @return DONE if task completed successfully, ERR otherwise + * nn * @return DONE if task completed successfully, ERR otherwise */ Status finish(ServerName workerName, String taskname); } @@ -596,7 +582,7 @@ public void processResult(int rc, String path, Object ctx, String name) { } else { Long retry_count = (Long) ctx; LOG.warn("Create rc=" + KeeperException.Code.get(rc) + " for " + path - + " remaining retries=" + retry_count); + + " remaining retries=" + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_node_create_err.increment(); createNodeFailure(path); @@ -636,11 +622,11 @@ public void processResult(int rc, String path, Object ctx, byte[] data, Stat sta if (retry_count < 0) { LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path - + ". Ignoring error. No error handling. No retrying."); + + ". Ignoring error. No error handling. No retrying."); return; } - LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path - + " remaining retries=" + retry_count); + LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path + " remaining retries=" + + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_get_data_err.increment(); getDataSetWatchFailure(path); @@ -677,7 +663,7 @@ public void processResult(int rc, String path, Object ctx) { SplitLogCounters.tot_mgr_node_delete_err.increment(); Long retry_count = (Long) ctx; LOG.warn("Delete rc=" + KeeperException.Code.get(rc) + " for " + path - + " remaining retries=" + retry_count); + + " remaining retries=" + retry_count); if (retry_count == 0) { LOG.warn("Delete failed " + path); details.getFailedDeletions().add(path); @@ -688,8 +674,8 @@ public void processResult(int rc, String path, Object ctx) { return; } else { LOG.info(path + " does not exist. Either was created but deleted behind our" - + " back by another pending delete OR was deleted" - + " in earlier retry rounds. zkretries = " + ctx); + + " back by another pending delete OR was deleted" + + " in earlier retry rounds. zkretries = " + ctx); } } else { LOG.debug("Deleted " + path); @@ -715,7 +701,7 @@ public void processResult(int rc, String path, Object ctx, String name) { } Long retry_count = (Long) ctx; LOG.warn("rc=" + KeeperException.Code.get(rc) + " for " + path + " remaining retries=" - + retry_count); + + retry_count); if (retry_count == 0) { createRescanFailure(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 323e5752ace9..8f3d8fdd6e62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,8 @@ /** * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter (see SplitWALManager) which doesn't use this zk-based coordinator. + * distributed WAL splitter (see SplitWALManager) which doesn't use this zk-based + * coordinator. 
*/ @Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -38,8 +39,8 @@ public class ZkCoordinatedStateManager implements CoordinatedStateManager { public ZkCoordinatedStateManager(Server server) { this.watcher = server.getZooKeeper(); splitLogWorkerCoordination = new ZkSplitLogWorkerCoordination(server.getServerName(), watcher); - splitLogManagerCoordination = new ZKSplitLogManagerCoordination(server.getConfiguration(), - watcher); + splitLogManagerCoordination = + new ZKSplitLogManagerCoordination(server.getConfiguration(), watcher); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 07e751716bf2..1b255b25e17c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coordination; import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER; @@ -59,20 +57,18 @@ import org.slf4j.LoggerFactory; /** - * ZooKeeper based implementation of {@link SplitLogWorkerCoordination} - * It listen for changes in ZooKeeper and - * + * ZooKeeper based implementation of {@link SplitLogWorkerCoordination} It listen for changes in + * ZooKeeper and */ @InterfaceAudience.Private -public class ZkSplitLogWorkerCoordination extends ZKListener implements - SplitLogWorkerCoordination { +public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLogWorkerCoordination { private static final Logger LOG = LoggerFactory.getLogger(ZkSplitLogWorkerCoordination.class); private static final int checkInterval = 5000; // 5 seconds private static final int FAILED_TO_OWN_TASK = -1; - private SplitLogWorker worker; + private SplitLogWorker worker; private TaskExecutor splitTaskExecutor; @@ -132,17 +128,16 @@ public void nodeDataChanged(String path) { * Override setter from {@link SplitLogWorkerCoordination} */ @Override - public void init(RegionServerServices server, Configuration conf, - TaskExecutor splitExecutor, SplitLogWorker worker) { + public void init(RegionServerServices server, Configuration conf, TaskExecutor splitExecutor, + SplitLogWorker worker) { this.server = server; this.worker = worker; this.splitTaskExecutor = splitExecutor; maxConcurrentTasks = - conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER); - reportPeriod = - conf.getInt("hbase.splitlog.report.period", - conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, - ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3); + conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER); + reportPeriod = conf.getInt("hbase.splitlog.report.period", + conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, + ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3); } /* Support functions for ZooKeeper async callback */ @@ -165,8 +160,8 @@ void getDataSetWatchFailure(String path) { } public void getDataSetWatchAsync() { - watcher.getRecoverableZooKeeper().getZooKeeper() 
- .getData(currentTask, watcher, new GetDataAsyncCallback(), null); + watcher.getRecoverableZooKeeper().getZooKeeper().getData(currentTask, watcher, + new GetDataAsyncCallback(), null); SplitLogCounters.tot_wkr_get_data_queued.increment(); } @@ -189,10 +184,12 @@ void getDataSetWatchSuccess(String path, byte[] data) { // UNASSIGNED because by the time this worker sets the data watch // the node might have made two transitions - from owned by this // worker to unassigned to owned by another worker - if (!slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName) - && !slt.isResigned(serverName)) { + if ( + !slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName) + && !slt.isResigned(serverName) + ) { LOG.info("task " + taskpath + " preempted from " + serverName - + ", current task state and owner=" + slt.toString()); + + ", current task state and owner=" + slt.toString()); worker.stopTask(); } } @@ -241,7 +238,7 @@ private boolean grabTask(String path) { } currentVersion = - attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion()); + attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion()); if (currentVersion < 0) { SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment(); return false; @@ -249,7 +246,7 @@ private boolean grabTask(String path) { if (ZKSplitLog.isRescanNode(watcher, currentTask)) { ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = - new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); + new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); splitTaskDetails.setTaskNode(currentTask); splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion)); @@ -285,7 +282,7 @@ private boolean grabTask(String path) { /** * Submit a log split task to executor service - * @param curTask task to submit + * @param curTask task to submit * @param curTaskZKVersion current version of task */ void submitTask(final String curTask, final int curTaskZKVersion, final int reportPeriod) { @@ -300,8 +297,7 @@ public boolean progress() { if ((t - last_report_at) > reportPeriod) { last_report_at = t; int latestZKVersion = - attemptToOwnTask(false, watcher, server.getServerName(), curTask, - zkVersion.intValue()); + attemptToOwnTask(false, watcher, server.getServerName(), curTask, zkVersion.intValue()); if (latestZKVersion < 0) { LOG.warn("Failed to heartbeat the task" + curTask); return false; @@ -312,13 +308,12 @@ public boolean progress() { } }; ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = - new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); + new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); splitTaskDetails.setTaskNode(curTask); splitTaskDetails.setCurTaskZKVersion(zkVersion); - WALSplitterHandler hsh = - new WALSplitterHandler(server, this, splitTaskDetails, reporter, - this.tasksInProgress, splitTaskExecutor); + WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter, + this.tasksInProgress, splitTaskExecutor); server.getExecutorService().submit(hsh); } @@ -335,15 +330,15 @@ private boolean areSplittersAvailable() { * This method is also used to periodically heartbeat the task progress by transitioning the node * from OWNED to OWNED. *
<p>
- * @param isFirstTime shows whther it's the first attempt. - * @param zkw zk wathcer - * @param server name - * @param task to own + * @param isFirstTime shows whether it's the first attempt. + * @param zkw zk watcher + * @param server name + * @param task to own * @param taskZKVersion version of the task in zk * @return non-negative integer value when task can be owned by current region server otherwise -1 */ - protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, - ServerName server, String task, int taskZKVersion) { + protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, ServerName server, + String task, int taskZKVersion) { int latestZKVersion = FAILED_TO_OWN_TASK; try { SplitLogTask slt = new SplitLogTask.Owned(server); @@ -368,7 +363,7 @@ protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, } } catch (InterruptedException e1) { LOG.warn("Interrupted while trying to assert ownership of " + task + " " - + StringUtils.stringifyException(e1)); + + StringUtils.stringifyException(e1)); Thread.currentThread().interrupt(); } SplitLogCounters.tot_wkr_task_heartbeat_failed.increment(); @@ -381,8 +376,7 @@ protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, * in a cluster. *
<p>
      * Synchronization using taskReadySeq ensures that it will try to grab every task - * that has been put up - * @throws InterruptedException + * that has been put up n */ @Override public void taskLoop() throws InterruptedException { @@ -392,7 +386,7 @@ public void taskLoop() throws InterruptedException { paths = getTaskList(); if (paths == null) { LOG.warn("Could not get tasks, did someone remove " + watcher.getZNodePaths().splitLogZNode - + " ... worker thread exiting."); + + " ... worker thread exiting."); return; } // shuffle the paths to prevent different split log worker start from the same log file after @@ -418,8 +412,8 @@ public void taskLoop() throws InterruptedException { int idx = (i + offset) % paths.size(); // don't call ZKSplitLog.getNodeName() because that will lead to // double encoding of the path name - taskGrabbed |= grabTask(ZNodePaths.joinZNode( - watcher.getZNodePaths().splitLogZNode, paths.get(idx))); + taskGrabbed |= + grabTask(ZNodePaths.joinZNode(watcher.getZNodePaths().splitLogZNode, paths.get(idx))); break; } else { if (LOG.isTraceEnabled()) { @@ -453,8 +447,8 @@ private List getTaskList() throws InterruptedException { // it will come out if worker thread exited. while (!shouldStop) { try { - childrenPaths = ZKUtil.listChildrenAndWatchForNewChildren(watcher, - watcher.getZNodePaths().splitLogZNode); + childrenPaths = + ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.getZNodePaths().splitLogZNode); if (childrenPaths != null) { return childrenPaths; } @@ -462,7 +456,7 @@ private List getTaskList() throws InterruptedException { LOG.warn("Could not get children of znode " + watcher.getZNodePaths().splitLogZNode, e); } LOG.debug("Retry listChildren of znode " + watcher.getZNodePaths().splitLogZNode - + " after sleep for " + sleepTime + "ms!"); + + " after sleep for " + sleepTime + "ms!"); Thread.sleep(sleepTime); } return childrenPaths; @@ -480,12 +474,13 @@ public boolean isReady() throws InterruptedException { result = ZKUtil.checkExists(watcher, watcher.getZNodePaths().splitLogZNode); } catch (KeeperException e) { // ignore - LOG.warn("Exception when checking for " + watcher.getZNodePaths().splitLogZNode - + " ... retrying", e); + LOG.warn( + "Exception when checking for " + watcher.getZNodePaths().splitLogZNode + " ... retrying", + e); } if (result == -1) { LOG.info(watcher.getZNodePaths().splitLogZNode - + " znode does not exist, waiting for master to create"); + + " znode does not exist, waiting for master to create"); Thread.sleep(1000); } return (result != -1); @@ -506,7 +501,6 @@ public void removeListener() { watcher.unregisterListener(this); } - @Override public void stopProcessingTasks() { this.shouldStop = true; @@ -542,9 +536,7 @@ public void processResult(int rc, String path, Object ctx, byte[] data, Stat sta */ /** * endTask() can fail and the only way to recover out of it is for the - * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. - * @param slt - * @param ctr + * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. 
nn */ @Override public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) { @@ -558,7 +550,7 @@ public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) { return; } LOG.warn("failed to transistion task " + task + " to end state " + slt - + " because of version mismatch "); + + " because of version mismatch "); } catch (KeeperException.BadVersionException bve) { LOG.warn("transisition task " + task + " to " + slt + " failed because of version mismatch", bve); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java index 2818dcd675f1..b8cabe8cfed7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,9 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -27,8 +26,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - /** * Encapsulation of the environment of each coprocessor */ @@ -48,10 +45,11 @@ public class BaseEnvironment implements CoprocessorEnviro /** * Constructor - * @param impl the coprocessor instance + * @param impl the coprocessor instance * @param priority chaining priority */ - public BaseEnvironment(final C impl, final int priority, final int seq, final Configuration conf) { + public BaseEnvironment(final C impl, final int priority, final int seq, + final Configuration conf) { this.impl = impl; this.classLoader = impl.getClass().getClassLoader(); this.priority = priority; @@ -62,8 +60,7 @@ public BaseEnvironment(final C impl, final int priority, final int seq, final Co /** Initialize the environment */ public void startup() throws IOException { - if (state == Coprocessor.State.INSTALLED || - state == Coprocessor.State.STOPPED) { + if (state == Coprocessor.State.INSTALLED || state == Coprocessor.State.STOPPED) { state = Coprocessor.State.STARTING; Thread currentThread = Thread.currentThread(); ClassLoader hostClassLoader = currentThread.getContextClassLoader(); @@ -75,8 +72,8 @@ public void startup() throws IOException { currentThread.setContextClassLoader(hostClassLoader); } } else { - LOG.warn("Not starting coprocessor " + impl.getClass().getName() + - " because not inactive (state=" + state.toString() + ")"); + LOG.warn("Not starting coprocessor " + impl.getClass().getName() + + " because not inactive (state=" + state.toString() + ")"); } } @@ -91,13 +88,13 @@ public void shutdown() { impl.stop(this); state = Coprocessor.State.STOPPED; } catch (IOException ioe) { - LOG.error("Error 
stopping coprocessor "+impl.getClass().getName(), ioe); + LOG.error("Error stopping coprocessor " + impl.getClass().getName(), ioe); } finally { currentThread.setContextClassLoader(hostClassLoader); } } else { - LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+ - " because not active (state="+state.toString()+")"); + LOG.warn("Not stopping coprocessor " + impl.getClass().getName() + + " because not active (state=" + state.toString() + ")"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java index 094a7d932f3b..0396e0afeaf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,55 +15,53 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Coprocessors implement this interface to observe and mediate bulk load operations. - *

- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
- * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- * the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
- * </ul>
+ * Coprocessors implement this interface to observe and mediate bulk load operations.
+ * <br>
+ * <br>
+ * <h3>Exception Handling</h3>
+ * For all functions, exception handling is done as follows:
+ * <ul>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
+ * </ul>
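(Editorial aside, not part of the patch: the class comment above documents the exception handling contract, and the two hooks that follow, prePrepareBulkLoad and preCleanupBulkLoad, are the entire surface of BulkLoadObserver. A minimal, purely illustrative implementation might look like the sketch below; the class name, package, and log messages are hypothetical.)

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative sketch only: logs bulk load preparation and cleanup. Not part of this patch. */
public class LoggingBulkLoadObserver implements RegionCoprocessor, BulkLoadObserver {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingBulkLoadObserver.class);

  @Override
  public Optional<BulkLoadObserver> getBulkLoadObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
    throws IOException {
    // Runs before SecureBulkLoadEndpoint.prepareBulkLoad(); ctx.bypass() has no effect here.
    LOG.info("Preparing bulk load for region {}",
      ctx.getEnvironment().getRegionInfo().getEncodedName());
  }

  @Override
  public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
    throws IOException {
    // Runs before SecureBulkLoadEndpoint.cleanupBulkLoad(); also not bypassable.
    LOG.info("Cleaning up bulk load for table {}",
      ctx.getEnvironment().getRegionInfo().getTable().getNameAsString());
  }
}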
      */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface BulkLoadObserver { - /** - * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. - * It can't bypass the default action, e.g., ctx.bypass() won't have effect. - * If you need to get the region or table name, get it from the - * ctx as follows: code>ctx.getEnvironment().getRegion(). Use - * getRegionInfo to fetch the encodedName and use getDescriptor() to get the tableName. - * @param ctx the environment to interact with the framework and master - */ - default void prePrepareBulkLoad(ObserverContext ctx) - throws IOException {} + /** + * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. It can't bypass the + * default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table + * name, get it from the ctx as follows: + * code>ctx.getEnvironment().getRegion(). Use getRegionInfo to fetch the encodedName + * and use getDescriptor() to get the tableName. + * @param ctx the environment to interact with the framework and master + */ + default void prePrepareBulkLoad(ObserverContext ctx) + throws IOException { + } - /** - * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. - * It can't bypass the default action, e.g., ctx.bypass() won't have effect. - * If you need to get the region or table name, get it from the - * ctx as follows: code>ctx.getEnvironment().getRegion(). Use - * getRegionInfo to fetch the encodedName and use getDescriptor() to get the tableName. - * @param ctx the environment to interact with the framework and master - */ - default void preCleanupBulkLoad(ObserverContext ctx) - throws IOException {} + /** + * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. It can't bypass the + * default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table + * name, get it from the ctx as follows: + * code>ctx.getEnvironment().getRegion(). Use getRegionInfo to fetch the encodedName + * and use getDescriptor() to get the tableName. + * @param ctx the environment to interact with the framework and master + */ + default void preCleanupBulkLoad(ObserverContext ctx) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 319936d9ebfe..cc8977f4581d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -31,10 +30,6 @@ import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; @@ -46,43 +41,42 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hbase.util.SortedList; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Strings; /** - * Provides the common setup framework and runtime services for coprocessor - * invocation from HBase services. + * Provides the common setup framework and runtime services for coprocessor invocation from HBase + * services. * @param type of specific coprocessor this host will handle - * @param type of specific coprocessor environment this host requires. - * provides + * @param type of specific coprocessor environment this host requires. provides */ @InterfaceAudience.Private public abstract class CoprocessorHost> { - public static final String REGION_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.region.classes"; + public static final String REGION_COPROCESSOR_CONF_KEY = "hbase.coprocessor.region.classes"; public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.regionserver.classes"; + "hbase.coprocessor.regionserver.classes"; public static final String USER_REGION_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.user.region.classes"; - public static final String MASTER_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.master.classes"; - public static final String WAL_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.wal.classes"; + "hbase.coprocessor.user.region.classes"; + public static final String MASTER_COPROCESSOR_CONF_KEY = "hbase.coprocessor.master.classes"; + public static final String WAL_COPROCESSOR_CONF_KEY = "hbase.coprocessor.wal.classes"; public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; public static final boolean DEFAULT_ABORT_ON_ERROR = true; public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; - public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = - "hbase.coprocessor.user.enabled"; + public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.user.enabled"; public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; public static final String SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR = - "hbase.skip.load.duplicate.table.coprocessor"; + "hbase.skip.load.duplicate.table.coprocessor"; public static final boolean DEFAULT_SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR = false; private static final Logger LOG = LoggerFactory.getLogger(CoprocessorHost.class); protected Abortable abortable; /** Ordered set of loaded coprocessors with lock */ protected final SortedList coprocEnvironments = - new SortedList<>(new EnvironmentPriorityComparator()); + new SortedList<>(new EnvironmentPriorityComparator()); protected Configuration conf; // unique file prefix to use for local copies of jars when classloading protected String pathPrefix; @@ -94,15 +88,13 @@ public CoprocessorHost(Abortable abortable) { } 
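(For orientation only, not part of the patch: the configuration keys reformatted in the hunk above are what operators set so that loadSystemCoprocessors() picks a coprocessor up at startup. A hedged sketch follows; the observer class name reuses the hypothetical example above.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorConfigSketch {
  /** Builds a configuration that loads a hypothetical region coprocessor on every region server. */
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "org.example.LoggingBulkLoadObserver");
    // With abort-on-error disabled, handleCoprocessorThrowable() unloads a faulty coprocessor
    // instead of aborting the server.
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
    return conf;
  }
}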
/** - * Not to be confused with the per-object _coprocessors_ (above), - * coprocessorNames is static and stores the set of all coprocessors ever - * loaded by any thread in this JVM. It is strictly additive: coprocessors are - * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since - * the intention is to preserve a history of all loaded coprocessors for - * diagnosis in case of server crash (HBASE-4014). + * Not to be confused with the per-object _coprocessors_ (above), coprocessorNames is static and + * stores the set of all coprocessors ever loaded by any thread in this JVM. It is strictly + * additive: coprocessors are added to coprocessorNames, by checkAndLoadInstance() but are never + * removed, since the intention is to preserve a history of all loaded coprocessors for diagnosis + * in case of server crash (HBASE-4014). */ - private static Set coprocessorNames = - Collections.synchronizedSet(new HashSet()); + private static Set coprocessorNames = Collections.synchronizedSet(new HashSet()); public static Set getLoadedCoprocessors() { synchronized (coprocessorNames) { @@ -111,27 +103,25 @@ public static Set getLoadedCoprocessors() { } /** - * Used to create a parameter to the HServerLoad constructor so that - * HServerLoad can provide information about the coprocessors loaded by this - * regionserver. - * (HBASE-4070: Improve region server metrics to report loaded coprocessors - * to master). + * Used to create a parameter to the HServerLoad constructor so that HServerLoad can provide + * information about the coprocessors loaded by this regionserver. (HBASE-4070: Improve region + * server metrics to report loaded coprocessors to master). */ public Set getCoprocessors() { Set returnValue = new TreeSet<>(); - for (E e: coprocEnvironments) { + for (E e : coprocEnvironments) { returnValue.add(e.getInstance().getClass().getSimpleName()); } return returnValue; } /** - * Load system coprocessors once only. Read the class names from configuration. - * Called by constructor. + * Load system coprocessors once only. Read the class names from configuration. Called by + * constructor. 
*/ protected void loadSystemCoprocessors(Configuration conf, String confKey) { - boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, - DEFAULT_COPROCESSORS_ENABLED); + boolean coprocessorsEnabled = + conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED); if (!coprocessorsEnabled) { return; } @@ -140,8 +130,7 @@ protected void loadSystemCoprocessors(Configuration conf, String confKey) { // load default coprocessors from configure file String[] defaultCPClasses = conf.getStrings(confKey); - if (defaultCPClasses == null || defaultCPClasses.length == 0) - return; + if (defaultCPClasses == null || defaultCPClasses.length == 0) return; int currentSystemPriority = Coprocessor.PRIORITY_SYSTEM; for (String className : defaultCPClasses) { @@ -196,16 +185,15 @@ protected void loadSystemCoprocessors(Configuration conf, String confKey) { /** * Load a coprocessor implementation into the host - * @param path path to implementation jar + * @param path path to implementation jar * @param className the main class name - * @param priority chaining priority - * @param conf configuration for coprocessor + * @param priority chaining priority + * @param conf configuration for coprocessor * @throws java.io.IOException Exception */ - public E load(Path path, String className, int priority, - Configuration conf) throws IOException { + public E load(Path path, String className, int priority, Configuration conf) throws IOException { String[] includedClassPrefixes = null; - if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){ + if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null) { String prefixes = conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY); includedClassPrefixes = prefixes.split(";"); } @@ -214,18 +202,18 @@ public E load(Path path, String className, int priority, /** * Load a coprocessor implementation into the host - * @param path path to implementation jar - * @param className the main class name - * @param priority chaining priority - * @param conf configuration for coprocessor + * @param path path to implementation jar + * @param className the main class name + * @param priority chaining priority + * @param conf configuration for coprocessor * @param includedClassPrefixes class name prefixes to include * @throws java.io.IOException Exception */ - public E load(Path path, String className, int priority, - Configuration conf, String[] includedClassPrefixes) throws IOException { + public E load(Path path, String className, int priority, Configuration conf, + String[] includedClassPrefixes) throws IOException { Class implClass; - LOG.debug("Loading coprocessor class " + className + " with path " + - path + " and priority " + priority); + LOG.debug("Loading coprocessor class " + className + " with path " + path + " and priority " + + priority); boolean skipLoadDuplicateCoprocessor = conf.getBoolean(SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR, DEFAULT_SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR); @@ -243,19 +231,19 @@ public E load(Path path, String className, int priority, throw new IOException("No jar path specified for " + className); } } else { - cl = CoprocessorClassLoader.getClassLoader( - path, getClass().getClassLoader(), pathPrefix, conf); + cl = + CoprocessorClassLoader.getClassLoader(path, getClass().getClassLoader(), pathPrefix, conf); try { - implClass = ((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes); + implClass = ((CoprocessorClassLoader) cl).loadClass(className, includedClassPrefixes); } catch (ClassNotFoundException e) { 
throw new IOException("Cannot load external coprocessor class " + className, e); } } - //load custom code for coprocessor + // load custom code for coprocessor Thread currentThread = Thread.currentThread(); ClassLoader hostClassLoader = currentThread.getContextClassLoader(); - try{ + try { // switch temporarily to the thread classloader for custom CP currentThread.setContextClassLoader(cl); E cpInstance = checkAndLoadInstance(implClass, priority, conf); @@ -267,19 +255,19 @@ public E load(Path path, String className, int priority, } public void load(Class implClass, int priority, Configuration conf) - throws IOException { + throws IOException { E env = checkAndLoadInstance(implClass, priority, conf); coprocEnvironments.add(env); } /** * @param implClass Implementation class - * @param priority priority - * @param conf configuration + * @param priority priority + * @param conf configuration * @throws java.io.IOException Exception */ public E checkAndLoadInstance(Class implClass, int priority, Configuration conf) - throws IOException { + throws IOException { // create the instance C impl; try { @@ -288,7 +276,7 @@ public E checkAndLoadInstance(Class implClass, int priority, Configuration co LOG.error("Cannot load coprocessor " + implClass.getSimpleName()); return null; } - } catch (InstantiationException|IllegalAccessException e) { + } catch (InstantiationException | IllegalAccessException e) { throw new IOException(e); } // create the environment @@ -307,14 +295,14 @@ public E checkAndLoadInstance(Class implClass, int priority, Configuration co public abstract E createEnvironment(C instance, int priority, int sequence, Configuration conf); /** - * Called when a new Coprocessor class needs to be loaded. Checks if type of the given class - * is what the corresponding host implementation expects. If it is of correct type, returns an - * instance of the coprocessor to be loaded. If not, returns null. - * If an exception occurs when trying to create instance of a coprocessor, it's passed up and - * eventually results into server aborting. + * Called when a new Coprocessor class needs to be loaded. Checks if type of the given class is + * what the corresponding host implementation expects. If it is of correct type, returns an + * instance of the coprocessor to be loaded. If not, returns null. If an exception occurs when + * trying to create instance of a coprocessor, it's passed up and eventually results into server + * aborting. */ public abstract C checkAndGetInstance(Class implClass) - throws InstantiationException, IllegalAccessException; + throws InstantiationException, IllegalAccessException; public void shutdown(E e) { assert e instanceof BaseEnvironment; @@ -328,9 +316,11 @@ public void shutdown(E e) { * Find coprocessors by full class name or simple name. 
*/ public C findCoprocessor(String className) { - for (E env: coprocEnvironments) { - if (env.getInstance().getClass().getName().equals(className) || - env.getInstance().getClass().getSimpleName().equals(className)) { + for (E env : coprocEnvironments) { + if ( + env.getInstance().getClass().getName().equals(className) + || env.getInstance().getClass().getSimpleName().equals(className) + ) { return env.getInstance(); } } @@ -338,7 +328,7 @@ public C findCoprocessor(String className) { } public T findCoprocessor(Class cls) { - for (E env: coprocEnvironments) { + for (E env : coprocEnvironments) { if (cls.isAssignableFrom(env.getInstance().getClass())) { return (T) env.getInstance(); } @@ -354,12 +344,12 @@ public T findCoprocessor(Class cls) { public List findCoprocessors(Class cls) { ArrayList ret = new ArrayList<>(); - for (E env: coprocEnvironments) { + for (E env : coprocEnvironments) { C cp = env.getInstance(); - if(cp != null) { + if (cp != null) { if (cls.isAssignableFrom(cp.getClass())) { - ret.add((T)cp); + ret.add((T) cp); } } } @@ -372,9 +362,11 @@ public List findCoprocessors(Class cls) { * @return the coprocessor, or null if not found */ public E findCoprocessorEnvironment(String className) { - for (E env: coprocEnvironments) { - if (env.getInstance().getClass().getName().equals(className) || - env.getInstance().getClass().getSimpleName().equals(className)) { + for (E env : coprocEnvironments) { + if ( + env.getInstance().getClass().getName().equals(className) + || env.getInstance().getClass().getSimpleName().equals(className) + ) { return env; } } @@ -391,8 +383,8 @@ Set getExternalClassLoaders() { final ClassLoader systemClassLoader = this.getClass().getClassLoader(); for (E env : coprocEnvironments) { ClassLoader cl = env.getInstance().getClass().getClassLoader(); - if (cl != systemClassLoader){ - //do not include system classloader + if (cl != systemClassLoader) { + // do not include system classloader externalClassLoaders.add(cl); } } @@ -400,13 +392,11 @@ Set getExternalClassLoaders() { } /** - * Environment priority comparator. - * Coprocessors are chained in sorted order. + * Environment priority comparator. Coprocessors are chained in sorted order. */ static class EnvironmentPriorityComparator implements Comparator { @Override - public int compare(final CoprocessorEnvironment env1, - final CoprocessorEnvironment env2) { + public int compare(final CoprocessorEnvironment env1, final CoprocessorEnvironment env2) { if (env1.getPriority() < env2.getPriority()) { return -1; } else if (env1.getPriority() > env2.getPriority()) { @@ -436,18 +426,15 @@ protected void abortServer(final String coprocessorName, final Throwable e) { } /** - * This is used by coprocessor hooks which are declared to throw IOException - * (or its subtypes). For such hooks, we should handle throwable objects - * depending on the Throwable's type. Those which are instances of - * IOException should be passed on to the client. This is in conformance with - * the HBase idiom regarding IOException: that it represents a circumstance - * that should be passed along to the client for its own handling. For - * example, a coprocessor that implements access controls would throw a - * subclass of IOException, such as AccessDeniedException, in its preGet() - * method to prevent an unauthorized client's performing a Get on a particular - * table. + * This is used by coprocessor hooks which are declared to throw IOException (or its subtypes). 
+ * For such hooks, we should handle throwable objects depending on the Throwable's type. Those + * which are instances of IOException should be passed on to the client. This is in conformance + * with the HBase idiom regarding IOException: that it represents a circumstance that should be + * passed along to the client for its own handling. For example, a coprocessor that implements + * access controls would throw a subclass of IOException, such as AccessDeniedException, in its + * preGet() method to prevent an unauthorized client's performing a Get on a particular table. * @param env Coprocessor Environment - * @param e Throwable object thrown by coprocessor. + * @param e Throwable object thrown by coprocessor. * @exception IOException Exception */ // Note to devs: Class comments of all observers ({@link MasterObserver}, {@link WALObserver}, @@ -456,7 +443,7 @@ protected void abortServer(final String coprocessorName, final Throwable e) { // update all classes' comments. protected void handleCoprocessorThrowable(final E env, final Throwable e) throws IOException { if (e instanceof IOException) { - throw (IOException)e; + throw (IOException) e; } // If we got here, e is not an IOException. A loaded coprocessor has a // fatal bug, and the server (master or regionserver) should remove the @@ -469,24 +456,23 @@ protected void handleCoprocessorThrowable(final E env, final Throwable e) throws abortServer(env, e); } else { // If available, pull a table name out of the environment - if(env instanceof RegionCoprocessorEnvironment) { - String tableName = ((RegionCoprocessorEnvironment)env).getRegionInfo().getTable().getNameAsString(); - LOG.error("Removing coprocessor '" + env.toString() + "' from table '"+ tableName + "'", e); + if (env instanceof RegionCoprocessorEnvironment) { + String tableName = + ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getNameAsString(); + LOG.error("Removing coprocessor '" + env.toString() + "' from table '" + tableName + "'", + e); } else { - LOG.error("Removing coprocessor '" + env.toString() + "' from " + - "environment",e); + LOG.error("Removing coprocessor '" + env.toString() + "' from " + "environment", e); } coprocEnvironments.remove(env); try { shutdown(env); } catch (Exception x) { - LOG.error("Uncaught exception when shutting down coprocessor '" - + env.toString() + "'", x); + LOG.error("Uncaught exception when shutting down coprocessor '" + env.toString() + "'", x); } - throw new DoNotRetryIOException("Coprocessor: '" + env.toString() + - "' threw: '" + e + "' and has been removed from the active " + - "coprocessor set.", e); + throw new DoNotRetryIOException("Coprocessor: '" + env.toString() + "' threw: '" + e + + "' and has been removed from the active " + "coprocessor set.", e); } } @@ -494,27 +480,26 @@ protected void handleCoprocessorThrowable(final E env, final Throwable e) throws * Used to limit legacy handling to once per Coprocessor class per classloader. */ private static final Set> legacyWarning = - new ConcurrentSkipListSet<>( - new Comparator>() { - @Override - public int compare(Class c1, Class c2) { - if (c1.equals(c2)) { - return 0; - } - return c1.getName().compareTo(c2.getName()); - } - }); + new ConcurrentSkipListSet<>(new Comparator>() { + @Override + public int compare(Class c1, Class c2) { + if (c1.equals(c2)) { + return 0; + } + return c1.getName().compareTo(c2.getName()); + } + }); /** * Implementations defined function to get an observer of type {@code O} from a coprocessor of - * type {@code C}. 
Concrete implementations of CoprocessorHost define one getter for each - * observer they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for - * each of RegionObserver, EndpointObserver and BulkLoadObserver. - * These getters are used by {@code ObserverOperation} to get appropriate observer from the - * coprocessor. + * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each observer + * they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for each of + * RegionObserver, EndpointObserver and BulkLoadObserver. These getters are used by + * {@code ObserverOperation} to get appropriate observer from the coprocessor. */ @FunctionalInterface - public interface ObserverGetter extends Function> {} + public interface ObserverGetter extends Function> { + } private abstract class ObserverOperation extends ObserverContextImpl { ObserverGetter observerGetter; @@ -532,12 +517,14 @@ private abstract class ObserverOperation extends ObserverContextImpl { } ObserverOperation(ObserverGetter observerGetter, User user, boolean bypassable) { - super(user != null? user: RpcServer.getRequestUser().orElse(null), bypassable); + super(user != null ? user : RpcServer.getRequestUser().orElse(null), bypassable); this.observerGetter = observerGetter; } abstract void callObserver() throws IOException; - protected void postEnvCall() {} + + protected void postEnvCall() { + } } // Can't derive ObserverOperation from ObserverOperationWithResult (R = Void) because then all @@ -555,15 +542,14 @@ public ObserverOperationWithoutResult(ObserverGetter observerGetter, User } public ObserverOperationWithoutResult(ObserverGetter observerGetter, User user, - boolean bypassable) { + boolean bypassable) { super(observerGetter, user, bypassable); } /** * In case of coprocessors which have many kinds of observers (for eg, {@link RegionCoprocessor} - * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all - * observers, in which case they will return null for that observer's getter. - * We simply ignore such cases. + * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all observers, + * in which case they will return null for that observer's getter. We simply ignore such cases. */ @Override void callObserver() throws IOException { @@ -584,17 +570,16 @@ public ObserverOperationWithResult(ObserverGetter observerGetter, R result } public ObserverOperationWithResult(ObserverGetter observerGetter, R result, - boolean bypassable) { + boolean bypassable) { this(observerGetter, result, null, bypassable); } - public ObserverOperationWithResult(ObserverGetter observerGetter, R result, - User user) { + public ObserverOperationWithResult(ObserverGetter observerGetter, R result, User user) { this(observerGetter, result, user, false); } private ObserverOperationWithResult(ObserverGetter observerGetter, R result, User user, - boolean bypassable) { + boolean bypassable) { super(observerGetter, user, bypassable); this.result = result; } @@ -620,18 +605,18 @@ void callObserver() throws IOException { * Do not call with an observerOperation that is null! Have the caller check. */ protected R execOperationWithResult( - final ObserverOperationWithResult observerOperation) throws IOException { + final ObserverOperationWithResult observerOperation) throws IOException { boolean bypass = execOperation(observerOperation); R result = observerOperation.getResult(); - return bypass == observerOperation.isBypassable()? 
result: null; + return bypass == observerOperation.isBypassable() ? result : null; } /** * @return True if we are to bypass (Can only be true if - * ObserverOperation#isBypassable(). + * ObserverOperation#isBypassable(). */ protected boolean execOperation(final ObserverOperation observerOperation) - throws IOException { + throws IOException { boolean bypass = false; if (observerOperation == null) { return bypass; @@ -662,19 +647,18 @@ protected boolean execOperation(final ObserverOperation observerOperation } /** - * Coprocessor classes can be configured in any order, based on that priority is set and - * chained in a sorted order. Should be used preStop*() hooks i.e. when master/regionserver is - * going down. This function first calls coprocessor methods (using ObserverOperation.call()) - * and then shutdowns the environment in postEnvCall().
      + * Coprocessor classes can be configured in any order, based on that priority is set and chained + * in a sorted order. Should be used preStop*() hooks i.e. when master/regionserver is going down. + * This function first calls coprocessor methods (using ObserverOperation.call()) and then + * shutdowns the environment in postEnvCall().
      * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors * may remain shutdown if any exception occurs during next coprocessor execution which prevent * master/regionserver stop or cluster shutdown. (Refer: * HBASE-16663 - * @return true if bypaas coprocessor execution, false if not. - * @throws IOException + * @return true if bypaas coprocessor execution, false if not. n */ protected boolean execShutdown(final ObserverOperation observerOperation) - throws IOException { + throws IOException { if (observerOperation == null) return false; boolean bypass = false; List envs = coprocEnvironments.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java index edb24cca35cc..34ee4b114711 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -25,8 +24,8 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** - * Coprocessor endpoints providing protobuf services should implement this - * interface and return the {@link Service} instance via {@link #getService()}. + * Coprocessor endpoints providing protobuf services should implement this interface and return the + * {@link Service} instance via {@link #getService()}. * @deprecated Since 2.0. Will be removed in 3.0 */ @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java index 0eb5e156b7b3..e73523af47aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,20 +17,18 @@ */ package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; - import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import org.apache.yetus.audience.InterfaceAudience; /** - * Marker annotation that denotes Coprocessors that are core to HBase. - * A Core Coprocessor is a CP that realizes a core HBase feature. Features are sometimes - * implemented first as a Coprocessor to prove viability. The idea is that once proven, they then - * migrate to core. Meantime, HBase Core Coprocessors get this annotation. No other Coprocessors - * can carry this annotation. 
+ * Marker annotation that denotes Coprocessors that are core to HBase. A Core Coprocessor is a CP + * that realizes a core HBase feature. Features are sometimes implemented first as a Coprocessor to + * prove viability. The idea is that once proven, they then migrate to core. Meantime, HBase Core + * Coprocessors get this annotation. No other Coprocessors can carry this annotation. */ // Core Coprocessors are generally naughty making use of HBase internals doing accesses no // Coprocessor should be up to so we mark these special Coprocessors with this annotation and on @@ -42,4 +39,5 @@ @InterfaceAudience.Private @Retention(RetentionPolicy.RUNTIME) // This Annotation is not @Documented because I don't want users figuring out its mechanics. -public @interface CoreCoprocessor {} +public @interface CoreCoprocessor { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java index d07d94202d5a..a821593d5303 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.coprocessor; @@ -27,22 +26,20 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** - * Coprocessors implement this interface to observe and mediate endpoint invocations - * on a region. - *

- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
- * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- * the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
- * </ul>
+ * Coprocessors implement this interface to observe and mediate endpoint invocations on a region.
+ * <br>
+ * <br>
+ * <h3>Exception Handling</h3>
+ * For all functions, exception handling is done as follows:
+ * <ul>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
+ * </ul>
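(Again an illustrative aside, not part of the patch: the preEndpointInvocation and postEndpointInvocation hooks shown just below wrap every protobuf endpoint call on a region. A minimal sketch, with hypothetical names, might be:)

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;

/** Illustrative sketch only: counts endpoint invocations on a region. Not part of this patch. */
public class CountingEndpointObserver implements RegionCoprocessor, EndpointObserver {
  private final LongAdder invocations = new LongAdder();

  @Override
  public Optional<EndpointObserver> getEndpointObserver() {
    return Optional.of(this);
  }

  @Override
  public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
    Service service, String methodName, Message request) throws IOException {
    invocations.increment();
    // Return the request unchanged; a new instance could be returned to rewrite it.
    return request;
  }

  @Override
  public void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
    Service service, String methodName, Message request, Message.Builder responseBuilder)
    throws IOException {
    // The response could be altered here via responseBuilder before it reaches the client.
  }
}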
      */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -50,35 +47,35 @@ public interface EndpointObserver { /** - * Called before an Endpoint service method is invoked. - * The request message can be altered by returning a new instance. Throwing an - * exception will abort the invocation. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * @param ctx the environment provided by the region server - * @param service the endpoint service - * @param request Request message expected by given {@code Service}'s method (by the name - * {@code methodName}). + * Called before an Endpoint service method is invoked. The request message can be altered by + * returning a new instance. Throwing an exception will abort the invocation. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param service the endpoint service + * @param request Request message expected by given {@code Service}'s method (by the name + * {@code methodName}). * @param methodName the invoked service method * @return the possibly modified message */ default Message preEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request) throws IOException { + Service service, String methodName, Message request) throws IOException { return request; } /** - * Called after an Endpoint service method is invoked. The response message can be - * altered using the builder. - * @param ctx the environment provided by the region server - * @param service the endpoint service - * @param methodName the invoked service method - * @param request Request message expected by given {@code Service}'s method (by the name - * {@code methodName}). + * Called after an Endpoint service method is invoked. The response message can be altered using + * the builder. + * @param ctx the environment provided by the region server + * @param service the endpoint service + * @param methodName the invoked service method + * @param request Request message expected by given {@code Service}'s method (by the name + * {@code methodName}). * @param responseBuilder Builder for final response to the client, with original response from - * Service's method merged into it. + * Service's method merged into it. */ default void postEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request, Message.Builder responseBuilder) - throws IOException {} + Service service, String methodName, Message request, Message.Builder responseBuilder) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java index 595e2d7765fb..2682b78fd513 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java @@ -21,11 +21,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Mark a class that it has a MasterServices accessor. - * Temporary hack until core Coprocesssors are integrated. + * Mark a class that it has a MasterServices accessor. Temporary hack until core Coprocesssors are + * integrated. * @see CoreCoprocessor * @deprecated Since 2.0.0 to be removed in 3.0.0. 
The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * facility as CoreCoprocessors are integated into core. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java index 89a2c7294643..cef03390acb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java @@ -21,11 +21,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Mark a class that it has a RegionServiceServices accessor. - * Temporary hack until core Coprocesssors are integrated. + * Mark a class that it has a RegionServiceServices accessor. Temporary hack until core + * Coprocesssors are integrated. * @see CoreCoprocessor * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * facility as CoreCoprocessors are integated into core. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java index d940385ffaee..a288a4dd869d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface MasterCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index cc72871b672b..c83b9da43080 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -39,48 +36,44 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironmentDo not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *

      Be aware that operations that make use of this Connection are executed as the RegionServer + *

      + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *

      Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *

      + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *
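To illustrate the connection guidance above (a minimal sketch, not part of this patch; the class and table names are hypothetical), a master coprocessor would typically create one Connection in start() and close it in stop():

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example class, not part of the patch above.
public class ConnectionCachingObserver implements MasterCoprocessor, MasterObserver {
  private Connection conn;

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // start() receives a subclass of this environment, so cast to reach createConnection().
    MasterCoprocessorEnvironment masterEnv = (MasterCoprocessorEnvironment) env;
    this.conn = masterEnv.createConnection(masterEnv.getConfiguration());
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    if (conn != null) {
      conn.close(); // connections from createConnection() must be closed by the caller
    }
  }

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Light-weight administrative use only; heavy traffic belongs in a separate client process.
    try (Admin admin = conn.getAdmin()) {
      admin.tableExists(TableName.valueOf("coproc_bookkeeping")); // table name is hypothetical
    }
  }
}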

      Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *

      + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -90,9 +83,10 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

+ * <p>
+ * See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples of how
+ * metrics can be instantiated and used.
+ * </p>
      * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForMaster(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 877b722ccda3..8a7cd9cfc6b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -45,31 +44,27 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - /** * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.master.HMaster} process. - *
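A minimal sketch of how getMetricRegistryForMaster() might be used (hypothetical class and metric name, not part of this patch):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.metrics.Counter;

// Hypothetical example class, not part of the patch above.
public class DisableCountingObserver implements MasterCoprocessor, MasterObserver {
  private Counter disableRequests;

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // Metrics registered here are exported together with the master's own metrics.
    MasterCoprocessorEnvironment masterEnv = (MasterCoprocessorEnvironment) env;
    this.disableRequests = masterEnv.getMetricRegistryForMaster().counter("disableTableRequests");
  }

  @Override
  public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    disableRequests.increment();
  }
}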

- *
+ * {@link org.apache.hadoop.hbase.master.HMaster} process. <br>
+ * <br>
 * Since most implementations will be interested in only a subset of hooks, this class uses
 * 'default' functions to avoid having to add unnecessary overrides. When the functions are
- * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type.
- * It is done in a way that these default definitions act as no-op. So our suggestion to
- * implementation would be to not call these 'default' methods from overrides.
- * <br><br>
- *
- * <h3>Exception Handling</h3><br>
- * For all functions, exception handling is done as follows:
- * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- *           the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- *           {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
- * </ul>
+ * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. It
+ * is done in a way that these default definitions act as no-op. So our suggestion to implementation
+ * would be to not call these 'default' methods from overrides. <br>
+ * <br>
+ * <h3>Exception Handling</h3><br>For all functions, exception handling is done as follows:
+ * <ul>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
+ * </ul>
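To make the exception-handling contract above concrete, a hypothetical observer (a sketch, not from this patch; the "prod_" policy is invented) can veto an operation by throwing an IOException, which is reported back to the client rather than unloading the coprocessor:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example class, not part of the patch above.
public class ProtectedTableObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // Throwing an IOException here is reported back to the client and the delete never starts;
    // an unexpected RuntimeException would instead abort the master or unload the coprocessor,
    // depending on CoprocessorHost#ABORT_ON_ERROR_KEY.
    if (tableName.getNameAsString().startsWith("prod_")) {
      throw new IOException("Deleting " + tableName + " is blocked by policy");
    }
  }
}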
      */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -79,184 +74,177 @@ public interface MasterObserver { /** * Called before we create the region infos for this table. Called as part of create table RPC * call. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param desc the TableDescriptor for the table * @return the TableDescriptor used to create the table. Default is the one passed in. Return * {@code null} means cancel the creation. */ default TableDescriptor preCreateTableRegionsInfos( - final ObserverContext ctx, TableDescriptor desc) - throws IOException { + final ObserverContext ctx, TableDescriptor desc) + throws IOException { return desc; } /** - * Called before a new table is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create - * table RPC call. - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called + * as part of create table RPC call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void preCreateTable(final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException {} + TableDescriptor desc, RegionInfo[] regions) throws IOException { + } /** - * Called after the createTable operation has been requested. Called as part - * of create table RPC call. - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called after the createTable operation has been requested. Called as part of create table RPC + * call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void postCreateTable(final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException {} + TableDescriptor desc, RegionInfo[] regions) throws IOException { + } /** - * Called before a new table is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create - * table procedure and it is async to the create RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called + * as part of create table procedure and it is async to the create RPC call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ - default void preCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException {} + default void preCreateTableAction(final ObserverContext ctx, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { + } /** - * Called after the createTable operation has been requested. Called as part - * of create table RPC call. Called as part of create table procedure and - * it is async to the create RPC call. 
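A sketch of the preCreateTableRegionsInfos() contract described above (hypothetical class and attribute name, not part of this patch): the returned descriptor is the one used for creation, and returning null cancels it:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example class, not part of the patch above.
public class CreateTablePolicyObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public TableDescriptor preCreateTableRegionsInfos(
      ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor desc) throws IOException {
    if (desc.getTableName().isSystemTable()) {
      return desc; // leave system tables untouched
    }
    if (desc.getColumnFamilyCount() == 0) {
      return null; // returning null cancels the table creation
    }
    // The descriptor returned here is the one the create procedure actually uses.
    return TableDescriptorBuilder.newBuilder(desc).setValue("CREATED_BY", "coprocessor").build();
  }
}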
- * - * @param ctx the environment to interact with the framework and master - * @param desc the TableDescriptor for the table + * Called after the createTable operation has been requested. Called as part of create table RPC + * call. Called as part of create table procedure and it is async to the create RPC call. + * @param ctx the environment to interact with the framework and master + * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void postCompletedCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException {} + final ObserverContext ctx, final TableDescriptor desc, + final RegionInfo[] regions) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table RPC call. - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preDeleteTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called after the deleteTable operation has been requested. Called as part - * of delete table RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the deleteTable operation has been requested. Called as part of delete table RPC + * call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postDeleteTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table procedure and - * it is async to the delete RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table procedure and it is async to the delete RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preDeleteTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + default void preDeleteTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table procedure and it is async to the - * delete RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table procedure and it is async to the delete RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedDeleteTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. 
Called as part of truncate table RPC call. - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTruncateTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called after the truncateTable operation has been requested. Called as part - * of truncate table RPC call. - * The truncate is synchronous, so this method will be called when the - * truncate operation is terminated. - * @param ctx the environment to interact with the framework and master + * Called after the truncateTable operation has been requested. Called as part of truncate table + * RPC call. The truncate is synchronous, so this method will be called when the truncate + * operation is terminated. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postTruncateTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table procedure and it is async - * to the truncate RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table procedure and it is async to the truncate RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preTruncateTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + default void preTruncateTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table procedure and it is async to the - * truncate RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table procedure and it is async to the truncate RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedTruncateTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table RPC call. - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called prior to modifying a table's properties. Called as part of modify table RPC call. 
+ * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table - * @param newDescriptor after modify operation, table will have this descriptor + * @param newDescriptor after modify operation, table will have this descriptor */ default TableDescriptor preModifyTable(final ObserverContext ctx, - final TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) - throws IOException { + final TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) + throws IOException { return newDescriptor; } /** - * Called after the modifyTable operation has been requested. Called as part - * of modify table RPC call. - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table - * @param oldDescriptor descriptor of table before modify operation happened + * Called after the modifyTable operation has been requested. Called as part of modify table RPC + * call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table + * @param oldDescriptor descriptor of table before modify operation happened * @param currentDescriptor current TableDescriptor of the table */ default void postModifyTable(final ObserverContext ctx, - final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) - throws IOException {} + final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) + throws IOException { + } /** - * Called prior to modifying a table's store file tracker. Called as part of modify - * table store file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * Called prior to modifying a table's store file tracker. Called as part of modify table store + * file tracker RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param dstSFT the store file tracker + * @param dstSFT the store file tracker * @return the store file tracker */ default String preModifyTableStoreFileTracker( @@ -266,23 +254,24 @@ default String preModifyTableStoreFileTracker( } /** - * Called after modifying a table's store file tracker. Called as part of modify - * table store file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * Called after modifying a table's store file tracker. Called as part of modify table store file + * tracker RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param dstSFT the store file tracker + * @param dstSFT the store file tracker */ default void postModifyTableStoreFileTracker( final ObserverContext ctx, final TableName tableName, - String dstSFT) throws IOException {} + String dstSFT) throws IOException { + } /** * Called prior to modifying a family's store file tracker. Called as part of modify family store * file tracker RPC call. 
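For illustration of preModifyTable()'s return value (a sketch under hypothetical names and an invented size cap, not part of this patch):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example class, not part of the patch above.
public class ModifyTableGuardObserver implements MasterCoprocessor, MasterObserver {
  private static final long MAX_FILESIZE = 20L * 1024 * 1024 * 1024; // example cap of 20 GB

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public TableDescriptor preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
      throws IOException {
    if (newDescriptor.getMaxFileSize() > MAX_FILESIZE) {
      // The descriptor returned from this hook is the one the modify procedure applies.
      return TableDescriptorBuilder.newBuilder(newDescriptor).setMaxFileSize(MAX_FILESIZE).build();
    }
    return newDescriptor;
  }
}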
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param family the column family - * @param dstSFT the store file tracker + * @param family the column family + * @param dstSFT the store file tracker * @return the store file tracker */ default String preModifyColumnFamilyStoreFileTracker( @@ -292,1237 +281,1290 @@ default String preModifyColumnFamilyStoreFileTracker( } /** - * Called after modifying a family store file tracker. Called as part of modify family store - * file tracker RPC call. - * @param ctx the environment to interact with the framework and master + * Called after modifying a family store file tracker. Called as part of modify family store file + * tracker RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param family the column family - * @param dstSFT the store file tracker + * @param family the column family + * @param dstSFT the store file tracker */ default void postModifyColumnFamilyStoreFileTracker( final ObserverContext ctx, final TableName tableName, - final byte[] family, String dstSFT) throws IOException {} + final byte[] family, String dstSFT) throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table + * Called prior to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. + * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table - * @param newDescriptor after modify operation, table will have this descriptor + * @param newDescriptor after modify operation, table will have this descriptor */ - default void preModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor currentDescriptor, - final TableDescriptor newDescriptor) throws IOException {} + default void preModifyTableAction(final ObserverContext ctx, + final TableName tableName, final TableDescriptor currentDescriptor, + final TableDescriptor newDescriptor) throws IOException { + } /** - * Called after to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * - * @param ctx the environment to interact with the framework and master - * @param tableName the name of the table - * @param oldDescriptor descriptor of table before modify operation happened + * Called after to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. 
+ * @param ctx the environment to interact with the framework and master + * @param tableName the name of the table + * @param oldDescriptor descriptor of table before modify operation happened * @param currentDescriptor current TableDescriptor of the table */ default void postCompletedModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor oldDescriptor, - final TableDescriptor currentDescriptor) throws IOException {} + final ObserverContext ctx, final TableName tableName, + final TableDescriptor oldDescriptor, final TableDescriptor currentDescriptor) + throws IOException { + } /** - * Called prior to enabling a table. Called as part of enable table RPC call. - * @param ctx the environment to interact with the framework and master + * Called prior to enabling a table. Called as part of enable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preEnableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called after the enableTable operation has been requested. Called as part - * of enable table RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the enableTable operation has been requested. Called as part of enable table RPC + * call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postEnableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called prior to enabling a table. Called as part of enable table procedure - * and it is async to the enable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called prior to enabling a table. Called as part of enable table procedure and it is async to + * the enable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preEnableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + default void preEnableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after the enableTable operation has been requested. Called as part - * of enable table procedure and it is async to the enable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after the enableTable operation has been requested. Called as part of enable table + * procedure and it is async to the enable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedEnableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called prior to disabling a table. Called as part of disable table RPC - * call. - * @param ctx the environment to interact with the framework and master + * Called prior to disabling a table. Called as part of disable table RPC call. 
+ * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preDisableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called after the disableTable operation has been requested. Called as part - * of disable table RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the disableTable operation has been requested. Called as part of disable table RPC + * call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postDisableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called prior to disabling a table. Called as part of disable table procedure - * and it is asyn to the disable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called prior to disabling a table. Called as part of disable table procedure and it is asyn to + * the disable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preDisableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + default void preDisableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after the disableTable operation has been requested. Called as part - * of disable table procedure and it is asyn to the disable table RPC call. - * - * @param ctx the environment to interact with the framework and master + * Called after the disableTable operation has been requested. Called as part of disable table + * procedure and it is asyn to the disable table RPC call. + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedDisableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** * Called before a abortProcedure request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param procId the Id of the procedure */ - default void preAbortProcedure( - ObserverContext ctx, final long procId) throws IOException {} + default void preAbortProcedure(ObserverContext ctx, + final long procId) throws IOException { + } /** * Called after a abortProcedure request has been processed. * @param ctx the environment to interact with the framework and master */ default void postAbortProcedure(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void preGetProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void postGetProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before a getLocks request has been processed. 
* @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ - default void preGetLocks(ObserverContext ctx) - throws IOException {} + default void preGetLocks(ObserverContext ctx) throws IOException { + } /** * Called after a getLocks request has been processed. * @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ - default void postGetLocks( - ObserverContext ctx) throws IOException {} + default void postGetLocks(ObserverContext ctx) throws IOException { + } /** * Called prior to moving a given region from one region server to another. - * @param ctx the environment to interact with the framework and master - * @param region the RegionInfo - * @param srcServer the source ServerName + * @param ctx the environment to interact with the framework and master + * @param region the RegionInfo + * @param srcServer the source ServerName * @param destServer the destination ServerName */ default void preMove(final ObserverContext ctx, - final RegionInfo region, final ServerName srcServer, - final ServerName destServer) - throws IOException {} + final RegionInfo region, final ServerName srcServer, final ServerName destServer) + throws IOException { + } /** * Called after the region move has been requested. - * @param ctx the environment to interact with the framework and master - * @param region the RegionInfo - * @param srcServer the source ServerName + * @param ctx the environment to interact with the framework and master + * @param region the RegionInfo + * @param srcServer the source ServerName * @param destServer the destination ServerName */ default void postMove(final ObserverContext ctx, - final RegionInfo region, final ServerName srcServer, - final ServerName destServer) - throws IOException {} + final RegionInfo region, final ServerName srcServer, final ServerName destServer) + throws IOException { + } /** * Called prior to assigning a specific region. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region */ default void preAssign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region assignment has been requested. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region */ default void postAssign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called prior to unassigning a given region. - * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void preUnassign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region unassignment has been requested. 
- * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void postUnassign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called prior to marking a given region as offline. - * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void preRegionOffline(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region has been marked offline. - * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param ctx the environment to interact with the framework and master n */ default void postRegionOffline(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** - * Called prior to requesting rebalancing of the cluster regions, though after - * the initial checks for regions in transition and the balance switch flag. - * @param ctx the environment to interact with the framework and master + * Called prior to requesting rebalancing of the cluster regions, though after the initial checks + * for regions in transition and the balance switch flag. + * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balancer */ - default void preBalance(final ObserverContext ctx, BalanceRequest request) - throws IOException {} + default void preBalance(final ObserverContext ctx, + BalanceRequest request) throws IOException { + } /** * Called after the balancing plan has been submitted. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balance - * @param plans the RegionPlans which master has executed. RegionPlan serves as hint - * as for the final destination for the underlying region but may not represent the - * final state of assignment + * @param plans the RegionPlans which master has executed. RegionPlan serves as hint as for the + * final destination for the underlying region but may not represent the final + * state of assignment */ - default void postBalance(final ObserverContext ctx, BalanceRequest request, List plans) - throws IOException {} + default void postBalance(final ObserverContext ctx, + BalanceRequest request, List plans) throws IOException { + } /** - * Called prior to setting split / merge switch - * Supports Coprocessor 'bypass'. - * @param ctx the coprocessor instance's environment - * @param newValue the new value submitted in the call + * Called prior to setting split / merge switch Supports Coprocessor 'bypass'. 
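A minimal sketch of the balancer hooks above (hypothetical class, not part of this patch), assuming a release where BalanceRequest is available as shown in the hunk:

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical example class, not part of the patch above.
public class BalanceAuditObserver implements MasterCoprocessor, MasterObserver {
  private static final Logger LOG = LoggerFactory.getLogger(BalanceAuditObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preBalance(ObserverContext<MasterCoprocessorEnvironment> ctx,
      BalanceRequest request) throws IOException {
    LOG.info("Balancer invoked, dryRun={}", request.isDryRun());
  }

  @Override
  public void postBalance(ObserverContext<MasterCoprocessorEnvironment> ctx,
      BalanceRequest request, List<RegionPlan> plans) throws IOException {
    // The plans are hints about intended moves; the final assignment may still differ.
    LOG.info("Balancer submitted {} region plans", plans.size());
  }
}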
+ * @param ctx the coprocessor instance's environment + * @param newValue the new value submitted in the call * @param switchType type of switch */ default void preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException {} + final boolean newValue, final MasterSwitchType switchType) throws IOException { + } /** * Called after setting split / merge switch - * @param ctx the coprocessor instance's environment - * @param newValue the new value submitted in the call + * @param ctx the coprocessor instance's environment + * @param newValue the new value submitted in the call * @param switchType type of switch */ default void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException {} + final boolean newValue, final MasterSwitchType switchType) throws IOException { + } /** * Called before the split region procedure is called. - * @param c the environment to interact with the framework and master + * @param c the environment to interact with the framework and master * @param tableName the table where the region belongs to - * @param splitRow split point + * @param splitRow split point */ - default void preSplitRegion( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) - throws IOException {} + default void preSplitRegion(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { + } /** * Called before the region is split. - * @param c the environment to interact with the framework and master + * @param c the environment to interact with the framework and master * @param tableName the table where the region belongs to - * @param splitRow split point + * @param splitRow split point */ - default void preSplitRegionAction( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) - throws IOException {} + default void preSplitRegionAction(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { + } /** * Called after the region is split. - * @param c the environment to interact with the framework and master + * @param c the environment to interact with the framework and master * @param regionInfoA the left daughter region * @param regionInfoB the right daughter region */ - default void postCompletedSplitRegionAction( - final ObserverContext c, - final RegionInfo regionInfoA, - final RegionInfo regionInfoB) throws IOException {} + default void postCompletedSplitRegionAction(final ObserverContext c, + final RegionInfo regionInfoA, final RegionInfo regionInfoB) throws IOException { + } /** * This will be called before update META step as part of split transaction. 
- * @param ctx the environment to interact with the framework and master - * @param splitKey - * @param metaEntries + * @param ctx the environment to interact with the framework and master nn */ default void preSplitRegionBeforeMETAAction( - final ObserverContext ctx, - final byte[] splitKey, - final List metaEntries) throws IOException {} - + final ObserverContext ctx, final byte[] splitKey, + final List metaEntries) throws IOException { + } /** * This will be called after update META step as part of split transaction * @param ctx the environment to interact with the framework and master */ default void preSplitRegionAfterMETAAction( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after the roll back of the split region is completed * @param ctx the environment to interact with the framework and master */ default void postRollBackSplitRegionAction( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * Called before the regions merge. * @param ctx the environment to interact with the framework and master */ - default void preMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + default void preMergeRegionsAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * called after the regions merge. * @param ctx the environment to interact with the framework and master */ default void postCompletedMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final RegionInfo mergedRegion) throws IOException {} + final ObserverContext ctx, final RegionInfo[] regionsToMerge, + final RegionInfo mergedRegion) throws IOException { + } /** * This will be called before update META step as part of regions merge transaction. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param metaEntries mutations to execute on hbase:meta atomically with regions merge updates. - * Any puts or deletes to execute on hbase:meta can be added to the mutations. + * Any puts or deletes to execute on hbase:meta can be added to the mutations. */ - default void preMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - @MetaMutationAnnotation List metaEntries) throws IOException {} + default void preMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, @MetaMutationAnnotation List metaEntries) + throws IOException { + } /** * This will be called after META step as part of regions merge transaction. * @param ctx the environment to interact with the framework and master */ - default void postMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final RegionInfo mergedRegion) throws IOException {} + default void postMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion) throws IOException { + } /** * This will be called after the roll back of the regions merge. 
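To illustrate the metaEntries parameter of preMergeRegionsCommitAction() described above (a sketch only; the audit column is purely hypothetical and not part of this patch):

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical example class, not part of the patch above.
public class MergeAuditObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preMergeRegionsCommitAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
      RegionInfo[] regionsToMerge, List<Mutation> metaEntries) throws IOException {
    // Mutations added here are applied to hbase:meta atomically with the merge updates.
    // The info:merge_audit column below is purely illustrative.
    Put audit = new Put(regionsToMerge[0].getRegionName());
    audit.addColumn(Bytes.toBytes("info"), Bytes.toBytes("merge_audit"),
      Bytes.toBytes(System.currentTimeMillis()));
    metaEntries.add(audit);
  }
}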
* @param ctx the environment to interact with the framework and master */ default void postRollBackMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + final ObserverContext ctx, final RegionInfo[] regionsToMerge) + throws IOException { + } /** * Called prior to modifying the flag used to enable/disable region balancing. * @param ctx the coprocessor instance's environment */ default void preBalanceSwitch(final ObserverContext ctx, - final boolean newValue) throws IOException {} + final boolean newValue) throws IOException { + } /** * Called after the flag to enable/disable balancing has changed. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param oldValue the previously set balanceSwitch value * @param newValue the newly set balanceSwitch value */ default void postBalanceSwitch(final ObserverContext ctx, - final boolean oldValue, final boolean newValue) throws IOException {} + final boolean oldValue, final boolean newValue) throws IOException { + } /** * Called prior to shutting down the full HBase cluster, including this * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preShutdown(final ObserverContext ctx) - throws IOException {} - + throws IOException { + } /** - * Called immediately prior to stopping this - * {@link org.apache.hadoop.hbase.master.HMaster} process. + * Called immediately prior to stopping this {@link org.apache.hadoop.hbase.master.HMaster} + * process. */ default void preStopMaster(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** - * Called immediately after an active master instance has completed - * initialization. Will not be called on standby master instances unless - * they take over the active role. + * Called immediately after an active master instance has completed initialization. Will not be + * called on standby master instances unless they take over the active role. */ default void postStartMaster(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Call before the master initialization is set to true. * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preMasterInitialization(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** - * Called before a new snapshot is taken. - * Called as part of snapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called before a new snapshot is taken. Called as part of snapshot RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void preSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called after the snapshot operation has been requested. - * Called as part of snapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called after the snapshot operation has been requested. Called as part of snapshot RPC call. 
+ * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void postSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** * Called after the snapshot operation has been completed. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void postCompletedSnapshotAction(ObserverContext ctx, - SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { + SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { } /** * Called before listSnapshots request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to list */ default void preListSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** * Called after listSnapshots request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to list */ default void postListSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** - * Called before a snapshot is cloned. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called before a snapshot is cloned. Called as part of restoreSnapshot RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to create */ default void preCloneSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called after a snapshot clone operation has been requested. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called after a snapshot clone operation has been requested. Called as part of restoreSnapshot + * RPC call. 
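A sketch of a snapshot hook rejecting a request (hypothetical naming policy, not part of this patch):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example class, not part of the patch above.
public class SnapshotNamingObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
      SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException {
    if (!snapshot.getName().startsWith("backup-")) {
      // Failing here rejects the snapshot RPC before any snapshot work starts.
      throw new IOException("Snapshot names must start with 'backup-': " + snapshot.getName());
    }
  }
}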
+ * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the v of the table to create */ default void postCloneSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called before a snapshot is restored. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called before a snapshot is restored. Called as part of restoreSnapshot RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to restore */ default void preRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called after a snapshot restore operation has been requested. - * Called as part of restoreSnapshot RPC call. - * @param ctx the environment to interact with the framework and master - * @param snapshot the SnapshotDescriptor for the snapshot + * Called after a snapshot restore operation has been requested. Called as part of restoreSnapshot + * RPC call. + * @param ctx the environment to interact with the framework and master + * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to restore */ default void postRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { + } /** - * Called before a snapshot is deleted. - * Called as part of deleteSnapshot RPC call. - * @param ctx the environment to interact with the framework and master + * Called before a snapshot is deleted. Called as part of deleteSnapshot RPC call. + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete */ default void preDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** - * Called after the delete snapshot operation has been requested. - * Called as part of deleteSnapshot RPC call. - * @param ctx the environment to interact with the framework and master + * Called after the delete snapshot operation has been requested. Called as part of deleteSnapshot + * RPC call. + * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete */ default void postDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** * Called before a getTableDescriptors request has been processed. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableNamesList the list of table names, or null if querying for all - * @param descriptors an empty list, can be filled with what to return in coprocessor - * @param regex regular expression used for filtering the table names + * @param descriptors an empty list, can be filled with what to return in coprocessor + * @param regex regular expression used for filtering the table names */ default void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException {} + List tableNamesList, List descriptors, String regex) + throws IOException { + } /** * Called after a getTableDescriptors request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableNamesList the list of table names, or null if querying for all - * @param descriptors the list of descriptors about to be returned - * @param regex regular expression used for filtering the table names + * @param descriptors the list of descriptors about to be returned + * @param regex regular expression used for filtering the table names */ default void postGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException {} + List tableNamesList, List descriptors, String regex) + throws IOException { + } /** * Called before a getTableNames request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors an empty list, can be filled with what to return by coprocessor - * @param regex regular expression used for filtering the table names + * @param regex regular expression used for filtering the table names */ default void preGetTableNames(ObserverContext ctx, - List descriptors, String regex) throws IOException {} + List descriptors, String regex) throws IOException { + } /** * Called after a getTableNames request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned - * @param regex regular expression used for filtering the table names + * @param regex regular expression used for filtering the table names */ default void postGetTableNames(ObserverContext ctx, - List descriptors, String regex) throws IOException {} - - + List descriptors, String regex) throws IOException { + } /** - * Called before a new namespace is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. + * Called before a new namespace is created by {@link org.apache.hadoop.hbase.master.HMaster}. * @param ctx the environment to interact with the framework and master - * @param ns the NamespaceDescriptor for the table + * @param ns the NamespaceDescriptor for the table */ default void preCreateNamespace(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } + /** * Called after the createNamespace operation has been requested. 
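To illustrate how the descriptor list in the getTableDescriptors hooks can be adjusted before it is returned (hypothetical class and prefix, not part of this patch):

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example class, not part of the patch above.
public class ListingFilterObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
      List<TableName> tableNamesList, List<TableDescriptor> descriptors, String regex)
      throws IOException {
    // The descriptors list is what the master is about to return; removing entries hides them.
    descriptors.removeIf(d -> d.getTableName().getNameAsString().startsWith("internal_"));
  }
}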
* @param ctx the environment to interact with the framework and master - * @param ns the NamespaceDescriptor for the table + * @param ns the NamespaceDescriptor for the table */ default void postCreateNamespace(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * namespace - * @param ctx the environment to interact with the framework and master + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a namespace + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void preDeleteNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called after the deleteNamespace operation has been requested. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void postDeleteNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called prior to modifying a namespace's properties. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param currentNsDescriptor current NamespaceDescriptor of the namespace - * @param newNsDescriptor after modify operation, namespace will have this descriptor + * @param newNsDescriptor after modify operation, namespace will have this descriptor */ default void preModifyNamespace(final ObserverContext ctx, - NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor) - throws IOException {} + NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor) + throws IOException { + } /** * Called after the modifyNamespace operation has been requested. - * @param ctx the environment to interact with the framework and master - * @param oldNsDescriptor descriptor of namespace before modify operation happened + * @param ctx the environment to interact with the framework and master + * @param oldNsDescriptor descriptor of namespace before modify operation happened * @param currentNsDescriptor current NamespaceDescriptor of the namespace */ default void postModifyNamespace(final ObserverContext ctx, - NamespaceDescriptor oldNsDescriptor, NamespaceDescriptor currentNsDescriptor) - throws IOException {} + NamespaceDescriptor oldNsDescriptor, NamespaceDescriptor currentNsDescriptor) + throws IOException { + } /** * Called before a getNamespaceDescriptor request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void preGetNamespaceDescriptor(ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called after a getNamespaceDescriptor request has been processed. 
* @param ctx the environment to interact with the framework and master - * @param ns the NamespaceDescriptor + * @param ns the NamespaceDescriptor */ default void postGetNamespaceDescriptor(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } /** * Called before a listNamespaces request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespaces an empty list, can be filled with what to return if bypassing * @throws IOException if something went wrong */ default void preListNamespaces(ObserverContext ctx, - List namespaces) throws IOException {} + List namespaces) throws IOException { + } /** * Called after a listNamespaces request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespaces the list of namespaces about to be returned * @throws IOException if something went wrong */ default void postListNamespaces(ObserverContext ctx, - List namespaces) throws IOException {}; + List namespaces) throws IOException { + }; /** * Called before a listNamespaceDescriptors request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors an empty list, can be filled with what to return by coprocessor */ default void preListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException {} + List descriptors) throws IOException { + } /** * Called after a listNamespaceDescriptors request has been processed. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned */ default void postListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException {} - + List descriptors) throws IOException { + } /** * Called before the table memstore is flushed to disk. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTableFlush(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called after the table memstore is flushed to disk. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postTableFlush(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called before the quota for the user is stored. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param userName the name of user - * @param quotas the current quota for the user + * @param quotas the current quota for the user */ default void preSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException {} + final String userName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the user is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param userName the name of user - * @param quotas the resulting quota for the user + * @param quotas the resulting quota for the user */ default void postSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException {} + final String userName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the user on the specified table is stored. - * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param tableName the name of the table - * @param quotas the current quota for the user on the table + * @param quotas the current quota for the user on the table */ - default void preSetUserQuota( - final ObserverContext ctx, final String userName, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + default void preSetUserQuota(final ObserverContext ctx, + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called after the quota for the user on the specified table is stored. - * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param tableName the name of the table - * @param quotas the resulting quota for the user on the table + * @param quotas the resulting quota for the user on the table */ - default void postSetUserQuota( - final ObserverContext ctx, final String userName, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + default void postSetUserQuota(final ObserverContext ctx, + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called before the quota for the user on the specified namespace is stored. 
- * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param namespace the name of the namespace - * @param quotas the current quota for the user on the namespace + * @param quotas the current quota for the user on the namespace */ - default void preSetUserQuota( - final ObserverContext ctx, final String userName, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + default void preSetUserQuota(final ObserverContext ctx, + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called after the quota for the user on the specified namespace is stored. - * @param ctx the environment to interact with the framework and master - * @param userName the name of user + * @param ctx the environment to interact with the framework and master + * @param userName the name of user * @param namespace the name of the namespace - * @param quotas the resulting quota for the user on the namespace + * @param quotas the resulting quota for the user on the namespace */ - default void postSetUserQuota( - final ObserverContext ctx, final String userName, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + default void postSetUserQuota(final ObserverContext ctx, + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called before the quota for the table is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param quotas the current quota for the table + * @param quotas the current quota for the table */ default void preSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the table is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @param quotas the resulting quota for the table + * @param quotas the resulting quota for the table */ default void postSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the namespace is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace - * @param quotas the current quota for the namespace + * @param quotas the current quota for the namespace */ default void preSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + final String namespace, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the namespace is stored. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace - * @param quotas the resulting quota for the namespace + * @param quotas the resulting quota for the namespace */ default void postSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + final String namespace, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the region server is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionServer the name of the region server - * @param quotas the current quota for the region server + * @param quotas the current quota for the region server */ default void preSetRegionServerQuota(final ObserverContext ctx, - final String regionServer, final GlobalQuotaSettings quotas) throws IOException {} + final String regionServer, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the region server is stored. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regionServer the name of the region server - * @param quotas the resulting quota for the region server + * @param quotas the resulting quota for the region server */ default void postSetRegionServerQuota(final ObserverContext ctx, - final String regionServer, final GlobalQuotaSettings quotas) throws IOException {} + final String regionServer, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before merge regions request. - * @param ctx coprocessor environment + * @param ctx coprocessor environment * @param regionsToMerge regions to be merged */ - default void preMergeRegions( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + default void preMergeRegions(final ObserverContext ctx, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * called after merge regions request. - * @param c coprocessor environment + * @param c coprocessor environment * @param regionsToMerge regions to be merged */ - default void postMergeRegions( - final ObserverContext c, - final RegionInfo[] regionsToMerge) throws IOException {} + default void postMergeRegions(final ObserverContext c, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * Called before servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup destination group */ default void preMoveServersAndTables(final ObserverContext ctx, - Set
      servers, Set tables, String targetGroup) throws IOException {} + Set
      servers, Set tables, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup name of group */ default void postMoveServersAndTables(final ObserverContext ctx, - Set
      servers, Set tables, String targetGroup) throws IOException {} + Set
      servers, Set tables, String targetGroup) throws IOException { + } /** * Called before servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup destination group */ default void preMoveServers(final ObserverContext ctx, - Set
      servers, String targetGroup) throws IOException {} + Set
      servers, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param servers set of servers to move + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move * @param targetGroup name of group */ default void postMoveServers(final ObserverContext ctx, - Set
      servers, String targetGroup) throws IOException {} + Set
      servers, String targetGroup) throws IOException { + } /** * Called before tables are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param tables set of tables to move + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move * @param targetGroup name of group */ default void preMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException {} + Set tables, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group - * @param ctx the environment to interact with the framework and master - * @param tables set of tables to move + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move * @param targetGroup name of group */ default void postMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException {} + Set tables, String targetGroup) throws IOException { + } /** * Called before a new region server group is added - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ - default void preAddRSGroup(final ObserverContext ctx, - String name) throws IOException {} + default void preAddRSGroup(final ObserverContext ctx, String name) + throws IOException { + } /** * Called after a new region server group is added - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ - default void postAddRSGroup(final ObserverContext ctx, - String name) throws IOException {} + default void postAddRSGroup(final ObserverContext ctx, String name) + throws IOException { + } /** * Called before a region server group is removed - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ default void preRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException {} + String name) throws IOException { + } /** * Called after a region server group is removed - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param name group name */ default void postRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException {} + String name) throws IOException { + } /** * Called before a region server group is removed - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName group name */ default void preBalanceRSGroup(final ObserverContext ctx, - String groupName, BalanceRequest request) throws IOException { + String groupName, BalanceRequest request) throws IOException { } /** * Called after a region server group is removed - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName group name - * @param request the request sent to the balancer - * @param response the response returned by the balancer + * @param request the request sent to the balancer + * @param response the response returned by the balancer */ default void postBalanceRSGroup(final ObserverContext ctx, - 
String groupName, BalanceRequest request, BalanceResponse response) throws IOException { + String groupName, BalanceRequest request, BalanceResponse response) throws IOException { } /** * Called before servers are removed from rsgroup - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param servers set of decommissioned servers to remove */ - default void preRemoveServers( - final ObserverContext ctx, - Set
      servers) throws IOException {} + default void preRemoveServers(final ObserverContext ctx, + Set
      servers) throws IOException { + } /** * Called after servers are removed from rsgroup - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param servers set of servers to remove */ - default void postRemoveServers( - final ObserverContext ctx, - Set
      servers) throws IOException {} + default void postRemoveServers(final ObserverContext ctx, + Set
      servers) throws IOException { + } /** * Called before getting region server group info of the passed groupName. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName name of the group to get RSGroupInfo for */ default void preGetRSGroupInfo(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called after getting region server group info of the passed groupName. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName name of the group to get RSGroupInfo for */ default void postGetRSGroupInfo(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called before getting region server group info of the passed tableName. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName name of the table to get RSGroupInfo for */ default void preGetRSGroupInfoOfTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called after getting region server group info of the passed tableName. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param tableName name of the table to get RSGroupInfo for */ default void postGetRSGroupInfoOfTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called before listing region server group information. * @param ctx the environment to interact with the framework and master */ default void preListRSGroups(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after listing region server group information. * @param ctx the environment to interact with the framework and master */ default void postListRSGroups(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before listing all tables in the region server group. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName name of the region server group */ default void preListTablesInRSGroup(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called after listing all tables in the region server group. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName name of the region server group */ default void postListTablesInRSGroup(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called before rename rsgroup. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param oldName old rsgroup name * @param newName new rsgroup name */ default void preRenameRSGroup(final ObserverContext ctx, - final String oldName, final String newName) throws IOException {} + final String oldName, final String newName) throws IOException { + } /** * Called after rename rsgroup. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param oldName old rsgroup name * @param newName new rsgroup name */ default void postRenameRSGroup(final ObserverContext ctx, - final String oldName, final String newName) throws IOException {} + final String oldName, final String newName) throws IOException { + } /** * Called before update rsgroup config. - * @param ctx the environment to interact with the framework and master - * @param groupName the group name + * @param ctx the environment to interact with the framework and master + * @param groupName the group name * @param configuration new configuration of the group name to be set */ default void preUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) - throws IOException {} + final String groupName, final Map configuration) throws IOException { + } /** * Called after update rsgroup config. - * @param ctx the environment to interact with the framework and master - * @param groupName the group name + * @param ctx the environment to interact with the framework and master + * @param groupName the group name * @param configuration new configuration of the group name to be set */ default void postUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) - throws IOException {} + final String groupName, final Map configuration) throws IOException { + } /** * Called before getting the configured namespaces and tables in the region server group. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName name of the region server group */ default void preGetConfiguredNamespacesAndTablesInRSGroup( final ObserverContext ctx, final String groupName) - throws IOException {} + throws IOException { + } /** * Called after getting the configured namespaces and tables in the region server group. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param groupName name of the region server group */ default void postGetConfiguredNamespacesAndTablesInRSGroup( final ObserverContext ctx, final String groupName) - throws IOException {} + throws IOException { + } /** * Called before getting region server group info of the passed server. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param server server to get RSGroupInfo for */ default void preGetRSGroupInfoOfServer(final ObserverContext ctx, - final Address server) throws IOException {} + final Address server) throws IOException { + } /** * Called after getting region server group info of the passed server. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param server server to get RSGroupInfo for */ default void postGetRSGroupInfoOfServer(final ObserverContext ctx, - final Address server) throws IOException {} + final Address server) throws IOException { + } /** * Called before add a replication peer - * @param ctx the environment to interact with the framework and master - * @param peerId a short name that identifies the peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer */ default void preAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException {} + String peerId, ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called after add a replication peer - * @param ctx the environment to interact with the framework and master - * @param peerId a short name that identifies the peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer */ default void postAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException {} + String peerId, ReplicationPeerConfig peerConfig) throws IOException { + } /** - * Called before remove a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before remove a replication peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void preRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after remove a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after remove a replication peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void postRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called before enable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before enable a replication peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void preEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after enable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after enable a replication peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void postEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called before disable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before disable a replication peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void preDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after disable a replication peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after disable a replication peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that 
identifies the peer */ default void postDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called before get the configured ReplicationPeerConfig for the specified peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before get the configured ReplicationPeerConfig for the specified peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void preGetReplicationPeerConfig(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** - * Called after get the configured ReplicationPeerConfig for the specified peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called after get the configured ReplicationPeerConfig for the specified peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ - default void postGetReplicationPeerConfig( - final ObserverContext ctx, String peerId) throws IOException {} + default void postGetReplicationPeerConfig(final ObserverContext ctx, + String peerId) throws IOException { + } /** - * Called before update peerConfig for the specified peer - * @param ctx - * @param peerId a short name that identifies the peer + * Called before update peerConfig for the specified peer + * @param ctx the environment to interact with the framework and master + * @param peerId a short name that identifies the peer */ default void preUpdateReplicationPeerConfig( - final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException {} + final ObserverContext ctx, String peerId, + ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called after update peerConfig for the specified peer - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param peerId a short name that identifies the peer */ default void postUpdateReplicationPeerConfig( - final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException {} + final ObserverContext ctx, String peerId, + ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called before list replication peers. - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regex The regular expression to match peer id */ default void preListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException {} + String regex) throws IOException { + } /** * Called after list replication peers. 
- * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param regex The regular expression to match peer id */ default void postListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException {} + String regex) throws IOException { + } /** * Called before transit current cluster state for the specified synchronous replication peer - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param peerId a short name that identifies the peer - * @param state the new state + * @param state the new state */ default void preTransitReplicationPeerSyncReplicationState( - final ObserverContext ctx, String peerId, - SyncReplicationState state) throws IOException { + final ObserverContext ctx, String peerId, + SyncReplicationState state) throws IOException { } /** * Called after transit current cluster state for the specified synchronous replication peer - * @param ctx the environment to interact with the framework and master + * @param ctx the environment to interact with the framework and master * @param peerId a short name that identifies the peer - * @param from the old state - * @param to the new state + * @param from the old state + * @param to the new state */ default void postTransitReplicationPeerSyncReplicationState( - final ObserverContext ctx, String peerId, - SyncReplicationState from, SyncReplicationState to) throws IOException { + final ObserverContext ctx, String peerId, + SyncReplicationState from, SyncReplicationState to) throws IOException { } /** @@ -1530,107 +1572,120 @@ default void postTransitReplicationPeerSyncReplicationState( * @param ctx the environment to interact with the framework and master */ default void preRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {} + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + } /** * Called after new LockProcedure is queued. * @param ctx the environment to interact with the framework and master */ default void postRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {} + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + } /** * Called before heartbeat to a lock. * @param ctx the environment to interact with the framework and master */ - default void preLockHeartbeat(ObserverContext ctx, - TableName tn, String description) throws IOException {} + default void preLockHeartbeat(ObserverContext ctx, TableName tn, + String description) throws IOException { + } /** * Called after heartbeat to a lock. * @param ctx the environment to interact with the framework and master */ default void postLockHeartbeat(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before get cluster status. */ default void preGetClusterMetrics(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after get cluster status. */ default void postGetClusterMetrics(ObserverContext ctx, - ClusterMetrics status) throws IOException {} + ClusterMetrics status) throws IOException { + } /** * Called before clear dead region servers. 
*/ default void preClearDeadServers(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after clear dead region servers. */ default void postClearDeadServers(ObserverContext ctx, - List servers, List notClearedServers) - throws IOException {} + List servers, List notClearedServers) throws IOException { + } /** * Called before decommission region servers. */ default void preDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException {} + List servers, boolean offload) throws IOException { + } /** * Called after decommission region servers. */ default void postDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException {} + List servers, boolean offload) throws IOException { + } /** * Called before list decommissioned region servers. */ default void preListDecommissionedRegionServers(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after list decommissioned region servers. */ - default void postListDecommissionedRegionServers(ObserverContext ctx) - throws IOException {} + default void postListDecommissionedRegionServers( + ObserverContext ctx) throws IOException { + } /** * Called before recommission region server. */ default void preRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException {} + ServerName server, List encodedRegionNames) throws IOException { + } /** * Called after recommission region server. */ default void postRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException {} + ServerName server, List encodedRegionNames) throws IOException { + } /** * Called before switching rpc throttle enabled state. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param enable the rpc throttle value */ default void preSwitchRpcThrottle(final ObserverContext ctx, - final boolean enable) throws IOException { + final boolean enable) throws IOException { } /** * Called after switching rpc throttle enabled state. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param oldValue the previously rpc throttle value * @param newValue the newly rpc throttle value */ default void postSwitchRpcThrottle(final ObserverContext ctx, - final boolean oldValue, final boolean newValue) throws IOException { + final boolean oldValue, final boolean newValue) throws IOException { } /** @@ -1638,104 +1693,104 @@ default void postSwitchRpcThrottle(final ObserverContext ctx) - throws IOException { + throws IOException { } /** * Called after getting if is rpc throttle enabled. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param rpcThrottleEnabled the rpc throttle enabled value */ default void postIsRpcThrottleEnabled(final ObserverContext ctx, - final boolean rpcThrottleEnabled) throws IOException { + final boolean rpcThrottleEnabled) throws IOException { } /** * Called before switching exceed throttle quota state. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param enable the exceed throttle quota value */ default void preSwitchExceedThrottleQuota(final ObserverContext ctx, - final boolean enable) throws IOException { + final boolean enable) throws IOException { } /** * Called after switching exceed throttle quota state. 
- * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param oldValue the previously exceed throttle quota value * @param newValue the newly exceed throttle quota value */ default void postSwitchExceedThrottleQuota( - final ObserverContext ctx, final boolean oldValue, - final boolean newValue) throws IOException { + final ObserverContext ctx, final boolean oldValue, + final boolean newValue) throws IOException { } /** * Called before granting user permissions. - * @param ctx the coprocessor instance's environment - * @param userPermission the user and permissions + * @param ctx the coprocessor instance's environment + * @param userPermission the user and permissions * @param mergeExistingPermissions True if merge with previous granted permissions */ default void preGrant(ObserverContext ctx, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { } /** * Called after granting user permissions. - * @param ctx the coprocessor instance's environment - * @param userPermission the user and permissions + * @param ctx the coprocessor instance's environment + * @param userPermission the user and permissions * @param mergeExistingPermissions True if merge with previous granted permissions */ default void postGrant(ObserverContext ctx, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { } /** * Called before revoking user permissions. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param userPermission the user and permissions */ default void preRevoke(ObserverContext ctx, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { } /** * Called after revoking user permissions. - * @param ctx the coprocessor instance's environment + * @param ctx the coprocessor instance's environment * @param userPermission the user and permissions */ default void postRevoke(ObserverContext ctx, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { } /** * Called before getting user permissions. - * @param ctx the coprocessor instance's environment - * @param userName the user name, null if get all user permissions + * @param ctx the coprocessor instance's environment + * @param userName the user name, null if get all user permissions * @param namespace the namespace, null if don't get namespace permission * @param tableName the table name, null if don't get table permission - * @param family the table column family, null if don't get table family permission + * @param family the table column family, null if don't get table family permission * @param qualifier the table column qualifier, null if don't get table qualifier permission * @throws IOException if something went wrong */ default void preGetUserPermissions(ObserverContext ctx, - String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) - throws IOException { + String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) + throws IOException { } /** * Called after getting user permissions. 
- * @param ctx the coprocessor instance's environment - * @param userName the user name, null if get all user permissions + * @param ctx the coprocessor instance's environment + * @param userName the user name, null if get all user permissions * @param namespace the namespace, null if don't get namespace permission * @param tableName the table name, null if don't get table permission - * @param family the table column family, null if don't get table family permission + * @param family the table column family, null if don't get table family permission * @param qualifier the table column qualifier, null if don't get table qualifier permission * @throws IOException if something went wrong */ default void postGetUserPermissions(ObserverContext ctx, - String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) - throws IOException { + String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) + throws IOException { } /* @@ -1745,16 +1800,16 @@ default void postGetUserPermissions(ObserverContext ctx, - String userName, List permissions) throws IOException { + String userName, List permissions) throws IOException { } /** * Called after checking if user has permissions. - * @param ctx the coprocessor instance's environment - * @param userName the user name + * @param ctx the coprocessor instance's environment + * @param userName the user name * @param permissions the permission list */ default void postHasUserPermissions(ObserverContext ctx, - String userName, List permissions) throws IOException { + String userName, List permissions) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java index 4acec8c0956e..499f8e4e31fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -40,6 +38,7 @@ import org.apache.hadoop.hbase.util.LossyCounting; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** @@ -61,15 +60,14 @@ public class MetaTableMetrics implements RegionCoprocessor { private Set metrics = ConcurrentHashMap.newKeySet(); enum MetaTableOps { - GET, PUT, DELETE, + GET, + PUT, + DELETE, } private ImmutableMap, MetaTableOps> opsNameMap = - ImmutableMap., MetaTableOps>builder() - .put(Put.class, MetaTableOps.PUT) - .put(Get.class, MetaTableOps.GET) - .put(Delete.class, MetaTableOps.DELETE) - .build(); + ImmutableMap., MetaTableOps> builder().put(Put.class, MetaTableOps.PUT) + .put(Get.class, MetaTableOps.GET).put(Delete.class, MetaTableOps.DELETE).build(); class ExampleRegionObserverMeta implements RegionCoprocessor, RegionObserver { @@ -80,23 +78,23 @@ public Optional getRegionObserver() { @Override public void preGetOp(ObserverContext e, Get get, - List results) throws IOException { + List results) throws IOException { registerAndMarkMetrics(e, get); } @Override public void prePut(ObserverContext e, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { registerAndMarkMetrics(e, put); } @Override public void preDelete(ObserverContext e, Delete delete, - WALEdit edit, Durability durability) { + WALEdit edit, Durability durability) { registerAndMarkMetrics(e, delete); } - private void registerAndMarkMetrics(ObserverContext e, Row row){ + private void registerAndMarkMetrics(ObserverContext e, Row row) { if (!active || !isMetaTableOp(e)) { return; } @@ -122,7 +120,7 @@ private String getTableNameFromOp(Row op) { /** * Get regionId from Ops such as: get, put, delete. - * @param op such as get, put or delete. + * @param op such as get, put or delete. 
*/ private String getRegionIdFromOp(Row op) { final String tableRowKey = Bytes.toString(op.getRow()); @@ -134,8 +132,7 @@ private String getRegionIdFromOp(Row op) { } private boolean isMetaTableOp(ObserverContext e) { - return TableName.META_TABLE_NAME - .equals(e.getEnvironment().getRegionInfo().getTable()); + return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable()); } private void clientMetricRegisterAndMark() { @@ -193,7 +190,7 @@ private void registerAndMarkMeter(String requestMeter) { if (requestMeter.isEmpty()) { return; } - if(!registry.get(requestMeter).isPresent()){ + if (!registry.get(requestMeter).isPresent()) { metrics.add(requestMeter); } registry.meter(requestMeter).mark(); @@ -266,10 +263,12 @@ public Optional getRegionObserver() { @Override public void start(CoprocessorEnvironment env) throws IOException { observer = new ExampleRegionObserverMeta(); - if (env instanceof RegionCoprocessorEnvironment + if ( + env instanceof RegionCoprocessorEnvironment && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() - .equals(TableName.META_TABLE_NAME)) { + .equals(TableName.META_TABLE_NAME) + ) { RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env; registry = regionCoprocessorEnv.getMetricRegistryForRegionServer(); LossyCounting.LossyCountingListener listener = key -> { @@ -287,7 +286,7 @@ public void start(CoprocessorEnvironment env) throws IOException { @Override public void stop(CoprocessorEnvironment env) throws IOException { // since meta region can move around, clear stale metrics when stop. - for(String metric:metrics){ + for (String metric : metrics) { registry.remove(metric); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java index a77a0fe31f0c..2ecf63263e23 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class for tracking metrics for various types of coprocessors. 
Each coprocessor instance @@ -36,49 +33,42 @@ public class MetricsCoprocessor { // Master coprocessor metrics private static final String MASTER_COPROC_METRICS_NAME = "Coprocessor.Master"; private static final String MASTER_COPROC_METRICS_CONTEXT = "master"; - private static final String MASTER_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase MasterObservers"; - private static final String MASTER_COPROC_METRICS_JMX_CONTEXT - = "Master,sub=" + MASTER_COPROC_METRICS_NAME; + private static final String MASTER_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase MasterObservers"; + private static final String MASTER_COPROC_METRICS_JMX_CONTEXT = + "Master,sub=" + MASTER_COPROC_METRICS_NAME; // RegionServer coprocessor metrics private static final String RS_COPROC_METRICS_NAME = "Coprocessor.RegionServer"; private static final String RS_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String RS_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase RegionServerObservers"; - private static final String RS_COPROC_METRICS_JMX_CONTEXT = "RegionServer,sub=" - + RS_COPROC_METRICS_NAME; + private static final String RS_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase RegionServerObservers"; + private static final String RS_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + RS_COPROC_METRICS_NAME; // Region coprocessor metrics private static final String REGION_COPROC_METRICS_NAME = "Coprocessor.Region"; private static final String REGION_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String REGION_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase RegionObservers"; - private static final String REGION_COPROC_METRICS_JMX_CONTEXT - = "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; + private static final String REGION_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase RegionObservers"; + private static final String REGION_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; // WAL coprocessor metrics private static final String WAL_COPROC_METRICS_NAME = "Coprocessor.WAL"; private static final String WAL_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String WAL_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase WALObservers"; - private static final String WAL_COPROC_METRICS_JMX_CONTEXT - = "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; + private static final String WAL_COPROC_METRICS_DESCRIPTION = "Metrics about HBase WALObservers"; + private static final String WAL_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; private static String suffix(String metricName, String cpName) { - return new StringBuilder(metricName) - .append(".") - .append("CP_") - .append(cpName) - .toString(); + return new StringBuilder(metricName).append(".").append("CP_").append(cpName).toString(); } static MetricRegistryInfo createRegistryInfoForMasterCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(MASTER_COPROC_METRICS_NAME, clazz), - MASTER_COPROC_METRICS_DESCRIPTION, - suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), - MASTER_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(MASTER_COPROC_METRICS_NAME, clazz), + MASTER_COPROC_METRICS_DESCRIPTION, suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), + MASTER_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForMasterCoprocessor(String clazz) { @@ -86,11 +76,9 @@ public static MetricRegistry createRegistryForMasterCoprocessor(String clazz) { } static MetricRegistryInfo 
createRegistryInfoForRSCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(RS_COPROC_METRICS_NAME, clazz), - RS_COPROC_METRICS_DESCRIPTION, - suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), - RS_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(RS_COPROC_METRICS_NAME, clazz), + RS_COPROC_METRICS_DESCRIPTION, suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), + RS_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForRSCoprocessor(String clazz) { @@ -98,11 +86,9 @@ public static MetricRegistry createRegistryForRSCoprocessor(String clazz) { } public static MetricRegistryInfo createRegistryInfoForRegionCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(REGION_COPROC_METRICS_NAME, clazz), - REGION_COPROC_METRICS_DESCRIPTION, - suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), - REGION_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(REGION_COPROC_METRICS_NAME, clazz), + REGION_COPROC_METRICS_DESCRIPTION, suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), + REGION_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForRegionCoprocessor(String clazz) { @@ -110,11 +96,9 @@ public static MetricRegistry createRegistryForRegionCoprocessor(String clazz) { } public static MetricRegistryInfo createRegistryInfoForWALCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(WAL_COPROC_METRICS_NAME, clazz), - WAL_COPROC_METRICS_DESCRIPTION, - suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), - WAL_COPROC_METRICS_CONTEXT, false); + return new MetricRegistryInfo(suffix(WAL_COPROC_METRICS_NAME, clazz), + WAL_COPROC_METRICS_DESCRIPTION, suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), + WAL_COPROC_METRICS_CONTEXT, false); } public static MetricRegistry createRegistryForWALCoprocessor(String clazz) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index 5262732a45c9..61c4f86e1dfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -63,17 +63,12 @@ /** * This class implements atomic multi row transactions using * {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} and Coprocessor - * endpoints. We can also specify some conditions to perform conditional update. - * - * Defines a protocol to perform multi row transactions. - * See {@link MultiRowMutationEndpoint} for the implementation. - *
      - * See - * {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} - * for details and limitations. + * endpoints. We can also specify some conditions to perform conditional update. Defines a protocol + * to perform multi row transactions. See {@link MultiRowMutationEndpoint} for the implementation. *
      - * Example: - * + * See {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} for details and + * limitations.
      + * Example: * Put p = new Put(row1); * Delete d = new Delete(row2); * Increment i = new Increment(row3); @@ -113,7 +108,7 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements @Override public void mutateRows(RpcController controller, MutateRowsRequest request, - RpcCallback done) { + RpcCallback done) { boolean matches = true; List rowLocks = null; try { @@ -131,8 +126,7 @@ public void mutateRows(RpcController controller, MutateRowsRequest request, for (Mutation m : mutations) { // check whether rows are in range for this region if (!HRegion.rowIsInRange(regionInfo, m.getRow())) { - String msg = "Requested row out of range '" - + Bytes.toStringBinary(m.getRow()) + "'"; + String msg = "Requested row out of range '" + Bytes.toStringBinary(m.getRow()) + "'"; if (rowsToLock.isEmpty()) { // if this is the first row, region might have moved, // allow client to retry @@ -208,8 +202,9 @@ private boolean matches(Region region, ClientProtos.Condition condition) throws comparator = ProtobufUtil.toComparator(condition.getComparator()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = condition.hasTimeRange() + ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); Get get = new Get(row); if (family != null) { @@ -251,9 +246,8 @@ private boolean matches(Region region, ClientProtos.Condition condition) throws private void checkFamily(Region region, byte[] family) throws NoSuchColumnFamilyException { if (!region.getTableDescriptor().hasColumnFamily(family)) { - throw new NoSuchColumnFamilyException( - "Column family " + Bytes.toString(family) + " does not exist in region " + this - + " in table " + region.getTableDescriptor()); + throw new NoSuchColumnFamilyException("Column family " + Bytes.toString(family) + + " does not exist in region " + this + " in table " + region.getTableDescriptor()); } } @@ -284,17 +278,17 @@ public Iterable getServices() { /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
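For readers following the Example in the class javadoc above, a minimal client-side sketch of driving MultiRowMutationEndpoint is shown below. It is a hedged sketch, not part of this patch: it assumes the non-shaded protobuf-generated classes (MutateRowsRequest, MultiRowMutationService, ClientProtos.MutationProto.MutationType; package names vary by release), an open Table named table, and placeholder row/column byte arrays, and it relies on all mutated rows mapping to the same region.

// Sketch only: exception handling omitted; every row touched must live in one region.
Put p = new Put(ROW1);
p.addColumn(FAMILY, QUALIFIER, VALUE);
Delete d = new Delete(ROW2);

MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, p));
mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, d));

// Route the endpoint call through the region hosting ROW1.
CoprocessorRpcChannel channel = table.coprocessorService(ROW1);
MultiRowMutationService.BlockingInterface service =
    MultiRowMutationService.newBlockingStub(channel);
service.mutateRows(null, mrmBuilder.build());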
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java index c756926fb213..c0fd791bcef8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java @@ -17,23 +17,22 @@ */ package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - /** * Carries the execution state for a given invocation of an Observer coprocessor - * ({@link RegionObserver}, {@link MasterObserver}, or {@link WALObserver}) - * method. The same ObserverContext instance is passed sequentially to all loaded - * coprocessors for a given Observer method trigger, with the - * CoprocessorEnvironment reference set appropriately for each Coprocessor type: - * e.g. the RegionCoprocessorEnvironment is passed to RegionCoprocessors, and so on. - * @param The {@link CoprocessorEnvironment} subclass applicable to the - * revelant Observer interface. + * ({@link RegionObserver}, {@link MasterObserver}, or {@link WALObserver}) method. The same + * ObserverContext instance is passed sequentially to all loaded coprocessors for a given Observer + * method trigger, with the CoprocessorEnvironment reference set appropriately for each + * Coprocessor type: e.g. the RegionCoprocessorEnvironment is passed to RegionCoprocessors, and so + * on. + * @param The {@link CoprocessorEnvironment} subclass applicable to the revelant Observer + * interface. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -47,29 +46,29 @@ public interface ObserverContext { * Coprocessor invocations, only on a small subset of methods, mostly preXXX calls in * RegionObserver. Check javadoc on the pertinent Coprocessor Observer to see if * bypass is supported. - *

This behavior of honoring only a subset of methods is new since hbase-2.0.0. - * <p>
      Where bypass is supported what is being bypassed is all of the core code - * implementing the remainder of the operation. In order to understand what - * calling bypass() will skip, a coprocessor implementer should read and - * understand all of the remaining code and its nuances. Although this - * is good practice for coprocessor developers in general, it demands a lot. - * What is skipped is extremely version dependent. The core code will vary, perhaps significantly, - * even between point releases. We do not provide the promise of consistent behavior even between - * point releases for the bypass semantic. To achieve - * that we could not change any code between hook points. Therefore the - * coprocessor implementer becomes an HBase core developer in practice as soon - * as they rely on bypass(). Every release of HBase may break the assumption - * that the replacement for the bypassed code takes care of all necessary - * skipped concerns. Because those concerns can change at any point, such an - * assumption is never safe.

      - *

      As of hbase2, when bypass has been set, we will NOT call any Coprocessors follow the - * bypassing Coprocessor; we cut short the processing and return the bypassing Coprocessors - * response (this used be a separate 'complete' option that has been folded into the - * 'bypass' in hbase2.

+ * <p>
      + * This behavior of honoring only a subset of methods is new since hbase-2.0.0. + *

      + * Where bypass is supported what is being bypassed is all of the core code implementing the + * remainder of the operation. In order to understand what calling bypass() will skip, a + * coprocessor implementer should read and understand all of the remaining code and its nuances. + * Although this is good practice for coprocessor developers in general, it demands a lot. What is + * skipped is extremely version dependent. The core code will vary, perhaps significantly, even + * between point releases. We do not provide the promise of consistent behavior even between point + * releases for the bypass semantic. To achieve that we could not change any code between hook + * points. Therefore the coprocessor implementer becomes an HBase core developer in practice as + * soon as they rely on bypass(). Every release of HBase may break the assumption that the + * replacement for the bypassed code takes care of all necessary skipped concerns. Because those + * concerns can change at any point, such an assumption is never safe. + *

+ * <p>
      + * As of hbase2, when bypass has been set, we will NOT call any Coprocessors follow the bypassing + * Coprocessor; we cut short the processing and return the bypassing Coprocessors response (this + * used be a separate 'complete' option that has been folded into the 'bypass' in hbase2. + *
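To make the bypass semantics above concrete, here is a hedged sketch (not from this patch) of a RegionObserver that answers a Get itself and then calls bypass(); preGetOp is one of the hooks that still honors bypass in hbase-2 and later. The class, family, qualifier and value names are illustrative.

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;

public class CannedGetObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> result) throws IOException {
    // Answer the Get from the coprocessor itself...
    result.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(get.getRow())
      .setFamily(Bytes.toBytes("f"))
      .setQualifier(Bytes.toBytes("q"))
      .setTimestamp(System.currentTimeMillis())
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes("canned"))
      .build());
    // ...then cut the processing short: the core Get and any coprocessors loaded after
    // this one are skipped, and this result is what the client sees.
    c.bypass();
  }
}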

      */ void bypass(); - /** * Returns the active user for the coprocessor call. If an explicit {@code User} instance was * provided to the constructor, that will be returned, otherwise if we are in the context of an diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java index 9a23ffaa4a87..a3a4a93005c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java @@ -69,7 +69,7 @@ public void bypass() { /** * @return {@code true}, if {@link ObserverContext#bypass()} was called by one of the loaded - * coprocessors, {@code false} otherwise. + * coprocessors, {@code false} otherwise. */ public boolean shouldBypass() { if (!isBypassable()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java index d7705ef25b7c..4b85cba2940d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java @@ -34,12 +34,10 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.yetus.audience.InterfaceAudience; - /** * Wraps a Configuration to make it read-only. */ @@ -292,7 +290,7 @@ public char[] getPassword(String name) throws IOException { @Override public InetSocketAddress getSocketAddr(String hostProperty, String addressProperty, - String defaultAddressValue, int defaultPort) { + String defaultAddressValue, int defaultPort) { return conf.getSocketAddr(hostProperty, addressProperty, defaultAddressValue, defaultPort); } @@ -308,7 +306,7 @@ public void setSocketAddr(String name, InetSocketAddress addr) { @Override public InetSocketAddress updateConnectAddr(String hostProperty, String addressProperty, - String defaultAddressValue, InetSocketAddress addr) { + String defaultAddressValue, InetSocketAddress addr) { throw new UnsupportedOperationException("Read-only Configuration"); } @@ -339,7 +337,7 @@ public Class getClass(String name, Class defaultValue) { @Override public Class getClass(String name, Class defaultValue, - Class xface) { + Class xface) { return conf.getClass(name, defaultValue, xface); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java index 16c6d3990402..15d6cba0de26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index 84e6d25e7699..44db13505703 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -58,48 +55,44 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironmentDo not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. 
Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *
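As a hedged illustration of the light-weight, administrative use described here (not part of the patch), a region coprocessor's start() might use the shared Connection once to create an auxiliary table. The table and family names are illustrative; only the Admin is closed, never the shared Connection itself.

@Override
public void start(CoprocessorEnvironment env) throws IOException {
  if (!(env instanceof RegionCoprocessorEnvironment)) {
    throw new CoprocessorException("Must be loaded on a table region!");
  }
  RegionCoprocessorEnvironment renv = (RegionCoprocessorEnvironment) env;
  TableName auditTable = TableName.valueOf("coproc_audit"); // illustrative name
  // One-off admin call on the host's shared Connection; do not close or abort the Connection.
  try (Admin admin = renv.getConnection().getAdmin()) {
    if (!admin.tableExists(auditTable)) {
      admin.createTable(TableDescriptorBuilder.newBuilder(auditTable)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
        .build());
    }
  }
}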

      Be aware that operations that make use of this Connection are executed as the RegionServer + *

      + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *

      Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *

      + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *
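For heavier use, a hedged sketch of the pattern described above: create one Connection in start(), reuse it from the hooks, and close it in stop(). Error handling and imports (org.apache.hadoop.hbase.client.Connection and friends) are elided; this is an outline under those assumptions, not the patch's code.

private Connection connection; // created once, reused by the hooks, closed in stop()

@Override
public void start(CoprocessorEnvironment env) throws IOException {
  if (env instanceof RegionCoprocessorEnvironment) {
    // Heavy-weight: do this once at start, not per operation.
    this.connection =
        ((RegionCoprocessorEnvironment) env).createConnection(env.getConfiguration());
  }
}

@Override
public void stop(CoprocessorEnvironment env) throws IOException {
  if (connection != null) {
    connection.close(); // we own this Connection, unlike the one from getConnection()
  }
}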

      Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *

      + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -109,14 +102,15 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleRegionObserverWithMetrics class in the hbase-examples modules to see examples of how - * metrics can be instantiated and used.

      + * metrics tracked at this level will be shared by all the coprocessor instances of the same class + * in the same region server process. Note that there will be one region coprocessor environment + * per region in the server, but all of these instances will share the same MetricRegistry. The + * metric instances (like Counter, Timer, etc) will also be shared among all of the region + * coprocessor instances. + *

      + * See ExampleRegionObserverWithMetrics class in the hbase-examples modules to see examples of how + * metrics can be instantiated and used. + *
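A hedged sketch of how a region coprocessor might obtain the shared registry and a Counter from it in start(); the metric name is illustrative, the types come from org.apache.hadoop.hbase.metrics, and the surrounding class is assumed to look like the observer sketched earlier. ExampleRegionObserverWithMetrics in hbase-examples remains the authoritative reference.

private Counter preGetCounter; // shared across all instances of this coprocessor class

@Override
public void start(CoprocessorEnvironment env) throws IOException {
  if (env instanceof RegionCoprocessorEnvironment) {
    MetricRegistry registry =
        ((RegionCoprocessorEnvironment) env).getMetricRegistryForRegionServer();
    // counter() hands back the already-registered Counter if another instance created it first.
    this.preGetCounter = registry.counter("preGetRequests");
  }
}

@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
    List<Cell> result) throws IOException {
  preGetCounter.increment();
}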

      * @return A MetricRegistry for the coprocessor class to track and export metrics. */ // Note: we are not exposing getMetricRegistryForRegion(). per-region metrics are already costly diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 4f4d79cdbe04..057a9c56814e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -7,23 +7,20 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -106,158 +103,171 @@ public interface RegionObserver { /** Mutation type for postMutationBeforeWAL hook */ enum MutationType { - APPEND, INCREMENT + APPEND, + INCREMENT } /** * Called before the region is reported as open to the master. * @param c the environment provided by the region server */ - default void preOpen(ObserverContext c) throws IOException {} + default void preOpen(ObserverContext c) throws IOException { + } /** * Called after the region is reported as open to the master. * @param c the environment provided by the region server */ - default void postOpen(ObserverContext c) {} + default void postOpen(ObserverContext c) { + } /** * Called before the memstore is flushed to disk. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param tracker tracker used to track the life cycle of a flush */ default void preFlush(final ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException {} + FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before we open store scanner for flush. You can use the {@code options} to change max * versions and TTL for the scanner being opened. - * @param c the environment provided by the region server - * @param store the store where flush is being requested + * @param c the environment provided by the region server + * @param store the store where flush is being requested * @param options used to change max versions and TTL for the scanner being opened */ default void preFlushScannerOpen(ObserverContext c, Store store, - ScanOptions options,FlushLifeCycleTracker tracker) throws IOException {} + ScanOptions options, FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before a Store's memstore is flushed to disk. 
- * @param c the environment provided by the region server - * @param store the store where flush is being requested + * @param c the environment provided by the region server + * @param store the store where flush is being requested * @param scanner the scanner over existing data used in the memstore * @param tracker tracker used to track the life cycle of a flush * @return the scanner to use during flush. Should not be {@code null} unless the implementation * is writing new store files on its own. */ default InternalScanner preFlush(ObserverContext c, Store store, - InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { + InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { return scanner; } /** * Called after the memstore is flushed to disk. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param tracker tracker used to track the life cycle of a flush * @throws IOException if an error occurred on the coprocessor */ default void postFlush(ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException {} + FlushLifeCycleTracker tracker) throws IOException { + } /** * Called after a Store's memstore is flushed to disk. - * @param c the environment provided by the region server - * @param store the store being flushed + * @param c the environment provided by the region server + * @param store the store being flushed * @param resultFile the new store file written out during compaction - * @param tracker tracker used to track the life cycle of a flush + * @param tracker tracker used to track the life cycle of a flush */ default void postFlush(ObserverContext c, Store store, - StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException {} + StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before in memory compaction started. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param store the store where in memory compaction is being requested */ default void preMemStoreCompaction(ObserverContext c, Store store) - throws IOException {} + throws IOException { + } /** * Called before we open store scanner for in memory compaction. You can use the {@code options} * to change max versions and TTL for the scanner being opened. Notice that this method will only * be called when you use {@code eager} mode. For {@code basic} mode we will not drop any cells * thus we do not open a store scanner. - * @param c the environment provided by the region server - * @param store the store where in memory compaction is being requested + * @param c the environment provided by the region server + * @param store the store where in memory compaction is being requested * @param options used to change max versions and TTL for the scanner being opened */ default void preMemStoreCompactionCompactScannerOpen( - ObserverContext c, Store store, ScanOptions options) - throws IOException {} + ObserverContext c, Store store, ScanOptions options) + throws IOException { + } /** * Called before we do in memory compaction. Notice that this method will only be called when you * use {@code eager} mode. For {@code basic} mode we will not drop any cells thus there is no * {@link InternalScanner}. 
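A hedged sketch of using the ScanOptions handle passed to the preXXXScannerOpen hooks above; here a flush is asked to keep every version rather than what the column family descriptor would allow. readAllVersions() is the assumed knob; confirm against ScanOptions in your release.

@Override
public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
  // Keep all versions in the flushed file, e.g. so a compaction-time hook can still see them.
  options.readAllVersions();
}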
- * @param c the environment provided by the region server - * @param store the store where in memory compaction is being executed + * @param c the environment provided by the region server + * @param store the store where in memory compaction is being executed * @param scanner the scanner over existing data used in the memstore segments being compact * @return the scanner to use during in memory compaction. Must be non-null. */ default InternalScanner preMemStoreCompactionCompact( - ObserverContext c, Store store, InternalScanner scanner) - throws IOException { + ObserverContext c, Store store, InternalScanner scanner) + throws IOException { return scanner; } /** * Called after the in memory compaction is finished. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param store the store where in memory compaction is being executed */ default void postMemStoreCompaction(ObserverContext c, Store store) - throws IOException {} + throws IOException { + } /** * Called prior to selecting the {@link StoreFile StoreFiles} to compact from the list of * available candidates. To alter the files used for compaction, you may mutate the passed in list * of candidates. If you remove all the candidates then the compaction will be canceled. - *
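A hedged sketch of steering selection from the hook described above by pruning the mutable candidates list. The cap of six files is arbitrary, the generic parameters are reconstructed because the diff text above has them stripped, and removing every candidate would cancel the compaction.

@Override
public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {
  // Illustrative policy: never hand more than six files to a single compaction.
  while (candidates.size() > 6) {
    candidates.remove(candidates.size() - 1);
  }
}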

      Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed - * the passed in candidates. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server - * @param store the store where compaction is being requested + *

      + * Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed the + * passed in candidates. If 'bypass' is set, we skip out on calling any subsequent + * chained coprocessors. + * @param c the environment provided by the region server + * @param store the store where compaction is being requested * @param candidates the store files currently available for compaction - * @param tracker tracker used to track the life cycle of a compaction + * @param tracker tracker used to track the life cycle of a compaction */ default void preCompactSelection(ObserverContext c, Store store, - List candidates, CompactionLifeCycleTracker tracker) - throws IOException {} + List candidates, CompactionLifeCycleTracker tracker) throws IOException { + } /** * Called after the {@link StoreFile}s to compact have been selected from the available * candidates. - * @param c the environment provided by the region server - * @param store the store being compacted + * @param c the environment provided by the region server + * @param store the store being compacted * @param selected the store files selected to compact - * @param tracker tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void postCompactSelection(ObserverContext c, Store store, - List selected, CompactionLifeCycleTracker tracker, - CompactionRequest request) {} + List selected, CompactionLifeCycleTracker tracker, + CompactionRequest request) { + } /** - * Called before we open store scanner for compaction. You can use the {@code options} to change max - * versions and TTL for the scanner being opened. - * @param c the environment provided by the region server - * @param store the store being compacted + * Called before we open store scanner for compaction. You can use the {@code options} to change + * max versions and TTL for the scanner being opened. + * @param c the environment provided by the region server + * @param store the store being compacted * @param scanType type of Scan - * @param options used to change max versions and TTL for the scanner being opened - * @param tracker tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param options used to change max versions and TTL for the scanner being opened + * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void preCompactScannerOpen(ObserverContext c, Store store, - ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException {} + ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { + } /** * Called prior to writing the {@link StoreFile}s selected for compaction into a new @@ -267,130 +277,135 @@ default void preCompactScannerOpen(ObserverContext * {@link InternalScanner} with a custom implementation that is returned from this method. The * custom scanner can then inspect {@link org.apache.hadoop.hbase.Cell}s from the wrapped scanner, * applying its own policy to what gets written. 
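A hedged sketch of the wrapping idea described above: the returned InternalScanner delegates to the compaction scanner and drops cells of an illustrative 'tmp' family so they are never rewritten. It assumes the two-method InternalScanner shape (next(List, ScannerContext) plus close()); a production version should also respect ScannerContext limits and batching.

@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  byte[] droppedFamily = Bytes.toBytes("tmp"); // illustrative
  return new InternalScanner() {
    @Override
    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
      boolean more = scanner.next(result, scannerContext);
      // Anything removed here never reaches the new store file.
      result.removeIf(cell -> CellUtil.matchingFamily(cell, droppedFamily));
      return more;
    }

    @Override
    public void close() throws IOException {
      scanner.close();
    }
  };
}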
- * @param c the environment provided by the region server - * @param store the store being compacted - * @param scanner the scanner over existing data used in the store file rewriting + * @param c the environment provided by the region server + * @param store the store being compacted + * @param scanner the scanner over existing data used in the store file rewriting * @param scanType type of Scan - * @param tracker tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param tracker tracker used to track the life cycle of a compaction + * @param request the requested compaction * @return the scanner to use during compaction. Should not be {@code null} unless the * implementation is writing new store files on its own. */ default InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { return scanner; } /** * Called after compaction has completed and the new store file has been moved in to place. - * @param c the environment provided by the region server - * @param store the store being compacted + * @param c the environment provided by the region server + * @param store the store being compacted * @param resultFile the new store file written out during compaction - * @param tracker used to track the life cycle of a compaction - * @param request the requested compaction + * @param tracker used to track the life cycle of a compaction + * @param request the requested compaction */ default void postCompact(ObserverContext c, Store store, - StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) - throws IOException {} + StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) + throws IOException { + } /** * Called before the region is reported as closed to the master. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting */ default void preClose(ObserverContext c, boolean abortRequested) - throws IOException {} + throws IOException { + } /** * Called after the region is reported as closed to the master. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting */ - default void postClose(ObserverContext c, boolean abortRequested) {} + default void postClose(ObserverContext c, boolean abortRequested) { + } /** * Called before the client performs a Get *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server - * @param get the Get request - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be used if default processing - * is not bypassed. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. + * @param c the environment provided by the region server + * @param get the Get request + * @param result The result to return to the client if default processing is bypassed. Can be + * modified. Will not be used if default processing is not bypassed. */ default void preGetOp(ObserverContext c, Get get, List result) - throws IOException {} + throws IOException { + } /** * Called after the client performs a Get *

      - * Note: Do not retain references to any Cells in 'result' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param get the Get request + * Note: Do not retain references to any Cells in 'result' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param get the Get request * @param result the result to return to the client, modify as necessary */ default void postGetOp(ObserverContext c, Get get, - List result) throws IOException {} + List result) throws IOException { + } /** * Called before the client tests for existence using a Get. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server - * @param get the Get request + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. + * @param c the environment provided by the region server + * @param get the Get request * @param exists the result returned by the region server * @return the value to return to the client if bypassing default processing */ default boolean preExists(ObserverContext c, Get get, - boolean exists) throws IOException { + boolean exists) throws IOException { return exists; } /** * Called after the client tests for existence using a Get. - * @param c the environment provided by the region server - * @param get the Get request + * @param c the environment provided by the region server + * @param get the Get request * @param exists the result returned by the region server * @return the result to return to the client */ default boolean postExists(ObserverContext c, Get get, - boolean exists) throws IOException { + boolean exists) throws IOException { return exists; } /** * Called before the client stores a value. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object - * @param edit The WALEdit object that will be written to the wal + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object + * @param edit The WALEdit object that will be written to the wal * @param durability Persistence guarantee for this Put * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #prePut(ObserverContext, Put, WALEdit)} instead. + * {@link #prePut(ObserverContext, Put, WALEdit)} instead. */ @Deprecated default void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException {} + Durability durability) throws IOException { + } /** * Called before the client stores a value. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object * @param edit The WALEdit object that will be written to the wal */ default void prePut(ObserverContext c, Put put, WALEdit edit) @@ -401,26 +416,27 @@ default void prePut(ObserverContext c, Put put, WA /** * Called after the client stores a value. *
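A hedged sketch of the non-deprecated prePut variant shown above, refusing writes that touch an illustrative reserved column family; it would sit in a RegionObserver implementation like the one sketched earlier.

@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit)
    throws IOException {
  byte[] reserved = Bytes.toBytes("sys"); // illustrative family name
  if (put.getFamilyCellMap().containsKey(reserved)) {
    // Throwing fails the operation for the client; falling through lets the core put run.
    throw new DoNotRetryIOException("writes to column family 'sys' are not allowed");
  }
}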

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object - * @param edit The WALEdit object for the wal + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object + * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Put * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postPut(ObserverContext, Put, WALEdit)} instead. + * {@link #postPut(ObserverContext, Put, WALEdit)} instead. */ @Deprecated default void postPut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException {} + Durability durability) throws IOException { + } /** * Called after the client stores a value. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param put The Put object + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param put The Put object * @param edit The WALEdit object for the wal */ default void postPut(ObserverContext c, Put put, WALEdit edit) @@ -431,33 +447,34 @@ default void postPut(ObserverContext c, Put put, W /** * Called before the client deletes a value. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param delete The Delete object - * @param edit The WALEdit object for the wal + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param delete The Delete object + * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Delete * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preDelete(ObserverContext, Delete, WALEdit)} instead. + * {@link #preDelete(ObserverContext, Delete, WALEdit)} instead. */ @Deprecated default void preDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException {} + WALEdit edit, Durability durability) throws IOException { + } /** * Called before the client deletes a value. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param delete The Delete object - * @param edit The WALEdit object for the wal + * @param edit The WALEdit object for the wal */ default void preDelete(ObserverContext c, Delete delete, WALEdit edit) throws IOException { @@ -467,45 +484,47 @@ default void preDelete(ObserverContext c, Delete d /** * Called before the server updates the timestamp for version delete with latest timestamp. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. - * @param c the environment provided by the region server + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. + * @param c the environment provided by the region server * @param mutation - the parent mutation associated with this delete cell - * @param cell - The deleteColumn with latest version cell - * @param byteNow - timestamp bytes - * @param get - the get formed using the current cell's row. Note that the get does not specify - * the family and qualifier - * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced - * with something that doesn't expose IntefaceAudience.Private classes. + * @param cell - The deleteColumn with latest version cell + * @param byteNow - timestamp bytes + * @param get - the get formed using the current cell's row. Note that the get does not + * specify the family and qualifier + * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with + * something that doesn't expose IntefaceAudience.Private classes. */ @Deprecated default void prePrepareTimeStampForDeleteVersion(ObserverContext c, - Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException {} + Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException { + } /** * Called after the client deletes a value. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param delete The Delete object - * @param edit The WALEdit object for the wal + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param delete The Delete object + * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Delete * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postDelete(ObserverContext, Delete, WALEdit)} instead. + * {@link #postDelete(ObserverContext, Delete, WALEdit)} instead. */ @Deprecated default void postDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException {} + WALEdit edit, Durability durability) throws IOException { + } /** * Called after the client deletes a value. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param delete The Delete object - * @param edit The WALEdit object for the wal + * @param edit The WALEdit object for the wal */ default void postDelete(ObserverContext c, Delete delete, WALEdit edit) throws IOException { @@ -515,113 +534,113 @@ default void postDelete(ObserverContext c, Delete /** * This will be called for every batch mutation operation happening at the server. This will be * called after acquiring the locks on the mutating rows and after applying the proper timestamp - * for each Mutation at the server. The batch may contain Put/Delete/Increment/Append. By - * setting OperationStatus of Mutations + * for each Mutation at the server. The batch may contain Put/Delete/Increment/Append. By setting + * OperationStatus of Mutations * ({@link MiniBatchOperationInProgress#setOperationStatus(int, OperationStatus)}), * {@link RegionObserver} can make Region to skip these Mutations. *

      - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations getting applied to region. */ default void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException {} + MiniBatchOperationInProgress miniBatchOp) throws IOException { + } /** - * This will be called after applying a batch of Mutations on a region. The Mutations are added - * to memstore and WAL. The difference of this one with - * {@link #postPut(ObserverContext, Put, WALEdit)} - * and {@link #postDelete(ObserverContext, Delete, WALEdit)} - * and {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} - * and {@link #postAppend(ObserverContext, Append, Result, WALEdit)} is - * this hook will be executed before the mvcc transaction completion. + * This will be called after applying a batch of Mutations on a region. The Mutations are added to + * memstore and WAL. The difference of this one with + * {@link #postPut(ObserverContext, Put, WALEdit)} and + * {@link #postDelete(ObserverContext, Delete, WALEdit)} and + * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} and + * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} is this hook will be executed + * before the mvcc transaction completion. *
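A hedged sketch of the skip mechanism described above: marking one mutation's OperationStatus as SUCCESS inside preBatchMutate so the region treats it as done without applying it. The predicate, dropping deletes against an illustrative 'im' family, is purely illustrative.

@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  byte[] immutableFamily = Bytes.toBytes("im"); // illustrative
  for (int i = 0; i < miniBatchOp.size(); i++) {
    Mutation m = miniBatchOp.getOperation(i);
    if (m instanceof Delete && m.getFamilyCellMap().containsKey(immutableFamily)) {
      // Marked SUCCESS, the region skips applying this mutation but reports it as done.
      miniBatchOp.setOperationStatus(i, OperationStatus.SUCCESS);
    }
  }
}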

      - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations applied to region. Coprocessors are discouraged from * manipulating its state. */ // Coprocessors can do a form of bypass by changing state in miniBatchOp. default void postBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException {} + MiniBatchOperationInProgress miniBatchOp) throws IOException { + } /** * This will be called for region operations where read lock is acquired in - * {@link Region#startRegionOperation()}. - * @param ctx - * @param operation The operation is about to be taken on the region + * {@link Region#startRegionOperation()}. n * @param operation The operation is about to be taken + * on the region */ default void postStartRegionOperation(ObserverContext ctx, - Operation operation) throws IOException {} + Operation operation) throws IOException { + } /** - * Called after releasing read lock in {@link Region#closeRegionOperation()}. - * @param ctx - * @param operation + * Called after releasing read lock in {@link Region#closeRegionOperation()}. nn */ default void postCloseRegionOperation(ObserverContext ctx, - Operation operation) throws IOException {} + Operation operation) throws IOException { + } /** - * Called after the completion of batch put/delete/increment/append and will be called even if - * the batch operation fails. + * Called after the completion of batch put/delete/increment/append and will be called even if the + * batch operation fails. *

      - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param ctx - * @param miniBatchOp - * @param success true if batch operation is successful otherwise false. + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. nn * @param success true if + * batch operation is successful otherwise false. */ default void postBatchMutateIndispensably(ObserverContext ctx, - MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException {} + MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException { + } /** * Called before checkAndPut. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param put data to put if check succeeds - * @param result the default value of the result + * @param put data to put if check succeeds + * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPut(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, - boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, + boolean result) throws IOException { return result; } /** * Called before checkAndPut. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter - * @param put data to put if check succeeds + * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPut(ObserverContext c, byte[] row, @@ -632,58 +651,56 @@ default boolean preCheckAndPut(ObserverContext c, /** * Called before checkAndPut but after acquiring rowlock. *

      - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param put data to put if check succeeds - * @param result the default value of the result + * @param put data to put if check succeeds + * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPutAfterRowLock(ObserverContext c, - byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - ByteArrayComparable comparator, Put put, boolean result) throws IOException { + byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Put put, boolean result) throws IOException { return result; } /** * Called before checkAndPut but after acquiring rowlock. *

      - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter - * @param put data to put if check succeeds + * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPutAfterRowLock(ObserverContext c, @@ -694,42 +711,42 @@ default boolean preCheckAndPutAfterRowLock(ObserverContext - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param put data to put if check succeeds - * @param result from the checkAndPut + * @param put data to put if check succeeds + * @param result from the checkAndPut * @return the possibly transformed return value to return to client - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndPut(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, - boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, + boolean result) throws IOException { return result; } /** * Called after checkAndPut *

      - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter - * @param put data to put if check succeeds + * @param put data to put if check succeeds * @param result from the checkAndPut * @return the possibly transformed return value to return to client - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndPut(ObserverContext c, byte[] row, @@ -740,48 +757,48 @@ default boolean postCheckAndPut(ObserverContext c, /** * Called before checkAndDelete. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param delete delete to commit if check succeeds - * @param result the default value of the result + * @param delete delete to commit if check succeeds + * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDelete(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { return result; } /** * Called before checkAndDelete. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDelete(ObserverContext c, byte[] row, @@ -792,58 +809,56 @@ default boolean preCheckAndDelete(ObserverContext /** * Called before checkAndDelete but after acquiring rowlock. *

      - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param delete delete to commit if check succeeds - * @param result the default value of the result + * @param delete delete to commit if check succeeds + * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, - byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - ByteArrayComparable comparator, Delete delete, boolean result) throws IOException { + byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { return result; } /** * Called before checkAndDelete but after acquiring rowlock. *

      - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, @@ -854,42 +869,42 @@ default boolean preCheckAndDeleteAfterRowLock(ObserverContext - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check - * @param family column family - * @param qualifier column qualifier - * @param op the comparison operation + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check + * @param family column family + * @param qualifier column qualifier + * @param op the comparison operation * @param comparator the comparator - * @param delete delete to commit if check succeeds - * @param result from the CheckAndDelete + * @param delete delete to commit if check succeeds + * @param result from the CheckAndDelete * @return the possibly transformed returned value to return to client - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndDelete(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { return result; } /** * Called after checkAndDelete *

      - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param row row to check + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server + * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result from the CheckAndDelete * @return the possibly transformed returned value to return to client - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndDelete(ObserverContext c, byte[] row, @@ -900,14 +915,14 @@ default boolean postCheckAndDelete(ObserverContext /** * Called before checkAndMutate *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object - * @param result the default value of the result + * @param result the default value of the result * @return the return value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -944,18 +959,18 @@ default CheckAndMutateResult preCheckAndMutate(ObserverContext - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object - * @param result the default value of the result + * @param result the default value of the result * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -965,13 +980,13 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( if (checkAndMutate.getAction() instanceof Put) { boolean success; if (checkAndMutate.hasFilter()) { - success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Put) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } else if (checkAndMutate.getAction() instanceof Delete) { @@ -981,9 +996,9 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); } else { success = preCheckAndDeleteAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), - (Delete) checkAndMutate.getAction(), result.isSuccess()); + checkAndMutate.getFamily(), checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Delete) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } @@ -993,11 +1008,11 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( /** * Called after checkAndMutate *

      - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object - * @param result from the checkAndMutate + * @param result from the checkAndMutate * @return the possibly transformed returned value to return to client * @throws IOException if an error occurred on the coprocessor */ @@ -1006,25 +1021,25 @@ default CheckAndMutateResult postCheckAndMutate(ObserverContext - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *
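As the deprecation notes above say, the per-operation checkAndPut/checkAndDelete hooks are folded into preCheckAndMutate/postCheckAndMutate. A minimal, untested sketch of an observer written against the replacement hook; the class name, the guarded row prefix, and the use of Bytes.startsWith are illustrative assumptions, not part of this patch:

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.client.CheckAndMutate;
  import org.apache.hadoop.hbase.client.CheckAndMutateResult;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.util.Bytes;

  public class GuardedRowObserver implements RegionCoprocessor, RegionObserver {

    // Hypothetical prefix of rows that clients may not modify through checkAndMutate.
    private static final byte[] GUARDED_PREFIX = Bytes.toBytes("sys:");

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public CheckAndMutateResult preCheckAndMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException {
      if (Bytes.startsWith(checkAndMutate.getRow(), GUARDED_PREFIX)) {
        // Skip default processing and any remaining coprocessors; report the check as failed.
        c.bypass();
        return new CheckAndMutateResult(false, null);
      }
      return result;
    }
  }

Returning the passed-in result leaves default processing (and later coprocessors in the chain) untouched; bypassing makes the returned CheckAndMutateResult the answer the client sees.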

      - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preAppend(ObserverContext, Append, WALEdit)} instead. + * {@link #preAppend(ObserverContext, Append, WALEdit)} instead. */ @Deprecated default Result preAppend(ObserverContext c, Append append) @@ -1054,14 +1069,14 @@ default Result preAppend(ObserverContext c, Append /** * Called before Append. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object - * @param edit The WALEdit object that will be written to the wal + * @param edit The WALEdit object that will be written to the wal * @return result to return to the client if bypassing default processing */ default Result preAppend(ObserverContext c, Append append, @@ -1072,20 +1087,20 @@ default Result preAppend(ObserverContext c, Append /** * Called before Append but after acquiring rowlock. *

      - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. + * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. */ @Deprecated default Result preAppendAfterRowLock(ObserverContext c, @@ -1096,14 +1111,14 @@ default Result preAppendAfterRowLock(ObserverContext - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @param result the result returned by increment * @return the result to return to the client * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} instead. + * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} instead. */ @Deprecated default Result postAppend(ObserverContext c, Append append, @@ -1114,12 +1129,12 @@ default Result postAppend(ObserverContext c, Appen /** * Called after Append *

      - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. + * @param c the environment provided by the region server * @param append Append object * @param result the result returned by increment - * @param edit The WALEdit object for the wal + * @param edit The WALEdit object for the wal * @return the result to return to the client */ default Result postAppend(ObserverContext c, Append append, @@ -1130,16 +1145,16 @@ default Result postAppend(ObserverContext c, Appen /** * Called before Increment. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preIncrement(ObserverContext, Increment, WALEdit)} instead. + * {@link #preIncrement(ObserverContext, Increment, WALEdit)} instead. */ @Deprecated default Result preIncrement(ObserverContext c, Increment increment) @@ -1150,14 +1165,14 @@ default Result preIncrement(ObserverContext c, Inc /** * Called before Increment. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object - * @param edit The WALEdit object that will be written to the wal + * @param edit The WALEdit object that will be written to the wal * @return result to return to the client if bypassing default processing */ default Result preIncrement(ObserverContext c, Increment increment, @@ -1168,21 +1183,20 @@ default Result preIncrement(ObserverContext c, Inc /** * Called before Increment but after acquiring rowlock. *

      - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. + * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. */ @Deprecated default Result preIncrementAfterRowLock(ObserverContext c, @@ -1195,12 +1209,12 @@ default Result preIncrementAfterRowLock(ObserverContext * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object - * @param result the result returned by increment + * @param result the result returned by increment * @return the result to return to the client * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} instead. + * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} instead. */ @Deprecated default Result postIncrement(ObserverContext c, Increment increment, @@ -1213,10 +1227,10 @@ default Result postIncrement(ObserverContext c, In *

      * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param increment increment object - * @param result the result returned by increment - * @param edit The WALEdit object for the wal + * @param result the result returned by increment + * @param edit The WALEdit object for the wal * @return the result to return to the client */ default Result postIncrement(ObserverContext c, Increment increment, @@ -1229,11 +1243,11 @@ default Result postIncrement(ObserverContext c, In *
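The preIncrement overload that also receives the WALEdit replaces the deprecated two-argument form above. A hedged sketch of rejecting increments against one column family; the observer class, the frozen family name, and the use of Result.EMPTY_RESULT and Mutation#getFamilyCellMap are assumptions made for illustration:

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.client.Increment;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.wal.WALEdit;

  public class ReadOnlyCounterObserver implements RegionCoprocessor, RegionObserver {

    // Hypothetical column family whose counters clients may not increment.
    private static final byte[] FROZEN_FAMILY = Bytes.toBytes("frozen");

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment,
        WALEdit edit) throws IOException {
      if (increment.getFamilyCellMap().containsKey(FROZEN_FAMILY)) {
        c.bypass(); // skip the default increment and any remaining coprocessors
        return Result.EMPTY_RESULT; // what the client will receive
      }
      return null; // no bypass: the return value is ignored and normal processing continues
    }
  }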

      * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param scan the Scan specification */ default void preScannerOpen(ObserverContext c, Scan scan) - throws IOException { + throws IOException { } /** @@ -1241,35 +1255,34 @@ default void preScannerOpen(ObserverContext c, Sca *
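Because preScannerOpen runs before the region scanner is created, the Scan handed to it can still be adjusted in place. A small sketch under that assumption; the class name and the tuning values are illustrative, not recommendations:

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;

  public class ScanTuningObserver implements RegionCoprocessor, RegionObserver {

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan)
        throws IOException {
      // Keep large ad-hoc scans out of the block cache.
      scan.setCacheBlocks(false);
      // If the client did not set caching, pick a hypothetical default batch size.
      if (scan.getCaching() <= 0) {
        scan.setCaching(100);
      }
    }
  }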

      * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server + * @param c the environment provided by the region server * @param scan the Scan specification - * @param s if not null, the base scanner + * @param s if not null, the base scanner * @return the scanner instance to use */ default RegionScanner postScannerOpen(ObserverContext c, Scan scan, - RegionScanner s) throws IOException { + RegionScanner s) throws IOException { return s; } /** * Called before the client asks for the next row on a scanner. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

      * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param s the scanner - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be returned if default processing - * is not bypassed. - * @param limit the maximum number of results to return + * @param c the environment provided by the region server + * @param s the scanner + * @param result The result to return to the client if default processing is bypassed. Can be + * modified. Will not be returned if default processing is not bypassed. + * @param limit the maximum number of results to return * @param hasNext the 'has more' indication * @return 'has more' indication that should be sent to client */ default boolean preScannerNext(ObserverContext c, InternalScanner s, - List result, int limit, boolean hasNext) throws IOException { + List result, int limit, boolean hasNext) throws IOException { return hasNext; } @@ -1278,15 +1291,15 @@ default boolean preScannerNext(ObserverContext c, *

      * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param s the scanner - * @param result the result to return to the client, can be modified - * @param limit the maximum number of results to return + * @param c the environment provided by the region server + * @param s the scanner + * @param result the result to return to the client, can be modified + * @param limit the maximum number of results to return * @param hasNext the 'has more' indication * @return 'has more' indication that should be sent to client */ default boolean postScannerNext(ObserverContext c, - InternalScanner s, List result, int limit, boolean hasNext) throws IOException { + InternalScanner s, List result, int limit, boolean hasNext) throws IOException { return hasNext; } @@ -1294,46 +1307,46 @@ default boolean postScannerNext(ObserverContext c, * This will be called by the scan flow when the current scanned row is being filtered out by the * filter. The filter may be filtering out the row via any of the below scenarios *

      - * boolean filterRowKey(byte [] buffer, int offset, int length) returning true
      - * boolean filterRow() returning true
      - * default void filterRow(List<KeyValue> kvs) removing all the kvs from
      - * the passed List
      + * boolean filterRowKey(byte [] buffer, int offset, int length) returning
      + * true
      + * boolean filterRow() returning true
      + * default void filterRow(List<KeyValue> kvs) removing all the kvs from the
      + * passed List
        *

      * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. - * @param c the environment provided by the region server - * @param s the scanner + * @param c the environment provided by the region server + * @param s the scanner * @param curRowCell The cell in the current row which got filtered out - * @param hasMore the 'has more' indication + * @param hasMore the 'has more' indication * @return whether more rows are available for the scanner or not */ default boolean postScannerFilterRow(ObserverContext c, - InternalScanner s, Cell curRowCell, boolean hasMore) throws IOException { + InternalScanner s, Cell curRowCell, boolean hasMore) throws IOException { return hasMore; } /** * Called before the client closes a scanner. *

      - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. * @param c the environment provided by the region server * @param s the scanner */ default void preScannerClose(ObserverContext c, InternalScanner s) - throws IOException {} + throws IOException { + } /** * Called after the client closes a scanner. * @param ctx the environment provided by the region server - * @param s the scanner + * @param s the scanner */ default void postScannerClose(ObserverContext ctx, - InternalScanner s) throws IOException {} + InternalScanner s) throws IOException { + } /** * Called before a store opens a new scanner. @@ -1346,136 +1359,134 @@ default void postScannerClose(ObserverContext ctx, * {@code preScannerOpen}, but if the max versions config on the Store is 1, then you still can * only read 1 version. You need also to inject here to change the max versions to 10 if you want * to get more versions. - * @param ctx the environment provided by the region server - * @param store the store which we want to get scanner from + * @param ctx the environment provided by the region server + * @param store the store which we want to get scanner from * @param options used to change max versions and TTL for the scanner being opened * @see #preFlushScannerOpen(ObserverContext, Store, ScanOptions, FlushLifeCycleTracker) * @see #preCompactScannerOpen(ObserverContext, Store, ScanType, ScanOptions, * CompactionLifeCycleTracker, CompactionRequest) */ default void preStoreScannerOpen(ObserverContext ctx, Store store, - ScanOptions options) throws IOException {} + ScanOptions options) throws IOException { + } /** - * Called before replaying WALs for this region. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * @param ctx the environment provided by the region server - * @param info the RegionInfo for this region + * Called before replaying WALs for this region. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param info the RegionInfo for this region * @param edits the file of recovered edits */ // todo: what about these? default void preReplayWALs(ObserverContext ctx, - RegionInfo info, Path edits) throws IOException {} + RegionInfo info, Path edits) throws IOException { + } /** * Called after replaying WALs for this region. - * @param ctx the environment provided by the region server - * @param info the RegionInfo for this region + * @param ctx the environment provided by the region server + * @param info the RegionInfo for this region * @param edits the file of recovered edits */ default void postReplayWALs(ObserverContext ctx, - RegionInfo info, Path edits) throws IOException {} + RegionInfo info, Path edits) throws IOException { + } /** - * Called before a {@link WALEdit} - * replayed for this region. + * Called before a {@link WALEdit} replayed for this region. * @param ctx the environment provided by the region server */ default void preWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called after a {@link WALEdit} - * replayed for this region. 
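The preStoreScannerOpen javadoc above describes letting a scan read more versions than the column family's VERSIONS setting allows by adjusting the ScanOptions. A sketch of that idea, assuming ScanOptions#setMaxVersions is the knob meant by "used to change max versions and TTL"; the observer class name is illustrative:

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.regionserver.ScanOptions;
  import org.apache.hadoop.hbase.regionserver.Store;

  public class MultiVersionReadObserver implements RegionCoprocessor, RegionObserver {

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
        ScanOptions options) throws IOException {
      // Allow user scans to see up to 10 versions even if the family is declared with
      // VERSIONS => 1. As the javadoc notes, the Scan itself must also request the extra
      // versions (e.g. in preScannerOpen) for them to reach the client.
      options.setMaxVersions(10);
    }
  }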
+ * Called after a {@link WALEdit} replayed for this region. * @param ctx the environment provided by the region server */ default void postWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called before bulkLoadHFile. Users can create a StoreFile instance to - * access the contents of a HFile. - * - * @param ctx the environment provided by the region server - * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. Adding - * or removing from this list will add or remove HFiles to be bulk loaded. + * Called before bulkLoadHFile. Users can create a StoreFile instance to access the contents of a + * HFile. + * @param ctx the environment provided by the region server + * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. Adding or removing from + * this list will add or remove HFiles to be bulk loaded. */ default void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException {} + List> familyPaths) throws IOException { + } /** * Called before moving bulk loaded hfile to region directory. - * - * @param ctx the environment provided by the region server + * @param ctx the environment provided by the region server * @param family column family - * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir } - * Each pair are for the same hfile. + * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir } Each + * pair are for the same hfile. */ default void preCommitStoreFile(ObserverContext ctx, byte[] family, - List> pairs) throws IOException {} + List> pairs) throws IOException { + } /** * Called after moving bulk loaded hfile to region directory. - * - * @param ctx the environment provided by the region server - * @param family column family + * @param ctx the environment provided by the region server + * @param family column family * @param srcPath Path to file before the move * @param dstPath Path to file after the move */ default void postCommitStoreFile(ObserverContext ctx, byte[] family, - Path srcPath, Path dstPath) throws IOException {} + Path srcPath, Path dstPath) throws IOException { + } /** * Called after bulkLoadHFile. - * - * @param ctx the environment provided by the region server + * @param ctx the environment provided by the region server * @param stagingFamilyPaths pairs of { CF, HFile path } submitted for bulk load - * @param finalPaths Map of CF to List of file paths for the loaded files - * if the Map is not null, the bulkLoad was successful. Otherwise the bulk load failed. - * bulkload is done by the time this hook is called. + * @param finalPaths Map of CF to List of file paths for the loaded files if the Map is + * not null, the bulkLoad was successful. Otherwise the bulk load + * failed. bulkload is done by the time this hook is called. */ default void postBulkLoadHFile(ObserverContext ctx, - List> stagingFamilyPaths, Map> finalPaths) - throws IOException { + List> stagingFamilyPaths, Map> finalPaths) + throws IOException { } /** - * Called before creation of Reader for a store file. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. 
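preBulkLoadHFile above documents that adding or removing entries in familyPaths adds or removes HFiles from the bulk load. A sketch that silently drops files aimed at one protected family; the class name, the family name, and the use of List#removeIf are illustrative assumptions:

  import java.io.IOException;
  import java.util.List;
  import java.util.Optional;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.util.Pair;

  public class BulkLoadFilterObserver implements RegionCoprocessor, RegionObserver {

    // Hypothetical column family into which bulk loads are not allowed.
    private static final byte[] PROTECTED_FAMILY = Bytes.toBytes("audit");

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
        List<Pair<byte[], String>> familyPaths) throws IOException {
      // Entries removed here are removed from the bulk load, per the javadoc above.
      familyPaths.removeIf(p -> Bytes.equals(p.getFirst(), PROTECTED_FAMILY));
    }
  }

Throwing an IOException instead of pruning the list would fail the whole bulk load, which may be the more conservative choice.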
- * - * @param ctx the environment provided by the region server - * @param fs fileystem to read from - * @param p path to the file - * @param in {@link FSDataInputStreamWrapper} - * @param size Full size of the file - * @param cacheConf - * @param r original reference file. This will be not null only when reading a split file. + * Called before creation of Reader for a store file. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param fs fileystem to read from + * @param p path to the file + * @param in {@link FSDataInputStreamWrapper} + * @param size Full size of the file n * @param r original reference file. This will be not null + * only when reading a split file. * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain - * @return a Reader instance to use instead of the base reader if overriding - * default behavior, null otherwise + * @return a Reader instance to use instead of the base reader if overriding default behavior, + * null otherwise * @deprecated For Phoenix only, StoreFileReader is not a stable interface. */ @Deprecated // Passing InterfaceAudience.Private args FSDataInputStreamWrapper, CacheConfig and Reference. // This is fine as the hook is deprecated any way. default StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, - FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, StoreFileReader reader) throws IOException { + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFileReader reader) throws IOException { return reader; } /** * Called after the creation of Reader for a store file. - * - * @param ctx the environment provided by the region server - * @param fs fileystem to read from - * @param p path to the file - * @param in {@link FSDataInputStreamWrapper} - * @param size Full size of the file - * @param cacheConf - * @param r original reference file. This will be not null only when reading a split file. + * @param ctx the environment provided by the region server + * @param fs fileystem to read from + * @param p path to the file + * @param in {@link FSDataInputStreamWrapper} + * @param size Full size of the file n * @param r original reference file. This will be not null + * only when reading a split file. * @param reader the base reader instance * @return The reader to use * @deprecated For Phoenix only, StoreFileReader is not a stable interface. @@ -1484,106 +1495,100 @@ default StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, - FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, StoreFileReader reader) throws IOException { + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFileReader reader) throws IOException { return reader; } /** - * Called after a new cell has been created during an increment operation, but before - * it is committed to the WAL or memstore. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * @param ctx the environment provided by the region server - * @param opType the operation type + * Called after a new cell has been created during an increment operation, but before it is + * committed to the WAL or memstore. 
Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. + * @param ctx the environment provided by the region server + * @param opType the operation type * @param mutation the current mutation - * @param oldCell old cell containing previous value - * @param newCell the new cell containing the computed value + * @param oldCell old cell containing previous value + * @param newCell the new cell containing the computed value * @return the new cell, possibly changed * @deprecated since 2.2.0 and will be removedin 4.0.0. Use - * {@link #postIncrementBeforeWAL(ObserverContext, Mutation, List)} or - * {@link #postAppendBeforeWAL(ObserverContext, Mutation, List)} instead. + * {@link #postIncrementBeforeWAL(ObserverContext, Mutation, List)} or + * {@link #postAppendBeforeWAL(ObserverContext, Mutation, List)} instead. * @see #postIncrementBeforeWAL(ObserverContext, Mutation, List) * @see #postAppendBeforeWAL(ObserverContext, Mutation, List) * @see HBASE-21643 */ @Deprecated default Cell postMutationBeforeWAL(ObserverContext ctx, - MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException { + MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException { return newCell; } /** * Called after a list of new cells has been created during an increment operation, but before * they are committed to the WAL or memstore. - * * @param ctx the environment provided by the region server * @param mutation the current mutation - * @param cellPairs a list of cell pair. The first cell is old cell which may be null. - * And the second cell is the new cell. + * @param cellPairs a list of cell pair. The first cell is old cell which may be null. And the + * second cell is the new cell. * @return a list of cell pair, possibly changed. */ default List> postIncrementBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs.add(new Pair<>(pair.getFirst(), - postMutationBeforeWAL(ctx, MutationType.INCREMENT, mutation, pair.getFirst(), - pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), postMutationBeforeWAL(ctx, MutationType.INCREMENT, + mutation, pair.getFirst(), pair.getSecond()))); } return resultPairs; } /** - * Called after a list of new cells has been created during an append operation, but before - * they are committed to the WAL or memstore. - * + * Called after a list of new cells has been created during an append operation, but before they + * are committed to the WAL or memstore. * @param ctx the environment provided by the region server * @param mutation the current mutation - * @param cellPairs a list of cell pair. The first cell is old cell which may be null. - * And the second cell is the new cell. + * @param cellPairs a list of cell pair. The first cell is old cell which may be null. And the + * second cell is the new cell. * @return a list of cell pair, possibly changed. 
*/ default List> postAppendBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs.add(new Pair<>(pair.getFirst(), - postMutationBeforeWAL(ctx, MutationType.APPEND, mutation, pair.getFirst(), - pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), postMutationBeforeWAL(ctx, MutationType.APPEND, + mutation, pair.getFirst(), pair.getSecond()))); } return resultPairs; } /** - * Called after the ScanQueryMatcher creates ScanDeleteTracker. Implementing - * this hook would help in creating customised DeleteTracker and returning - * the newly created DeleteTracker + * Called after the ScanQueryMatcher creates ScanDeleteTracker. Implementing this hook would help + * in creating customised DeleteTracker and returning the newly created DeleteTracker *

      * Warn: This is used by internal coprocessors. Should not be implemented by user coprocessors - * @param ctx the environment provided by the region server + * @param ctx the environment provided by the region server * @param delTracker the deleteTracker that is created by the QueryMatcher * @return the Delete Tracker * @deprecated Since 2.0 with out any replacement and will be removed in 3.0 */ @Deprecated default DeleteTracker postInstantiateDeleteTracker( - ObserverContext ctx, DeleteTracker delTracker) - throws IOException { + ObserverContext ctx, DeleteTracker delTracker) + throws IOException { return delTracker; } /** * Called just before the WAL Entry is appended to the WAL. Implementing this hook allows - * coprocessors to add extended attributes to the WALKey that then get persisted to the - * WAL, and are available to replication endpoints to use in processing WAL Entries. + * coprocessors to add extended attributes to the WALKey that then get persisted to the WAL, and + * are available to replication endpoints to use in processing WAL Entries. * @param ctx the environment provided by the region server * @param key the WALKey associated with a particular append to a WAL */ default void preWALAppend(ObserverContext ctx, WALKey key, - WALEdit edit) - throws IOException { + WALEdit edit) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java index 66d8113a87a3..60bee538c16a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
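preWALAppend above exists so a coprocessor can attach extended attributes to the WALKey before the entry is written, making them visible to replication endpoints. A sketch under the assumption that WALKey#addExtendedAttribute(String, byte[]) is the mechanism the javadoc refers to; the attribute name and value are purely illustrative:

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.wal.WALEdit;
  import org.apache.hadoop.hbase.wal.WALKey;

  public class WALTaggingObserver implements RegionCoprocessor, RegionObserver {

    // Hypothetical attribute that a replication endpoint could read back from the WAL entry.
    private static final String ORIGIN_ATTRIBUTE = "origin-cluster";

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, WALKey key,
        WALEdit edit) throws IOException {
      // Assumes WALKey#addExtendedAttribute(String, byte[]); the attribute is persisted with
      // the entry and travels with it through replication.
      key.addExtendedAttribute(ORIGIN_ATTRIBUTE, Bytes.toBytes("cluster-a"));
    }
  }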
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionServerCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java index 4a5d69a17aa4..4b2e8b5791b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -33,7 +31,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionServerCoprocessorEnvironment - extends CoprocessorEnvironment { + extends CoprocessorEnvironment { /** * @return Hosting Server's ServerName */ @@ -45,48 +43,44 @@ public interface RegionServerCoprocessorEnvironment OnlineRegions getOnlineRegions(); /** - * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *
<p>
      Be aware that operations that make use of this Connection are executed as the RegionServer + *
<p>
      + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *
<p>
      Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *
<p>
      + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *
<p>
      Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *
<p>
      + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -96,9 +90,10 @@ public interface RegionServerCoprocessorEnvironment /** * Returns a MetricRegistry that can be used to track metrics at the region server level. - * - *
<p>
      See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

      + *
<p>
      + * See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples of how + * metrics can be instantiated and used. + *

      * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index f3ccd9d3638b..dc37ac324eb5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.yetus.audience.InterfaceAudience; @@ -27,27 +25,24 @@ /** * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process. - * - * Since most implementations will be interested in only a subset of hooks, this class uses - * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *

      - * - *

      Exception Handling

      - * For all functions, exception handling is done as follows: + * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process. Since most implementations + * will be interested in only a subset of hooks, this class uses 'default' functions to avoid having + * to add unnecessary overrides. When the functions are non-empty, it's simply to satisfy the + * compiler by returning value of expected (non-void) type. It is done in a way that these default + * definitions act as no-op. So our suggestion to implementation would be to not call these + * 'default' methods from overrides.
      + *
      + *

      Exception Handling

      For all functions, exception handling is done as follows: + *
        + *
      • Exceptions of type {@link IOException} are reported back to client.
      • + *
      • For any other kind of exception: *
          - *
        • Exceptions of type {@link IOException} are reported back to client.
        • - *
        • For any other kind of exception: - *
            - *
          • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then - * the server aborts.
          • - *
          • Otherwise, coprocessor is removed from the server and - * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
          • - *
          - *
        • + *
        • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the + * server aborts.
        • + *
        • Otherwise, coprocessor is removed from the server and + * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
        • + *
        + *
      • *
      */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -57,33 +52,34 @@ public interface RegionServerObserver { * Called before stopping region server. * @param ctx the environment to interact with the framework and region server. */ - default void preStopRegionServer( - final ObserverContext ctx) throws IOException {} + default void preStopRegionServer(final ObserverContext ctx) + throws IOException { + } /** * This will be called before executing user request to roll a region server WAL. * @param ctx the environment to interact with the framework and region server. */ default void preRollWALWriterRequest( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after executing user request to roll a region server WAL. * @param ctx the environment to interact with the framework and region server. */ default void postRollWALWriterRequest( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after the replication endpoint is instantiated. - * @param ctx the environment to interact with the framework and region server. + * @param ctx the environment to interact with the framework and region server. * @param endpoint - the base endpoint for replication * @return the endpoint to use during replication. */ default ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { + ObserverContext ctx, ReplicationEndpoint endpoint) { return endpoint; } @@ -91,23 +87,23 @@ default ReplicationEndpoint postCreateReplicationEndPoint( /** * This will be called before executing replication request to shipping log entries. * @param ctx the environment to interact with the framework and region server. - * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal - * usage by AccessController. Do not use these hooks in custom co-processors. + * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal usage + * by AccessController. Do not use these hooks in custom co-processors. */ @Deprecated default void preReplicateLogEntries(final ObserverContext ctx) - throws IOException { + throws IOException { } /** * This will be called after executing replication request to shipping log entries. * @param ctx the environment to interact with the framework and region server. - * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal - * usage by AccessController. Do not use these hooks in custom co-processors. + * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal usage + * by AccessController. Do not use these hooks in custom co-processors. */ @Deprecated default void postReplicateLogEntries( - final ObserverContext ctx) throws IOException { + final ObserverContext ctx) throws IOException { } /** @@ -115,28 +111,30 @@ default void postReplicateLogEntries( * @param ctx the environment to interact with the framework and region server. */ default void preClearCompactionQueues( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after clearing compaction queues * @param ctx the environment to interact with the framework and region server. 
*/ default void postClearCompactionQueues( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called before executing procedures * @param ctx the environment to interact with the framework and region server. */ default void preExecuteProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * This will be called after executing procedures * @param ctx the environment to interact with the framework and region server. */ default void postExecuteProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java index 1deddf9407b7..f2b98b61e6b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - /** * WALCoprocessor don't support loading services using {@link #getServices()}. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java index 71c72a2e7f18..1774481f2103 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.wal.WAL; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -34,9 +32,10 @@ public interface WALCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

      + *

      + * See ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples of + * how metrics can be instantiated and used. + *

      * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java index b2fa7ca4777e..bc57dbc735a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; @@ -30,80 +27,73 @@ import org.apache.yetus.audience.InterfaceStability; /** - * It's provided to have a way for coprocessors to observe, rewrite, - * or skip WALEdits as they are being written to the WAL. - * - * Note that implementers of WALObserver will not see WALEdits that report themselves - * as empty via {@link WALEdit#isEmpty()}. - * - * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} provides - * hooks for adding logic for WALEdits in the region context during reconstruction. - * - * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.wal.WAL}. - * - * Since most implementations will be interested in only a subset of hooks, this class uses - * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *

      - * - *

      Exception Handling

      - * For all functions, exception handling is done as follows: + * It's provided to have a way for coprocessors to observe, rewrite, or skip WALEdits as they are + * being written to the WAL. Note that implementers of WALObserver will not see WALEdits that report + * themselves as empty via {@link WALEdit#isEmpty()}. + * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} provides hooks for adding logic for + * WALEdits in the region context during reconstruction. Defines coprocessor hooks for interacting + * with operations on the {@link org.apache.hadoop.hbase.wal.WAL}. Since most implementations will + * be interested in only a subset of hooks, this class uses 'default' functions to avoid having to + * add unnecessary overrides. When the functions are non-empty, it's simply to satisfy the compiler + * by returning value of expected (non-void) type. It is done in a way that these default + * definitions act as no-op. So our suggestion to implementation would be to not call these + * 'default' methods from overrides.
      + *
      + *

      Exception Handling

      For all functions, exception handling is done as follows: + *
        + *
      • Exceptions of type {@link IOException} are reported back to client.
      • + *
      • For any other kind of exception: *
          - *
        • Exceptions of type {@link IOException} are reported back to client.
        • - *
        • For any other kind of exception: - *
            - *
          • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then - * the server aborts.
          • - *
          • Otherwise, coprocessor is removed from the server and - * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
          • - *
          - *
        • + *
        • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the + * server aborts.
        • + *
        • Otherwise, coprocessor is removed from the server and + * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
        • + *
        + *
      • *
      */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface WALObserver { /** - * Called before a {@link WALEdit} - * is writen to WAL. - * Do not amend the WALKey. It is InterfaceAudience.Private. Changing the WALKey will cause - * damage. + * Called before a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is + * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose - * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in hbase-3.0.0. + * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in + * hbase-3.0.0. */ @Deprecated default void preWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called after a {@link WALEdit} - * is writen to WAL. - * Do not amend the WALKey. It is InterfaceAudience.Private. Changing the WALKey will cause - * damage. + * Called after a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is + * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose - * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in hbase-3.0.0. + * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in + * hbase-3.0.0. */ @Deprecated default void postWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** * Called before rolling the current WAL * @param oldPath the path of the current wal that we are replacing * @param newPath the path of the wal we are going to create */ - default void preWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException {} + default void preWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { + } /** * Called after rolling the current WAL * @param oldPath the path of the wal that we replaced * @param newPath the path of the wal we have created and now is the current */ - default void postWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException {} + default void postWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { + } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java index 142827be70c3..19fa8adc1e37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,39 +20,36 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; - /** * A ForeignException is an exception from another thread or process. *
<p>
      - * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures. - * When serialized for transmission we encode using Protobufs to ensure version compatibility. + * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures. When + * serialized for transmission we encode using Protobufs to ensure version compatibility. *
<p>
      - * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception - * generated locally or a ProxyThrowable that is a representation of the original exception - * created on original 'remote' source. These ProxyThrowables have their their stacks traces and - * messages overridden to reflect the original 'remote' exception. The only way these - * ProxyThrowables are generated are by this class's {@link #deserialize(byte[])} method. + * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception generated + * locally or a ProxyThrowable that is a representation of the original exception created on + * original 'remote' source. These ProxyThrowables have their their stacks traces and messages + * overridden to reflect the original 'remote' exception. The only way these ProxyThrowables are + * generated are by this class's {@link #deserialize(byte[])} method. */ @InterfaceAudience.Public @SuppressWarnings("serial") public class ForeignException extends IOException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new ForeignException that can be serialized. It is assumed that this came form a - * local source. - * @param source - * @param cause + * Create a new ForeignException that can be serialized. It is assumed that this came form a local + * source. nn */ public ForeignException(String source, Throwable cause) { super(cause); @@ -62,10 +59,8 @@ public ForeignException(String source, Throwable cause) { } /** - * Create a new ForeignException that can be serialized. It is assumed that this is locally - * generated. - * @param source - * @param msg + * Create a new ForeignException that can be serialized. It is assumed that this is locally + * generated. nn */ public ForeignException(String source, String msg) { super(new IllegalArgumentException(msg)); @@ -78,11 +73,9 @@ public String getSource() { /** * The cause of a ForeignException can be an exception that was generated on a local in process - * thread, or a thread from a 'remote' separate process. - * - * If the cause is a ProxyThrowable, we know it came from deserialization which usually means - * it came from not only another thread, but also from a remote thread. - * + * thread, or a thread from a 'remote' separate process. If the cause is a ProxyThrowable, we know + * it came from deserialization which usually means it came from not only another thread, but also + * from a remote thread. * @return true if went through deserialization, false if locally generated */ public boolean isRemote() { @@ -91,7 +84,7 @@ public boolean isRemote() { @Override public String toString() { - String className = getCause().getClass().getName() ; + String className = getCause().getClass().getName(); return className + " via " + getSource() + ":" + getLocalizedMessage(); } @@ -100,8 +93,8 @@ public String toString() { * @param trace the stack trace to convert to protobuf message * @return null if the passed stack is null. 
*/ - private static List toStackTraceElementMessages( - StackTraceElement[] trace) { + private static List + toStackTraceElementMessages(StackTraceElement[] trace) { // if there is no stack trace, ignore it and just return the message if (trace == null) return null; // build the stack trace for the message @@ -130,7 +123,7 @@ private static class ProxyThrowable extends Throwable { /** * Converts a ForeignException to an array of bytes. * @param source the name of the external exception source - * @param t the "local" external exception (local) + * @param t the "local" external exception (local) * @return protobuf serialized version of ForeignException */ public static byte[] serialize(String source, Throwable t) { @@ -141,7 +134,7 @@ public static byte[] serialize(String source, Throwable t) { } // set the stack trace, if there is one List stack = - ForeignException.toStackTraceElementMessages(t.getStackTrace()); + ForeignException.toStackTraceElementMessages(t.getStackTrace()); if (stack != null) { gemBuilder.addAllTrace(stack); } @@ -153,25 +146,22 @@ public static byte[] serialize(String source, Throwable t) { } /** - * Takes a series of bytes and tries to generate an ForeignException instance for it. - * @param bytes - * @return the ForeignExcpetion instance + * Takes a series of bytes and tries to generate an ForeignException instance for it. n * @return + * the ForeignExcpetion instance * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown. */ - public static ForeignException deserialize(byte[] bytes) - throws IOException { + public static ForeignException deserialize(byte[] bytes) throws IOException { // figure out the data we need to pass ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes); GenericExceptionMessage gem = eem.getGenericException(); - StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList()); + StackTraceElement[] trace = ForeignException.toStackTrace(gem.getTraceList()); ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace); ForeignException e = new ForeignException(eem.getSource(), dfe); return e; } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). 
@@ -183,8 +173,8 @@ private static StackTraceElement[] toStackTrace(List t StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.getFileName(), elem.getLineNumber()); } return trace; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java index b2ed0c267da4..22b208bf1476 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,24 +19,23 @@ import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * The dispatcher acts as the state holding entity for foreign error handling. The first - * exception received by the dispatcher get passed directly to the listeners. Subsequent - * exceptions are dropped. + * The dispatcher acts as the state holding entity for foreign error handling. The first exception + * received by the dispatcher get passed directly to the listeners. Subsequent exceptions are + * dropped. *
<p>
      * If there are multiple dispatchers that are all in the same foreign exception monitoring group, * ideally all these monitors are "peers" -- any error on one dispatcher should get propagated to - * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason - * for failure may be different on different peers, but the fact that they are in error state - * should eventually hold on all. + * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason + * for failure may be different on different peers, but the fact that they are in error state should + * eventually hold on all. *
<p>
      - * This is thread-safe and must be because this is expected to be used to propagate exceptions - * from foreign threads. + * This is thread-safe and must be because this is expected to be used to propagate exceptions from + * foreign threads. */ @InterfaceAudience.Private public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare { @@ -62,7 +61,7 @@ public synchronized void receive(ForeignException e) { // if we already have an exception, then ignore it if (exception != null) return; - LOG.debug(name + " accepting received exception" , e); + LOG.debug(name + " accepting received exception", e); // mark that we got the error if (e != null) { exception = e; @@ -95,19 +94,19 @@ synchronized public ForeignException getException() { /** * Sends an exception to all listeners. - * @param e {@link ForeignException} containing the cause. Can be null. + * @param e {@link ForeignException} containing the cause. Can be null. */ private void dispatch(ForeignException e) { // update all the listeners with the passed error - for (ForeignExceptionListener l: listeners) { + for (ForeignExceptionListener l : listeners) { l.receive(e); } } /** - * Listen for failures to a given process. This method should only be used during - * initialization and not added to after exceptions are accepted. - * @param errorable listener for the errors. may be null. + * Listen for failures to a given process. This method should only be used during initialization + * and not added to after exceptions are accepted. + * @param errorable listener for the errors. may be null. */ public synchronized void addListener(ForeignExceptionListener errorable) { this.listeners.add(errorable); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java index 26de489aa765..d2ff5bcc41ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java index 7bc1ee47713e..3718900cc87f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,45 +20,39 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This is an interface for a cooperative exception throwing mechanism. Implementations are - * containers that holds an exception from a separate thread. This can be used to receive - * exceptions from 'foreign' threads or from separate 'foreign' processes. + * This is an interface for a cooperative exception throwing mechanism. Implementations are + * containers that holds an exception from a separate thread. 
This can be used to receive exceptions + * from 'foreign' threads or from separate 'foreign' processes. *
<p>
      - * To use, one would pass an implementation of this object to a long running method and - * periodically check by calling {@link #rethrowException()}. If any foreign exceptions have - * been received, the calling thread is then responsible for handling the rethrown exception. + * To use, one would pass an implementation of this object to a long running method and periodically + * check by calling {@link #rethrowException()}. If any foreign exceptions have been received, the + * calling thread is then responsible for handling the rethrown exception. *
<p>
* One could use the boolean {@link #hasException()} to determine if there is an exception as well. *
<p>
      - * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern. There, - * the notification state is bound to a Thread. Using this, applications receive Exceptions in - * the snare. The snare is referenced and checked by multiple threads which enables exception - * notification in all the involved threads/processes. + * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern. There, the + * notification state is bound to a Thread. Using this, applications receive Exceptions in the + * snare. The snare is referenced and checked by multiple threads which enables exception + * notification in all the involved threads/processes. */ @InterfaceAudience.Private public interface ForeignExceptionSnare { /** - * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is - * no exception this is a no-op - * - * @throws ForeignException - * all exceptions from remote sources are procedure exceptions + * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is no + * exception this is a no-op n * all exceptions from remote sources are procedure exceptions */ void rethrowException() throws ForeignException; /** - * Non-exceptional form of {@link #rethrowException()}. Checks to see if any - * process to which the exception checkers is bound has created an error that - * would cause a failure. - * + * Non-exceptional form of {@link #rethrowException()}. Checks to see if any process to which the + * exception checkers is bound has created an error that would cause a failure. * @return true if there has been an error,false otherwise */ boolean hasException(); /** * Get the value of the captured exception. - * * @return the captured foreign exception or null if no exception captured. */ ForeignException getException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java index f17dcde6baeb..a295dd9759cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,14 +34,14 @@ public class TimeoutException extends Exception { /** * Exception indicating that an operation attempt has timed out - * @param start time the operation started (ms since epoch) - * @param end time the timeout was triggered (ms since epoch) - * @param expected expected amount of time for the operation to complete (ms) - * (ideally, expected <= end-start) + * @param start time the operation started (ms since epoch) + * @param end time the timeout was triggered (ms since epoch) + * @param expected expected amount of time for the operation to complete (ms) (ideally, expected + * <= end-start) */ public TimeoutException(String sourceName, long start, long end, long expected) { - super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end - + ", diff:" + (end - start) + ", max:" + expected + " ms"); + super("Timeout elapsed! 
Source:" + sourceName + " Start:" + start + ", End:" + end + ", diff:" + + (end - start) + ", max:" + expected + " ms"); this.sourceName = sourceName; this.start = start; this.end = end; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java index 36182d677d82..aaf0f67f8d14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,10 @@ import java.util.Timer; import java.util.TimerTask; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Time a given process/operation and report a failure if the elapsed time exceeds the max allowed @@ -46,8 +45,8 @@ public class TimeoutExceptionInjector { /** * Create a generic timer for a task/process. * @param listener listener to notify if the process times out - * @param maxTime max allowed running time for the process. Timer starts on calls to - * {@link #start()} + * @param maxTime max allowed running time for the process. Timer starts on calls to + * {@link #start()} */ public TimeoutExceptionInjector(final ForeignExceptionListener listener, final long maxTime) { this.maxTime = maxTime; @@ -63,8 +62,8 @@ public void run() { TimeoutExceptionInjector.this.complete = true; } long end = EnvironmentEdgeManager.currentTime(); - TimeoutException tee = new TimeoutException( - "Timeout caused Foreign Exception", start, end, maxTime); + TimeoutException tee = + new TimeoutException("Timeout caused Foreign Exception", start, end, maxTime); String source = "timer-" + timer; listener.receive(new ForeignException(source, tee)); } @@ -85,8 +84,8 @@ public void complete() { return; } if (LOG.isDebugEnabled()) { - LOG.debug("Marking timer as complete - no error notifications will be received for " + - "this timer."); + LOG.debug("Marking timer as complete - no error notifications will be received for " + + "this timer."); } this.complete = true; } @@ -98,7 +97,7 @@ public void complete() { *
<p>
      * Non-blocking. * @throws IllegalStateException if the timer has already been marked done via {@link #complete()} - * or {@link #trigger()} + * or {@link #trigger()} */ public synchronized void start() throws IllegalStateException { if (this.start >= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 17054a5c409c..fcb60a35c4cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -31,23 +29,19 @@ import org.slf4j.LoggerFactory; /** - * Abstract base class for all HBase event handlers. Subclasses should - * implement the {@link #process()} and {@link #prepare()} methods. Subclasses - * should also do all necessary checks up in their prepare() if possible -- check - * table exists, is disabled, etc. -- so they fail fast rather than later when process - * is running. Do it this way because process be invoked directly but event - * handlers are also - * run in an executor context -- i.e. asynchronously -- and in this case, - * exceptions thrown at process time will not be seen by the invoker, not till - * we implement a call-back mechanism so the client can pick them up later. + * Abstract base class for all HBase event handlers. Subclasses should implement the + * {@link #process()} and {@link #prepare()} methods. Subclasses should also do all necessary checks + * up in their prepare() if possible -- check table exists, is disabled, etc. -- so they fail fast + * rather than later when process is running. Do it this way because process be invoked directly but + * event handlers are also run in an executor context -- i.e. asynchronously -- and in this case, + * exceptions thrown at process time will not be seen by the invoker, not till we implement a + * call-back mechanism so the client can pick them up later. *
<p>
      - * Event handlers have an {@link EventType}. - * {@link EventType} is a list of ALL handler event types. We need to keep - * a full list in one place -- and as enums is a good shorthand for an - * implemenations -- because event handlers can be passed to executors when - * they are to be run asynchronously. The - * hbase executor, see ExecutorService, has a switch for passing - * event type to executor. + * Event handlers have an {@link EventType}. {@link EventType} is a list of ALL handler event types. + * We need to keep a full list in one place -- and as enums is a good shorthand for an + * implemenations -- because event handlers can be passed to executors when they are to be run + * asynchronously. The hbase executor, see ExecutorService, has a switch for passing event type to + * executor. *
<p>
      * @see ExecutorService */ @@ -80,17 +74,17 @@ public EventHandler(Server server, EventType eventType) { this.eventType = eventType; seqid = seqids.incrementAndGet(); if (server != null) { - this.waitingTimeForEvents = server.getConfiguration(). - getInt("hbase.master.event.waiting.time", 1000); + this.waitingTimeForEvents = + server.getConfiguration().getInt("hbase.master.event.waiting.time", 1000); } } /** - * Event handlers should do all the necessary checks in this method (rather than - * in the constructor, or in process()) so that the caller, which is mostly executed - * in the ipc context can fail fast. Process is executed async from the client ipc, - * so this method gives a quick chance to do some basic checks. - * Should be called after constructing the EventHandler, and before process(). + * Event handlers should do all the necessary checks in this method (rather than in the + * constructor, or in process()) so that the caller, which is mostly executed in the ipc context + * can fail fast. Process is executed async from the client ipc, so this method gives a quick + * chance to do some basic checks. Should be called after constructing the EventHandler, and + * before process(). * @return the instance of this class * @throws Exception when something goes wrong */ @@ -112,9 +106,7 @@ public void run() { } /** - * This method is the main processing loop to be implemented by the various - * subclasses. - * @throws IOException + * This method is the main processing loop to be implemented by the various subclasses. n */ public abstract void process() throws IOException; @@ -127,10 +119,10 @@ public EventType getEventType() { } /** - * Get the priority level for this handler instance. This uses natural - * ordering so lower numbers are higher priority. + * Get the priority level for this handler instance. This uses natural ordering so lower numbers + * are higher priority. *
<p>
      - * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. + * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. *
<p>
      * Subclasses should override this method to allow prioritizing handlers. *
<p>
      @@ -152,15 +144,15 @@ public long getSeqid() { /** * Default prioritized runnable comparator which implements a FIFO ordering. *
<p>
      - * Subclasses should not override this. Instead, if they want to implement - * priority beyond FIFO, they should override {@link #getPriority()}. + * Subclasses should not override this. Instead, if they want to implement priority beyond FIFO, + * they should override {@link #getPriority()}. */ @Override public int compareTo(EventHandler o) { if (o == null) { return 1; } - if(getPriority() != o.getPriority()) { + if (getPriority() != o.getPriority()) { return (getPriority() < o.getPriority()) ? -1 : 1; } return (this.seqid < o.seqid) ? -1 : 1; @@ -168,16 +160,13 @@ public int compareTo(EventHandler o) { @Override public String toString() { - return "Event #" + getSeqid() + - " of type " + eventType + - " (" + getInformativeName() + ")"; + return "Event #" + getSeqid() + " of type " + eventType + " (" + getInformativeName() + ")"; } /** - * Event implementations should override thie class to provide an - * informative name about what event they are handling. For example, - * event-specific information such as which region or server is - * being processed should be included if possible. + * Event implementations should override thie class to provide an informative name about what + * event they are handling. For example, event-specific information such as which region or server + * is being processed should be included if possible. */ public String getInformativeName() { return this.getClass().toString(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index 0b608be369a3..e79c9c2bc415 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,113 +33,103 @@ public enum EventType { // Messages originating from RS (NOTE: there is NO direct communication from // RS to Master). These are a result of RS updates into ZK. - // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) + // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) /** * RS_ZK_REGION_CLOSED
      - * * RS has finished closing a region. */ - RS_ZK_REGION_CLOSED (2, ExecutorType.MASTER_CLOSE_REGION), + RS_ZK_REGION_CLOSED(2, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_OPENING
      - * * RS is in process of opening a region. */ - RS_ZK_REGION_OPENING (3, null), + RS_ZK_REGION_OPENING(3, null), /** * RS_ZK_REGION_OPENED
      - * * RS has finished opening a region. */ - RS_ZK_REGION_OPENED (4, ExecutorType.MASTER_OPEN_REGION), + RS_ZK_REGION_OPENED(4, ExecutorType.MASTER_OPEN_REGION), /** * RS_ZK_REGION_SPLITTING
      - * * RS has started a region split after master says it's ok to move on. */ - RS_ZK_REGION_SPLITTING (5, null), + RS_ZK_REGION_SPLITTING(5, null), /** * RS_ZK_REGION_SPLIT
      - * * RS split has completed and is notifying the master. */ - RS_ZK_REGION_SPLIT (6, ExecutorType.MASTER_SERVER_OPERATIONS), + RS_ZK_REGION_SPLIT(6, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REGION_FAILED_OPEN
      - * * RS failed to open a region. */ - RS_ZK_REGION_FAILED_OPEN (7, ExecutorType.MASTER_CLOSE_REGION), + RS_ZK_REGION_FAILED_OPEN(7, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_MERGING
      - * * RS has started merging regions after master says it's ok to move on. */ - RS_ZK_REGION_MERGING (8, null), + RS_ZK_REGION_MERGING(8, null), /** * RS_ZK_REGION_MERGE
      - * * RS region merge has completed and is notifying the master. */ - RS_ZK_REGION_MERGED (9, ExecutorType.MASTER_SERVER_OPERATIONS), + RS_ZK_REGION_MERGED(9, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REQUEST_REGION_SPLIT
      - * - * RS has requested to split a region. This is to notify master - * and check with master if the region is in a state good to split. + * RS has requested to split a region. This is to notify master and check with master if the + * region is in a state good to split. */ - RS_ZK_REQUEST_REGION_SPLIT (10, null), + RS_ZK_REQUEST_REGION_SPLIT(10, null), /** * RS_ZK_REQUEST_REGION_MERGE
      - * - * RS has requested to merge two regions. This is to notify master - * and check with master if two regions is in states good to merge. + * RS has requested to merge two regions. This is to notify master and check with master if two + * regions is in states good to merge. */ - RS_ZK_REQUEST_REGION_MERGE (11, null), + RS_ZK_REQUEST_REGION_MERGE(11, null), /** * Messages originating from Master to RS.
      * M_RS_OPEN_REGION
      * Master asking RS to open a region. */ - M_RS_OPEN_REGION (20, ExecutorType.RS_OPEN_REGION), + M_RS_OPEN_REGION(20, ExecutorType.RS_OPEN_REGION), /** * Messages originating from Master to RS.
      * M_RS_OPEN_ROOT
      * Master asking RS to open root. */ - M_RS_OPEN_ROOT (21, ExecutorType.RS_OPEN_ROOT), + M_RS_OPEN_ROOT(21, ExecutorType.RS_OPEN_ROOT), /** * Messages originating from Master to RS.
      * M_RS_OPEN_META
      * Master asking RS to open meta. */ - M_RS_OPEN_META (22, ExecutorType.RS_OPEN_META), + M_RS_OPEN_META(22, ExecutorType.RS_OPEN_META), /** * Messages originating from Master to RS.
      * M_RS_CLOSE_REGION
      * Master asking RS to close a region. */ - M_RS_CLOSE_REGION (23, ExecutorType.RS_CLOSE_REGION), + M_RS_CLOSE_REGION(23, ExecutorType.RS_CLOSE_REGION), /** * Messages originating from Master to RS.
      * M_RS_CLOSE_ROOT
      * Master asking RS to close root. */ - M_RS_CLOSE_ROOT (24, ExecutorType.RS_CLOSE_ROOT), + M_RS_CLOSE_ROOT(24, ExecutorType.RS_CLOSE_ROOT), /** * Messages originating from Master to RS.
      * M_RS_CLOSE_META
      * Master asking RS to close meta. */ - M_RS_CLOSE_META (25, ExecutorType.RS_CLOSE_META), + M_RS_CLOSE_META(25, ExecutorType.RS_CLOSE_META), /** * Messages originating from Master to RS.
      * M_RS_OPEN_PRIORITY_REGION
      - * Master asking RS to open a priority region. + * Master asking RS to open a priority region. */ - M_RS_OPEN_PRIORITY_REGION (26, ExecutorType.RS_OPEN_PRIORITY_REGION), + M_RS_OPEN_PRIORITY_REGION(26, ExecutorType.RS_OPEN_PRIORITY_REGION), /** * Messages originating from Master to RS.
      * M_RS_SWITCH_RPC_THROTTLE
      @@ -152,168 +142,154 @@ public enum EventType { * C_M_MERGE_REGION
      * Client asking Master to merge regions. */ - C_M_MERGE_REGION (30, ExecutorType.MASTER_MERGE_OPERATIONS), + C_M_MERGE_REGION(30, ExecutorType.MASTER_MERGE_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_DELETE_TABLE
      * Client asking Master to delete a table. */ - C_M_DELETE_TABLE (40, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_DELETE_TABLE(40, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_DISABLE_TABLE
      * Client asking Master to disable a table. */ - C_M_DISABLE_TABLE (41, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_DISABLE_TABLE(41, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_ENABLE_TABLE
      * Client asking Master to enable a table. */ - C_M_ENABLE_TABLE (42, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_ENABLE_TABLE(42, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_MODIFY_TABLE
      * Client asking Master to modify a table. */ - C_M_MODIFY_TABLE (43, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_MODIFY_TABLE(43, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_ADD_FAMILY
      * Client asking Master to add family to table. */ - C_M_ADD_FAMILY (44, null), + C_M_ADD_FAMILY(44, null), /** * Messages originating from Client to Master.
      * C_M_DELETE_FAMILY
      * Client asking Master to delete family of table. */ - C_M_DELETE_FAMILY (45, null), + C_M_DELETE_FAMILY(45, null), /** * Messages originating from Client to Master.
      * C_M_MODIFY_FAMILY
      * Client asking Master to modify family of table. */ - C_M_MODIFY_FAMILY (46, null), + C_M_MODIFY_FAMILY(46, null), /** * Messages originating from Client to Master.
      * C_M_CREATE_TABLE
      * Client asking Master to create a table. */ - C_M_CREATE_TABLE (47, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_CREATE_TABLE(47, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_SNAPSHOT_TABLE
      * Client asking Master to snapshot an offline table. */ - C_M_SNAPSHOT_TABLE (48, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), + C_M_SNAPSHOT_TABLE(48, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), /** * Messages originating from Client to Master.
      * C_M_RESTORE_SNAPSHOT
      * Client asking Master to restore a snapshot. */ - C_M_RESTORE_SNAPSHOT (49, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), + C_M_RESTORE_SNAPSHOT(49, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), // Updates from master to ZK. This is done by the master and there is // nothing to process by either Master or RS /** - * M_ZK_REGION_OFFLINE - * Master adds this region as offline in ZK + * M_ZK_REGION_OFFLINE Master adds this region as offline in ZK */ - M_ZK_REGION_OFFLINE (50, null), + M_ZK_REGION_OFFLINE(50, null), /** - * M_ZK_REGION_CLOSING - * Master adds this region as closing in ZK + * M_ZK_REGION_CLOSING Master adds this region as closing in ZK */ - M_ZK_REGION_CLOSING (51, null), + M_ZK_REGION_CLOSING(51, null), /** - * Master controlled events to be executed on the master - * M_SERVER_SHUTDOWN - * Master is processing shutdown of a RS + * Master controlled events to be executed on the master M_SERVER_SHUTDOWN Master is processing + * shutdown of a RS */ - M_SERVER_SHUTDOWN (70, ExecutorType.MASTER_SERVER_OPERATIONS), + M_SERVER_SHUTDOWN(70, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
      * M_META_SERVER_SHUTDOWN
      * Master is processing shutdown of RS hosting a meta region (-ROOT- or hbase:meta). */ - M_META_SERVER_SHUTDOWN (72, ExecutorType.MASTER_META_SERVER_OPERATIONS), + M_META_SERVER_SHUTDOWN(72, ExecutorType.MASTER_META_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
      - * * M_MASTER_RECOVERY
      * Master is processing recovery of regions found in ZK RIT */ - M_MASTER_RECOVERY (73, ExecutorType.MASTER_SERVER_OPERATIONS), + M_MASTER_RECOVERY(73, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
      - * * M_LOG_REPLAY
      * Master is processing log replay of failed region server */ - M_LOG_REPLAY (74, ExecutorType.M_LOG_REPLAY_OPS), + M_LOG_REPLAY(74, ExecutorType.M_LOG_REPLAY_OPS), /** * RS controlled events to be executed on the RS.
      - * * RS_PARALLEL_SEEK */ - RS_PARALLEL_SEEK (80, ExecutorType.RS_PARALLEL_SEEK), + RS_PARALLEL_SEEK(80, ExecutorType.RS_PARALLEL_SEEK), /** * RS wal recovery work items (splitting wals) to be executed on the RS.
      - * * RS_LOG_REPLAY */ - RS_LOG_REPLAY (81, ExecutorType.RS_LOG_REPLAY_OPS), + RS_LOG_REPLAY(81, ExecutorType.RS_LOG_REPLAY_OPS), /** * RS flush triggering from secondary region replicas to primary region replica.
      - * * RS_REGION_REPLICA_FLUSH */ - RS_REGION_REPLICA_FLUSH (82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS), + RS_REGION_REPLICA_FLUSH(82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS), /** * RS compacted files discharger
      - * * RS_COMPACTED_FILES_DISCHARGER */ - RS_COMPACTED_FILES_DISCHARGER (83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER), + RS_COMPACTED_FILES_DISCHARGER(83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER), /** * RS refresh peer.
      - * * RS_REFRESH_PEER */ RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER), /** * RS replay sync replication wal.
      - * * RS_REPLAY_SYNC_REPLICATION_WAL */ RS_REPLAY_SYNC_REPLICATION_WAL(85, ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL), /** * RS claim replication queue.
      - * * RS_CLAIM_REPLICATION_QUEUE */ RS_CLAIM_REPLICATION_QUEUE(86, ExecutorType.RS_CLAIM_REPLICATION_QUEUE), /** - * RS snapshot regions.
      - * - * RS_SNAPSHOT_REGIONS + * RS snapshot regions.
      + * RS_SNAPSHOT_REGIONS */ RS_SNAPSHOT_REGIONS(87, ExecutorType.RS_SNAPSHOT_OPERATIONS), /** - * RS verify snapshot.
      - * - * RS_VERIFY_SNAPSHOT + * RS verify snapshot.
      + * RS_VERIFY_SNAPSHOT */ RS_VERIFY_SNAPSHOT(88, ExecutorType.RS_SNAPSHOT_OPERATIONS); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index cc36b957c4cc..f89a59a6c2dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,15 +45,15 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This is a generic executor service. This component abstracts a - * threadpool, a queue to which {@link EventType}s can be submitted, - * and a Runnable that handles the object that is added to the queue. - * - *
      In order to create a new service, create an instance of this class and - * then do: instance.startExecutorService(executorConfig);. {@link ExecutorConfig} - * wraps the configuration needed by this service. When done call {@link #shutdown()}. - * - *
      In order to use the service created above, call {@link #submit(EventHandler)}. + * This is a generic executor service. This component abstracts a threadpool, a queue to which + * {@link EventType}s can be submitted, and a Runnable that handles the object that is + * added to the queue. + *
      + * In order to create a new service, create an instance of this class and then do: + * instance.startExecutorService(executorConfig);. {@link ExecutorConfig} wraps the + * configuration needed by this service. When done call {@link #shutdown()}. + *
      + * In order to use the service created above, call {@link #submit(EventHandler)}. */ @InterfaceAudience.Private public class ExecutorService { @@ -79,24 +78,22 @@ public ExecutorService(final String servername) { } /** - * Start an executor service with a given name. If there was a service already - * started with the same name, this throws a RuntimeException. + * Start an executor service with a given name. If there was a service already started with the + * same name, this throws a RuntimeException. * @param config Configuration to use for the executor. */ public void startExecutorService(final ExecutorConfig config) { final String name = config.getName(); Executor hbes = this.executorMap.compute(name, (key, value) -> { if (value != null) { - throw new RuntimeException("An executor service with the name " + key + - " is already running!"); + throw new RuntimeException( + "An executor service with the name " + key + " is already running!"); } return new Executor(config); }); - LOG.debug( - "Starting executor service name={}, corePoolSize={}, maxPoolSize={}", - name, hbes.threadPoolExecutor.getCorePoolSize(), - hbes.threadPoolExecutor.getMaximumPoolSize()); + LOG.debug("Starting executor service name={}, corePoolSize={}, maxPoolSize={}", name, + hbes.threadPoolExecutor.getCorePoolSize(), hbes.threadPoolExecutor.getMaximumPoolSize()); } boolean isExecutorServiceRunning(String name) { @@ -105,9 +102,8 @@ boolean isExecutorServiceRunning(String name) { public void shutdown() { this.delayedSubmitTimer.shutdownNow(); - for(Entry entry: this.executorMap.entrySet()) { - List wasRunning = - entry.getValue().threadPoolExecutor.shutdownNow(); + for (Entry entry : this.executorMap.entrySet()) { + List wasRunning = entry.getValue().threadPoolExecutor.shutdownNow(); if (!wasRunning.isEmpty()) { LOG.info(entry.getValue() + " had " + wasRunning + " on shutdown"); } @@ -133,8 +129,8 @@ public ThreadPoolExecutor getExecutorThreadPool(final ExecutorType type) { * {@link ExecutorService#startExecutorService(ExecutorConfig)} */ public ThreadPoolExecutor getExecutorLazily(ExecutorConfig config) { - return executorMap.computeIfAbsent(config.getName(), (executorName) -> - new Executor(config)).getThreadPoolExecutor(); + return executorMap.computeIfAbsent(config.getName(), (executorName) -> new Executor(config)) + .getThreadPoolExecutor(); } public void submit(final EventHandler eh) { @@ -143,8 +139,8 @@ public void submit(final EventHandler eh) { // This happens only when events are submitted after shutdown() was // called, so dropping them should be "ok" since it means we're // shutting down. - LOG.error("Cannot submit [" + eh + "] because the executor is missing." + - " Is this process shutting down?"); + LOG.error("Cannot submit [" + eh + "] because the executor is missing." + + " Is this process shutting down?"); } else { executor.submit(eh); } @@ -206,9 +202,9 @@ public boolean allowCoreThreadTimeout() { } /** - * Allows timing out of core threads. Good to set this for non-critical thread pools for - * release of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} - * for additional details. + * Allows timing out of core threads. Good to set this for non-critical thread pools for release + * of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} for + * additional details. 
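For orientation, the lifecycle described in the reflowed class comment amounts to the calls below. This is only a sketch built from the methods visible in this hunk; the ExecutorConfig and EventHandler are assumed to be supplied by the caller, since their construction is not part of this change.

    // Sketch: only ExecutorService methods shown in this hunk are used; `config`
    // and `handler` are assumed to be built elsewhere.
    void runLifecycle(String serverName, ExecutorConfig config, EventHandler handler) {
      ExecutorService svc = new ExecutorService(serverName);
      svc.startExecutorService(config); // RuntimeException if an executor with this name already runs
      svc.submit(handler);              // queued on the executor registered for the handler's event type
      svc.shutdown();                   // shutdownNow() on every pool; handlers still queued are dropped
    }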
*/ public ExecutorConfig setAllowCoreThreadTimeout(boolean allowCoreThreadTimeout) { this.allowCoreThreadTimeout = allowCoreThreadTimeout; @@ -249,9 +245,9 @@ protected Executor(ExecutorConfig config) { this.name = config.getName(); // create the thread pool executor this.threadPoolExecutor = new TrackingThreadPoolExecutor( - // setting maxPoolSize > corePoolSize has no effect since we use an unbounded task queue. - config.getCorePoolSize(), config.getCorePoolSize(), - config.getKeepAliveTimeMillis(), TimeUnit.MILLISECONDS, q); + // setting maxPoolSize > corePoolSize has no effect since we use an unbounded task queue. + config.getCorePoolSize(), config.getCorePoolSize(), config.getKeepAliveTimeMillis(), + TimeUnit.MILLISECONDS, q); this.threadPoolExecutor.allowCoreThreadTimeOut(config.allowCoreThreadTimeout()); // name the threads for this threadpool ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); @@ -261,8 +257,7 @@ protected Executor(ExecutorConfig config) { } /** - * Submit the event to the queue for handling. - * @param event + * Submit the event to the queue for handling. n */ void submit(final EventHandler event) { // If there is a listener for this type, make sure we call the before @@ -286,18 +281,17 @@ public ExecutorStatus getStatus() { LOG.warn("Non-EventHandler " + r + " queued in " + name); continue; } - queuedEvents.add((EventHandler)r); + queuedEvents.add((EventHandler) r); } List running = Lists.newArrayList(); - for (Map.Entry e : - threadPoolExecutor.getRunningTasks().entrySet()) { + for (Map.Entry e : threadPoolExecutor.getRunningTasks().entrySet()) { Runnable r = e.getValue(); if (!(r instanceof EventHandler)) { LOG.warn("Non-EventHandler " + r + " running in " + name); continue; } - running.add(new RunningEventStatus(e.getKey(), (EventHandler)r)); + running.add(new RunningEventStatus(e.getKey(), (EventHandler) r)); } return new ExecutorStatus(this, queuedEvents, running); @@ -305,14 +299,14 @@ public ExecutorStatus getStatus() { } /** - * A subclass of ThreadPoolExecutor that keeps track of the Runnables that - * are executing at any given point in time. + * A subclass of ThreadPoolExecutor that keeps track of the Runnables that are executing at any + * given point in time. */ static class TrackingThreadPoolExecutor extends ThreadPoolExecutor { private ConcurrentMap running = Maps.newConcurrentMap(); - public TrackingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, - long keepAliveTime, TimeUnit unit, BlockingQueue workQueue) { + public TrackingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, + TimeUnit unit, BlockingQueue workQueue) { super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue); } @@ -330,10 +324,9 @@ protected void beforeExecute(Thread t, Runnable r) { } /** - * @return a map of the threads currently running tasks - * inside this executor. Each key is an active thread, - * and the value is the task that is currently running. - * Note that this is not a stable snapshot of the map. + * @return a map of the threads currently running tasks inside this executor. Each key is an + * active thread, and the value is the task that is currently running. Note that this is + * not a stable snapshot of the map. */ public ConcurrentMap getRunningTasks() { return running; @@ -341,20 +334,17 @@ public ConcurrentMap getRunningTasks() { } /** - * A snapshot of the status of a particular executor. 
This includes - * the contents of the executor's pending queue, as well as the - * threads and events currently being processed. - * - * This is a consistent snapshot that is immutable once constructed. + * A snapshot of the status of a particular executor. This includes the contents of the executor's + * pending queue, as well as the threads and events currently being processed. This is a + * consistent snapshot that is immutable once constructed. */ public static class ExecutorStatus { final Executor executor; final List queuedEvents; final List running; - ExecutorStatus(Executor executor, - List queuedEvents, - List running) { + ExecutorStatus(Executor executor, List queuedEvents, + List running) { this.executor = executor; this.queuedEvents = queuedEvents; this.running = running; @@ -369,17 +359,14 @@ public List getRunning() { } /** - * Dump a textual representation of the executor's status - * to the given writer. - * - * @param out the stream to write to + * Dump a textual representation of the executor's status to the given writer. + * @param out the stream to write to * @param indent a string prefix for each line, used for indentation */ public void dumpTo(Writer out, String indent) throws IOException { out.write(indent + "Status for executor: " + executor + "\n"); out.write(indent + "=======================================\n"); - out.write(indent + queuedEvents.size() + " events queued, " + - running.size() + " running\n"); + out.write(indent + queuedEvents.size() + " events queued, " + running.size() + " running\n"); if (!queuedEvents.isEmpty()) { out.write(indent + "Queued:\n"); for (EventHandler e : queuedEvents) { @@ -390,11 +377,9 @@ public void dumpTo(Writer out, String indent) throws IOException { if (!running.isEmpty()) { out.write(indent + "Running:\n"); for (RunningEventStatus stat : running) { - out.write(indent + " Running on thread '" + - stat.threadInfo.getThreadName() + - "': " + stat.event + "\n"); - out.write(ThreadMonitoring.formatThreadInfo( - stat.threadInfo, indent + " ")); + out.write(indent + " Running on thread '" + stat.threadInfo.getThreadName() + "': " + + stat.event + "\n"); + out.write(ThreadMonitoring.formatThreadInfo(stat.threadInfo, indent + " ")); out.write("\n"); } } @@ -403,8 +388,7 @@ public void dumpTo(Writer out, String indent) throws IOException { } /** - * The status of a particular event that is in the middle of being - * handled by an executor. + * The status of a particular event that is in the middle of being handled by an executor. */ public static class RunningEventStatus { final ThreadInfo threadInfo; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index cbecb3e8619f..0bcd3ee05190 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,35 +20,35 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * The following is a list of all executor types, both those that run in the - * master and those that run in the regionserver. + * The following is a list of all executor types, both those that run in the master and those that + * run in the regionserver. 
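The ExecutorStatus snapshot above is what feeds the textual dump; as a rough sketch (how a caller obtains the status object is outside this hunk), writing one executor's snapshot to a Writer looks like:

    // Sketch: dump the queued and running handlers of one executor.
    void dumpOne(ExecutorService.ExecutorStatus status, java.io.Writer out) throws java.io.IOException {
      status.dumpTo(out, "  "); // queued events first, then running events with their thread info
      out.flush();
    }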
*/ @InterfaceAudience.Private public enum ExecutorType { // Master executor services - MASTER_CLOSE_REGION (1), - MASTER_OPEN_REGION (2), - MASTER_SERVER_OPERATIONS (3), - MASTER_TABLE_OPERATIONS (4), - MASTER_RS_SHUTDOWN (5), - MASTER_META_SERVER_OPERATIONS (6), - M_LOG_REPLAY_OPS (7), - MASTER_SNAPSHOT_OPERATIONS (8), - MASTER_MERGE_OPERATIONS (9), + MASTER_CLOSE_REGION(1), + MASTER_OPEN_REGION(2), + MASTER_SERVER_OPERATIONS(3), + MASTER_TABLE_OPERATIONS(4), + MASTER_RS_SHUTDOWN(5), + MASTER_META_SERVER_OPERATIONS(6), + M_LOG_REPLAY_OPS(7), + MASTER_SNAPSHOT_OPERATIONS(8), + MASTER_MERGE_OPERATIONS(9), // RegionServer executor services - RS_OPEN_REGION (20), - RS_OPEN_ROOT (21), - RS_OPEN_META (22), - RS_CLOSE_REGION (23), - RS_CLOSE_ROOT (24), - RS_CLOSE_META (25), - RS_PARALLEL_SEEK (26), - RS_LOG_REPLAY_OPS (27), - RS_REGION_REPLICA_FLUSH_OPS (28), - RS_COMPACTED_FILES_DISCHARGER (29), - RS_OPEN_PRIORITY_REGION (30), + RS_OPEN_REGION(20), + RS_OPEN_ROOT(21), + RS_OPEN_META(22), + RS_CLOSE_REGION(23), + RS_CLOSE_ROOT(24), + RS_CLOSE_META(25), + RS_PARALLEL_SEEK(26), + RS_LOG_REPLAY_OPS(27), + RS_REGION_REPLICA_FLUSH_OPS(28), + RS_COMPACTED_FILES_DISCHARGER(29), + RS_OPEN_PRIORITY_REGION(30), RS_REFRESH_PEER(31), RS_REPLAY_SYNC_REPLICATION_WAL(32), RS_SWITCH_RPC_THROTTLE(33), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java index b127493fc5c2..7e20bb88187b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,20 +20,19 @@ import java.io.IOException; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * This is a Filter wrapper class which is used in the server side. Some filter - * related hooks can be defined in this wrapper. The only way to create a - * FilterWrapper instance is passing a client side Filter instance through - * {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. - * + * This is a Filter wrapper class which is used in the server side. Some filter related hooks can be + * defined in this wrapper. The only way to create a FilterWrapper instance is passing a client side + * Filter instance through {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. 
*/ @InterfaceAudience.Private final public class FilterWrapper extends Filter { @@ -55,8 +52,7 @@ public FilterWrapper(Filter filter) { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.FilterWrapper.Builder builder = - FilterProtos.FilterWrapper.newBuilder(); + FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } @@ -67,8 +63,7 @@ public byte[] toByteArray() throws IOException { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static FilterWrapper parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FilterWrapper parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FilterWrapper proto; try { proto = FilterProtos.FilterWrapper.parseFrom(pbBytes); @@ -130,16 +125,17 @@ public void filterRowCells(List kvs) throws IOException { public enum FilterRowRetCode { NOT_CALLED, - INCLUDE, // corresponds to filter.filterRow() returning false - EXCLUDE, // corresponds to filter.filterRow() returning true - INCLUDE_THIS_FAMILY // exclude other families + INCLUDE, // corresponds to filter.filterRow() returning false + EXCLUDE, // corresponds to filter.filterRow() returning true + INCLUDE_THIS_FAMILY // exclude other families } + public FilterRowRetCode filterRowCellsWithRet(List kvs) throws IOException { - //To fix HBASE-6429, - //Filter with filterRow() returning true is incompatible with scan with limit - //1. hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. - //2. filterRow() is merged with filterRow(kvs), - //so that to make all those row related filtering stuff in the same function. + // To fix HBASE-6429, + // Filter with filterRow() returning true is incompatible with scan with limit + // 1. hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. + // 2. filterRow() is merged with filterRow(kvs), + // so that to make all those row related filtering stuff in the same function. this.filter.filterRowCells(kvs); if (!kvs.isEmpty()) { if (this.filter.filterRow()) { @@ -158,15 +154,15 @@ public boolean isFamilyEssential(byte[] name) throws IOException { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof FilterWrapper)) return false; - FilterWrapper other = (FilterWrapper)o; + FilterWrapper other = (FilterWrapper) o; return this.filter.areSerializedFieldsEqual(other.filter); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index eda59ed2d560..f02419ca90fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
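Since FilterWrapper only decorates a client-supplied Filter, the wrap/serialize/deserialize cycle mentioned in the reworked comment is roughly the following sketch, where `scan` stands in for a client Scan that carries a filter (not part of this hunk):

    // Sketch: wrap the client's filter and round-trip it through its protobuf form.
    FilterWrapper wrapper = new FilterWrapper(scan.getFilter());
    byte[] pb = wrapper.toByteArray();                    // protobuf-encoded wrapper plus inner filter
    FilterWrapper restored = FilterWrapper.parseFrom(pb); // DeserializationException on bad bytes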
See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.fs; import edu.umd.cs.findbugs.annotations.Nullable; @@ -57,27 +54,24 @@ import org.slf4j.LoggerFactory; /** - * An encapsulation for the FileSystem object that hbase uses to access - * data. This class allows the flexibility of using - * separate filesystem objects for reading and writing hfiles and wals. + * An encapsulation for the FileSystem object that hbase uses to access data. This class allows the + * flexibility of using separate filesystem objects for reading and writing hfiles and wals. */ @InterfaceAudience.Private public class HFileSystem extends FilterFileSystem { public static final Logger LOG = LoggerFactory.getLogger(HFileSystem.class); - private final FileSystem noChecksumFs; // read hfile data from storage + private final FileSystem noChecksumFs; // read hfile data from storage private final boolean useHBaseChecksum; private static volatile byte unspecifiedStoragePolicyId = Byte.MIN_VALUE; /** * Create a FileSystem object for HBase regionservers. - * @param conf The configuration to be used for the filesystem - * @param useHBaseChecksum if true, then use - * checksum verfication in hbase, otherwise - * delegate checksum verification to the FileSystem. + * @param conf The configuration to be used for the filesystem + * @param useHBaseChecksum if true, then use checksum verfication in hbase, otherwise delegate + * checksum verification to the FileSystem. */ - public HFileSystem(Configuration conf, boolean useHBaseChecksum) - throws IOException { + public HFileSystem(Configuration conf, boolean useHBaseChecksum) throws IOException { // Create the default filesystem with checksum verification switched on. // By default, any operation to this FilterFileSystem occurs on @@ -120,9 +114,8 @@ public HFileSystem(Configuration conf, boolean useHBaseChecksum) } /** - * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and - * writefs are both set to be the same specified fs. - * Do not verify hbase-checksums while reading data from filesystem. + * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and writefs are both set to be + * the same specified fs. Do not verify hbase-checksums while reading data from filesystem. * @param fs Set the noChecksumFs and writeFs to this specified filesystem. */ public HFileSystem(FileSystem fs) { @@ -132,11 +125,9 @@ public HFileSystem(FileSystem fs) { } /** - * Returns the filesystem that is specially setup for - * doing reads from storage. This object avoids doing - * checksum verifications for reads. - * @return The FileSystem object that can be used to read data - * from files. + * Returns the filesystem that is specially setup for doing reads from storage. This object avoids + * doing checksum verifications for reads. + * @return The FileSystem object that can be used to read data from files. */ public FileSystem getNoChecksumFs() { return noChecksumFs; @@ -152,10 +143,10 @@ public FileSystem getBackingFs() throws IOException { /** * Set the source path (directory/file) to the specified storage policy. - * @param path The source path (directory/file). - * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. - * See see hadoop 2.6+ org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g - * 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'. 
+ * @param path The source path (directory/file). + * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. See see hadoop 2.6+ + * org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD', + * 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'. */ public void setStoragePolicy(Path path, String policyName) { CommonFSUtils.setStoragePolicy(this.fs, path, policyName); @@ -171,7 +162,7 @@ public void setStoragePolicy(Path path, String policyName) { public String getStoragePolicyName(Path path) { try { Object blockStoragePolicySpi = - ReflectionUtils.invokeMethod(this.fs, "getStoragePolicy", path); + ReflectionUtils.invokeMethod(this.fs, "getStoragePolicy", path); return (String) ReflectionUtils.invokeMethod(blockStoragePolicySpi, "getName"); } catch (Exception e) { // Maybe fail because of using old HDFS version, try the old way @@ -221,8 +212,7 @@ private String getStoragePolicyForOldHDFSVersion(Path path) { /** * Are we verifying checksums in HBase? - * @return True, if hbase is configured to verify checksums, - * otherwise false. + * @return True, if hbase is configured to verify checksums, otherwise false. */ public boolean useHBaseChecksum() { return useHBaseChecksum; @@ -240,10 +230,8 @@ public void close() throws IOException { } /** - * Returns a brand new instance of the FileSystem. It does not use - * the FileSystem.Cache. In newer versions of HDFS, we can directly - * invoke FileSystem.newInstance(Configuration). - * + * Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer + * versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration). * @param conf Configuration * @return A new instance of the filesystem */ @@ -271,9 +259,9 @@ private static FileSystem newInstanceFileSystem(Configuration conf) throws IOExc } /** - * Returns an instance of Filesystem wrapped into the class specified in - * hbase.fs.wrapper property, if one is set in the configuration, returns - * unmodified FS instance passed in as an argument otherwise. + * Returns an instance of Filesystem wrapped into the class specified in hbase.fs.wrapper + * property, if one is set in the configuration, returns unmodified FS instance passed in as an + * argument otherwise. * @param base Filesystem instance to wrap * @param conf Configuration * @return wrapped instance of FS, or the same instance if no wrapping configured. @@ -296,15 +284,14 @@ public static boolean addLocationsOrderInterceptor(Configuration conf) throws IO } /** - * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient - * linked to this FileSystem. See HBASE-6435 for the background. + * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient linked to + * this FileSystem. See HBASE-6435 for the background. *
      * There should be no reason, except testing, to create a specific ReorderBlocks. - * * @return true if the interceptor was added, false otherwise. */ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) { - if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default + if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default LOG.debug("addLocationsOrderInterceptor configured to false"); return false; } @@ -318,17 +305,16 @@ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlo } if (!(fs instanceof DistributedFileSystem)) { - LOG.debug("The file system is not a DistributedFileSystem. " + - "Skipping on block location reordering"); + LOG.debug("The file system is not a DistributedFileSystem. " + + "Skipping on block location reordering"); return false; } DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsc = dfs.getClient(); if (dfsc == null) { - LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + - "block reordering interceptor. Continuing, but this is unexpected." - ); + LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + + "block reordering interceptor. Continuing, but this is unexpected."); return false; } @@ -341,16 +327,15 @@ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlo ClientProtocol namenode = (ClientProtocol) nf.get(dfsc); if (namenode == null) { - LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + - " reordering interceptor. Continuing, but this is unexpected." - ); + LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + + " reordering interceptor. 
Continuing, but this is unexpected."); return false; } ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf); nf.set(dfsc, cp1); - LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + - " using class " + lrb.getClass().getName()); + LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + + " using class " + lrb.getClass().getName()); } catch (NoSuchFieldException e) { LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e); return false; @@ -363,44 +348,44 @@ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlo } private static ClientProtocol createReorderingProxy(final ClientProtocol cp, - final ReorderBlocks lrb, final Configuration conf) { + final ReorderBlocks lrb, final Configuration conf) { return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(), - new Class[]{ClientProtocol.class, Closeable.class}, new InvocationHandler() { - @Override - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - try { - if ((args == null || args.length == 0) && "close".equals(method.getName())) { - RPC.stopProxy(cp); - return null; - } else { - Object res = method.invoke(cp, args); - if (res != null && args != null && args.length == 3 - && "getBlockLocations".equals(method.getName()) - && res instanceof LocatedBlocks - && args[0] instanceof String - && args[0] != null) { - lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]); - } - return res; - } - } catch (InvocationTargetException ite) { - // We will have this for all the exception, checked on not, sent - // by any layer, including the functional exception - Throwable cause = ite.getCause(); - if (cause == null){ - throw new RuntimeException("Proxy invocation failed and getCause is null", ite); + new Class[] { ClientProtocol.class, Closeable.class }, new InvocationHandler() { + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + try { + if ((args == null || args.length == 0) && "close".equals(method.getName())) { + RPC.stopProxy(cp); + return null; + } else { + Object res = method.invoke(cp, args); + if ( + res != null && args != null && args.length == 3 + && "getBlockLocations".equals(method.getName()) && res instanceof LocatedBlocks + && args[0] instanceof String && args[0] != null + ) { + lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]); } - if (cause instanceof UndeclaredThrowableException) { - Throwable causeCause = cause.getCause(); - if (causeCause == null) { - throw new RuntimeException("UndeclaredThrowableException had null cause!"); - } - cause = cause.getCause(); + return res; + } + } catch (InvocationTargetException ite) { + // We will have this for all the exception, checked on not, sent + // by any layer, including the functional exception + Throwable cause = ite.getCause(); + if (cause == null) { + throw new RuntimeException("Proxy invocation failed and getCause is null", ite); + } + if (cause instanceof UndeclaredThrowableException) { + Throwable causeCause = cause.getCause(); + if (causeCause == null) { + throw new RuntimeException("UndeclaredThrowableException had null cause!"); } - throw cause; + cause = cause.getCause(); } + throw cause; } - }); + } + }); } /** @@ -408,24 +393,23 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl */ interface ReorderBlocks { /** - * * @param conf - the conf to use - * @param lbs - the LocatedBlocks 
to reorder - * @param src - the file name currently read + * @param lbs - the LocatedBlocks to reorder + * @param src - the file name currently read * @throws IOException - if something went wrong */ void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException; } /** - * We're putting at lowest priority the wal files blocks that are on the same datanode - * as the original regionserver which created these files. This because we fear that the - * datanode is actually dead, so if we use it it will timeout. + * We're putting at lowest priority the wal files blocks that are on the same datanode as the + * original regionserver which created these files. This because we fear that the datanode is + * actually dead, so if we use it it will timeout. */ static class ReorderWALBlocks implements ReorderBlocks { @Override public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) - throws IOException { + throws IOException { ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(conf, src); if (sn == null) { @@ -436,8 +420,7 @@ public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) // Ok, so it's an WAL String hostName = sn.getHostname(); if (LOG.isTraceEnabled()) { - LOG.trace(src + - " is an WAL file, so reordering blocks, last hostname will be:" + hostName); + LOG.trace(src + " is an WAL file, so reordering blocks, last hostname will be:" + hostName); } // Just check for all blocks @@ -460,10 +443,9 @@ public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) } /** - * Create a new HFileSystem object, similar to FileSystem.get(). - * This returns a filesystem object that avoids checksum - * verification in the filesystem for hfileblock-reads. - * For these blocks, checksum verification is done by HBase. + * Create a new HFileSystem object, similar to FileSystem.get(). This returns a filesystem object + * that avoids checksum verification in the filesystem for hfileblock-reads. For these blocks, + * checksum verification is done by HBase. */ static public FileSystem get(Configuration conf) throws IOException { return new HFileSystem(conf, true); @@ -477,17 +459,13 @@ static public FileSystem getLocalFs(Configuration conf) throws IOException { } /** - * The org.apache.hadoop.fs.FilterFileSystem does not yet support - * createNonRecursive. This is a hadoop bug and when it is fixed in Hadoop, - * this definition will go away. + * The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive. This is a + * hadoop bug and when it is fixed in Hadoop, this definition will go away. 
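The checksum split that HFileSystem provides, as described in the class comment and the get() factory above, looks roughly like this in use (a sketch; `conf` stands in for an ordinary Configuration):

    // Sketch: one FilterFileSystem facade, two read paths underneath.
    HFileSystem hfs = new HFileSystem(conf, true);     // true = verify checksums in HBase
    FileSystem withFsChecksums = hfs;                  // trailer/metadata reads, HDFS-level checksums
    FileSystem noFsChecksums = hfs.getNoChecksumFs();  // data-block reads, HBase verifies instead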
*/ @Override @SuppressWarnings("deprecation") - public FSDataOutputStream createNonRecursive(Path f, - boolean overwrite, - int bufferSize, short replication, long blockSize, - Progressable progress) throws IOException { - return fs.createNonRecursive(f, overwrite, bufferSize, replication, - blockSize, progress); + public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, + short replication, long blockSize, Progressable progress) throws IOException { + return fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, progress); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index 5bbc525b8459..819c5651081f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,9 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** - * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, - * as well as closing streams. Initialization is not thread-safe, but normal operation is; - * see method comments. + * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, as well + * as closing streams. Initialization is not thread-safe, but normal operation is; see method + * comments. */ @InterfaceAudience.Private public class FSDataInputStreamWrapper implements Closeable { @@ -50,25 +50,23 @@ public class FSDataInputStreamWrapper implements Closeable { private final boolean dropBehind; private final long readahead; - /** Two stream handles, one with and one without FS-level checksum. - * HDFS checksum setting is on FS level, not single read level, so you have to keep two - * FS objects and two handles open to interleave different reads freely, which is very sad. - * This is what we do: - * 1) First, we need to read the trailer of HFile to determine checksum parameters. - * We always use FS checksum to do that, so ctor opens {@link #stream}. - * 2.1) After that, if HBase checksum is not used, we'd just always use {@link #stream}; - * 2.2) If HBase checksum can be used, we'll open {@link #streamNoFsChecksum}, - * and close {@link #stream}. User MUST call prepareForBlockReader for that to happen; - * if they don't, (2.1) will be the default. - * 3) The users can call {@link #shouldUseHBaseChecksum()}, and pass its result to - * {@link #getStream(boolean)} to get stream (if Java had out/pointer params we could - * return both in one call). This stream is guaranteed to be set. - * 4) The first time HBase checksum fails, one would call {@link #fallbackToFsChecksum(int)}. - * That will take lock, and open {@link #stream}. While this is going on, others will - * continue to use the old stream; if they also want to fall back, they'll also call - * {@link #fallbackToFsChecksum(int)}, and block until {@link #stream} is set. - * 5) After some number of checksumOk() calls, we will go back to using HBase checksum. - * We will have 2 handles; however we presume checksums fail so rarely that we don't care. + /** + * Two stream handles, one with and one without FS-level checksum. 
HDFS checksum setting is on FS + * level, not single read level, so you have to keep two FS objects and two handles open to + * interleave different reads freely, which is very sad. This is what we do: 1) First, we need to + * read the trailer of HFile to determine checksum parameters. We always use FS checksum to do + * that, so ctor opens {@link #stream}. 2.1) After that, if HBase checksum is not used, we'd just + * always use {@link #stream}; 2.2) If HBase checksum can be used, we'll open + * {@link #streamNoFsChecksum}, and close {@link #stream}. User MUST call prepareForBlockReader + * for that to happen; if they don't, (2.1) will be the default. 3) The users can call + * {@link #shouldUseHBaseChecksum()}, and pass its result to {@link #getStream(boolean)} to get + * stream (if Java had out/pointer params we could return both in one call). This stream is + * guaranteed to be set. 4) The first time HBase checksum fails, one would call + * {@link #fallbackToFsChecksum(int)}. That will take lock, and open {@link #stream}. While this + * is going on, others will continue to use the old stream; if they also want to fall back, + * they'll also call {@link #fallbackToFsChecksum(int)}, and block until {@link #stream} is set. + * 5) After some number of checksumOk() calls, we will go back to using HBase checksum. We will + * have 2 handles; however we presume checksums fail so rarely that we don't care. */ private volatile FSDataInputStream stream = null; private volatile FSDataInputStream streamNoFsChecksum = null; @@ -103,17 +101,18 @@ public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException { this(fs, path, false, -1L); } - public FSDataInputStreamWrapper(FileSystem fs, Path path, boolean dropBehind, long readahead) throws IOException { + public FSDataInputStreamWrapper(FileSystem fs, Path path, boolean dropBehind, long readahead) + throws IOException { this(fs, null, path, dropBehind, readahead); } - public FSDataInputStreamWrapper(FileSystem fs, FileLink link, - boolean dropBehind, long readahead) throws IOException { + public FSDataInputStreamWrapper(FileSystem fs, FileLink link, boolean dropBehind, long readahead) + throws IOException { this(fs, link, null, dropBehind, readahead); } private FSDataInputStreamWrapper(FileSystem fs, FileLink link, Path path, boolean dropBehind, - long readahead) throws IOException { + long readahead) throws IOException { assert (path == null) != (link == null); this.path = path; this.link = link; @@ -147,16 +146,16 @@ private void setStreamOptions(FSDataInputStream in) { } /** - * Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any - * reads finish and before any other reads start (what happens in reality is we read the - * tail, then call this based on what's in the tail, then read blocks). + * Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any reads + * finish and before any other reads start (what happens in reality is we read the tail, then call + * this based on what's in the tail, then read blocks). * @param forceNoHBaseChecksum Force not using HBase checksum. 
*/ public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOException { if (hfs == null) return; assert this.stream != null && !this.useHBaseChecksumConfigured; boolean useHBaseChecksum = - !forceNoHBaseChecksum && hfs.useHBaseChecksum() && (hfs.getNoChecksumFs() != hfs); + !forceNoHBaseChecksum && hfs.useHBaseChecksum() && (hfs.getNoChecksumFs() != hfs); if (useHBaseChecksum) { FileSystem fsNc = hfs.getNoChecksumFs(); @@ -196,8 +195,8 @@ public boolean shouldUseHBaseChecksum() { /** * Get the stream to use. Thread-safe. - * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned - * at some point in the past, otherwise the result is undefined. + * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned at some + * point in the past, otherwise the result is undefined. */ public FSDataInputStream getStream(boolean useHBaseChecksum) { return useHBaseChecksum ? this.streamNoFsChecksum : this.stream; @@ -227,8 +226,10 @@ public FSDataInputStream fallbackToFsChecksum(int offCount) throws IOException { /** Report that checksum was ok, so we may ponder going back to HBase checksum. */ public void checksumOk() { - if (this.useHBaseChecksumConfigured && !this.useHBaseChecksum - && (this.hbaseChecksumOffCount.getAndDecrement() < 0)) { + if ( + this.useHBaseChecksumConfigured && !this.useHBaseChecksum + && (this.hbaseChecksumOffCount.getAndDecrement() < 0) + ) { // The stream we need is already open (because we were using HBase checksum in the past). assert this.streamNoFsChecksum != null; this.useHBaseChecksum = true; @@ -239,20 +240,20 @@ private void updateInputStreamStatistics(FSDataInputStream stream) { // If the underlying file system is HDFS, update read statistics upon close. if (stream instanceof HdfsDataInputStream) { /** - * Because HDFS ReadStatistics is calculated per input stream, it is not - * feasible to update the aggregated number in real time. Instead, the - * metrics are updated when an input stream is closed. + * Because HDFS ReadStatistics is calculated per input stream, it is not feasible to update + * the aggregated number in real time. Instead, the metrics are updated when an input stream + * is closed. */ - HdfsDataInputStream hdfsDataInputStream = (HdfsDataInputStream)stream; + HdfsDataInputStream hdfsDataInputStream = (HdfsDataInputStream) stream; synchronized (readStatistics) { - readStatistics.totalBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalBytesRead(); - readStatistics.totalLocalBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalLocalBytesRead(); - readStatistics.totalShortCircuitBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalShortCircuitBytesRead(); - readStatistics.totalZeroCopyBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalZeroCopyBytesRead(); + readStatistics.totalBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalBytesRead(); + readStatistics.totalLocalBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalLocalBytesRead(); + readStatistics.totalShortCircuitBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalShortCircuitBytesRead(); + readStatistics.totalZeroCopyBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalZeroCopyBytesRead(); } } } @@ -291,7 +292,6 @@ public void close() { // we do not care about the close exception as it is for reading, no data loss issue. 
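Seen from a block reader, the five-step checksum dance in the reflowed comment above reduces to the sketch below; `fs`, `path` and `offCount` are stand-ins, and error handling is omitted.

    FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fs, path);
    // (1)-(2) the HFile trailer was read with FS checksums; switch streams if HBase checksums apply
    wrapper.prepareForBlockReader(false);
    boolean hbaseChecksum = wrapper.shouldUseHBaseChecksum(); // (3)
    FSDataInputStream in = wrapper.getStream(hbaseChecksum);
    // (4) on a verification failure: in = wrapper.fallbackToFsChecksum(offCount);
    // (5) after enough good reads, report it so the wrapper can switch back
    wrapper.checksumOk();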
Closeables.closeQuietly(streamNoFsChecksum); - updateInputStreamStatistics(stream); Closeables.closeQuietly(stream); } @@ -331,10 +331,10 @@ public void unbuffer() { if (this.instanceOfCanUnbuffer) { try { this.unbuffer.unbuffer(); - } catch (UnsupportedOperationException e){ + } catch (UnsupportedOperationException e) { if (isLogTraceEnabled) { LOG.trace("Failed to invoke 'unbuffer' method in class " + streamClass - + " . So there may be the stream does not support unbuffering.", e); + + " . So there may be the stream does not support unbuffering.", e); } } } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index ea285ed53fad..2d12fd88c11d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.FileNotFoundException; @@ -43,52 +42,34 @@ /** * The FileLink is a sort of hardlink, that allows access to a file given a set of locations. - * - *
      The Problem: + *
      + * The Problem: *
        - *
      • - * HDFS doesn't have support for hardlinks, and this make impossible to referencing - * the same data blocks using different names. - *
      • - *
      • - * HBase store files in one location (e.g. table/region/family/) and when the file is not - * needed anymore (e.g. compaction, region deletion, ...) moves it to an archive directory. - *
      • + *
      • HDFS doesn't have support for hardlinks, and this make impossible to referencing the same + * data blocks using different names.
      • + *
      • HBase store files in one location (e.g. table/region/family/) and when the file is not needed + * anymore (e.g. compaction, region deletion, ...) moves it to an archive directory.
      • *
      - * If we want to create a reference to a file, we need to remember that it can be in its - * original location or in the archive folder. - * The FileLink class tries to abstract this concept and given a set of locations - * it is able to switch between them making this operation transparent for the user. - * {@link HFileLink} is a more concrete implementation of the {@code FileLink}. - * - *
      Back-references: - * To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore} to keep track of - * the links to a particular file, during the {@code FileLink} creation, a new file is placed - * inside a back-reference directory. There's one back-reference directory for each file that - * has links, and in the directory there's one file per link. - * - *
      HFileLink Example + * If we want to create a reference to a file, we need to remember that it can be in its original + * location or in the archive folder. The FileLink class tries to abstract this concept and given a + * set of locations it is able to switch between them making this operation transparent for the + * user. {@link HFileLink} is a more concrete implementation of the {@code FileLink}. + *
      + * Back-references: To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore} + * to keep track of the links to a particular file, during the {@code FileLink} creation, a new file + * is placed inside a back-reference directory. There's one back-reference directory for each file + * that has links, and in the directory there's one file per link. + *
      + * HFileLink Example *
        - *
      • - * /hbase/table/region-x/cf/file-k - * (Original File) - *
      • - *
      • - * /hbase/table-cloned/region-y/cf/file-k.region-x.table - * (HFileLink to the original file) - *
      • - *
      • - * /hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table - * (HFileLink to the original file) - *
      • - *
      • - * /hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned - * (Back-reference to the link in table-cloned) - *
      • - *
      • - * /hbase/.archive/table/region-x/.links-file-k/region-z.table-2nd-cloned - * (Back-reference to the link in table-2nd-cloned) - *
      • + *
      • /hbase/table/region-x/cf/file-k (Original File)
      • + *
      • /hbase/table-cloned/region-y/cf/file-k.region-x.table (HFileLink to the original file)
      • + *
      • /hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table (HFileLink to the original file) + *
      • + *
      • /hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned (Back-reference to the + * link in table-cloned)
      • + *
      • /hbase/.archive/table/region-x/.links-file-k/region-z.table-2nd-cloned (Back-reference to the + * link in table-2nd-cloned)
      • *
      */ @InterfaceAudience.Private @@ -99,11 +80,11 @@ public class FileLink { public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-"; /** - * FileLink InputStream that handles the switch between the original path - * and the alternative locations, when the file is moved. + * FileLink InputStream that handles the switch between the original path and the alternative + * locations, when the file is moved. */ private static class FileLinkInputStream extends InputStream - implements Seekable, PositionedReadable, CanSetDropBehind, CanSetReadahead, CanUnbuffer { + implements Seekable, PositionedReadable, CanSetDropBehind, CanSetReadahead, CanUnbuffer { private FSDataInputStream in = null; private Path currentPath = null; private long pos = 0; @@ -112,13 +93,12 @@ private static class FileLinkInputStream extends InputStream private final int bufferSize; private final FileSystem fs; - public FileLinkInputStream(final FileSystem fs, final FileLink fileLink) - throws IOException { + public FileLinkInputStream(final FileSystem fs, final FileLink fileLink) throws IOException { this(fs, fileLink, CommonFSUtils.getDefaultBufferSize(fs)); } public FileLinkInputStream(final FileSystem fs, final FileLink fileLink, int bufferSize) - throws IOException { + throws IOException { this.bufferSize = bufferSize; this.fileLink = fileLink; this.fs = fs; @@ -148,7 +128,7 @@ public int read() throws IOException { @Override public int read(byte[] b) throws IOException { - return read(b, 0, b.length); + return read(b, 0, b.length); } @Override @@ -164,7 +144,7 @@ public int read(byte[] b, int off, int len) throws IOException { n = tryOpen().read(b, off, len); } if (n > 0) pos += n; - assert(in.getPos() == pos); + assert (in.getPos() == pos); return n; } @@ -296,18 +276,17 @@ public void unbuffer() { /** * Try to open the file from one of the available locations. - * * @return FSDataInputStream stream of the opened file link * @throws IOException on unexpected error, or file not found. */ private FSDataInputStream tryOpen() throws IOException { IOException exception = null; - for (Path path: fileLink.getLocations()) { + for (Path path : fileLink.getLocations()) { if (path.equals(currentPath)) continue; try { in = fs.open(path, bufferSize); if (pos != 0) in.seek(pos); - assert(in.getPos() == pos) : "Link unable to seek to the right position=" + pos; + assert (in.getPos() == pos) : "Link unable to seek to the right position=" + pos; if (LOG.isTraceEnabled()) { if (currentPath == null) { LOG.debug("link open path=" + path); @@ -316,7 +295,7 @@ private FSDataInputStream tryOpen() throws IOException { } } currentPath = path; - return(in); + return (in); } catch (FileNotFoundException | AccessControlException | RemoteException e) { exception = FileLink.handleAccessLocationException(fileLink, e, exception); } @@ -342,7 +321,7 @@ protected FileLink() { } /** - * @param originPath Original location of the file to link + * @param originPath Original location of the file to link * @param alternativePaths Alternative locations to look for the linked file */ public FileLink(Path originPath, Path... alternativePaths) { @@ -401,7 +380,6 @@ public Path getAvailablePath(FileSystem fs) throws IOException { /** * Get the FileStatus of the referenced file. - * * @param fs {@link FileSystem} on which to get the file status * @return InputStream for the hfile link. * @throws IOException on unexpected error. 
@@ -420,21 +398,21 @@ public FileStatus getFileStatus(FileSystem fs) throws IOException { /** * Handle exceptions which are thrown when access locations of file link - * @param fileLink the file link - * @param newException the exception caught by access the current location + * @param fileLink the file link + * @param newException the exception caught by access the current location * @param previousException the previous exception caught by access the other locations * @return return AccessControlException if access one of the locations caught, otherwise return * FileNotFoundException. The AccessControlException is threw if user scan snapshot * feature is enabled, see * {@link org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController}. * @throws IOException if the exception is neither AccessControlException nor - * FileNotFoundException + * FileNotFoundException */ private static IOException handleAccessLocationException(FileLink fileLink, - IOException newException, IOException previousException) throws IOException { + IOException newException, IOException previousException) throws IOException { if (newException instanceof RemoteException) { newException = ((RemoteException) newException) - .unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); + .unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } if (newException instanceof FileNotFoundException) { // Try another file location @@ -453,9 +431,8 @@ private static IOException handleAccessLocationException(FileLink fileLink, /** * Open the FileLink for read. *
      - * It uses a wrapper of FSDataInputStream that is agnostic to the location - * of the file, even if the file switches between locations. - * + * It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if + * the file switches between locations. * @param fs {@link FileSystem} on which to open the FileLink * @return InputStream for reading the file link. * @throws IOException on unexpected error. @@ -467,10 +444,9 @@ public FSDataInputStream open(final FileSystem fs) throws IOException { /** * Open the FileLink for read. *
      - * It uses a wrapper of FSDataInputStream that is agnostic to the location - * of the file, even if the file switches between locations. - * - * @param fs {@link FileSystem} on which to open the FileLink + * It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if + * the file switches between locations. + * @param fs {@link FileSystem} on which to open the FileLink * @param bufferSize the size of the buffer to be used. * @return InputStream for reading the file link. * @throws IOException on unexpected error. @@ -480,8 +456,8 @@ public FSDataInputStream open(final FileSystem fs, int bufferSize) throws IOExce } /** - * If the passed FSDataInputStream is backed by a FileLink, returns the underlying - * InputStream for the resolved link target. Otherwise, returns null. + * If the passed FSDataInputStream is backed by a FileLink, returns the underlying InputStream for + * the resolved link target. Otherwise, returns null. */ public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStream stream) { if (stream.getWrappedStream() instanceof FileLinkInputStream) { @@ -491,13 +467,13 @@ public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStre } /** - * NOTE: This method must be used only in the constructor! - * It creates a List with the specified locations for the link. + * NOTE: This method must be used only in the constructor! It creates a List with the specified + * locations for the link. */ protected void setLocations(Path originPath, Path... alternativePaths) { assert this.locations == null : "Link locations already set"; - List paths = new ArrayList<>(alternativePaths.length +1); + List paths = new ArrayList<>(alternativePaths.length + 1); if (originPath != null) { paths.add(originPath); } @@ -512,10 +488,9 @@ protected void setLocations(Path originPath, Path... alternativePaths) { /** * Get the directory to store the link back references - * - *

<p>To simplify the reference count process, during the FileLink creation
- * a back-reference is added to the back-reference directory of the specified file.
- *
+ * <p>

      + * To simplify the reference count process, during the FileLink creation a back-reference is added + * to the back-reference directory of the specified file. * @param storeDir Root directory for the link reference folder * @param fileName File Name with links * @return Path for the link back references. @@ -526,7 +501,6 @@ public static Path getBackReferencesDir(final Path storeDir, final String fileNa /** * Get the referenced file name from the reference link directory path. - * * @param dirPath Link references directory path * @return Name of the file referenced */ @@ -566,4 +540,3 @@ public int hashCode() { return Arrays.hashCode(locations); } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index fbed724a207e..4cf350004a3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.IOException; @@ -39,44 +38,39 @@ import org.slf4j.LoggerFactory; /** - * HFileLink describes a link to an hfile. - * - * An hfile can be served from a region or from the hfile archive directory (/hbase/.archive) - * HFileLink allows to access the referenced hfile regardless of the location where it is. - * - *

<p>Searches for hfiles in the following order and locations:
+ * HFileLink describes a link to an hfile. An hfile can be served from a region or from the hfile
+ * archive directory (/hbase/.archive) HFileLink allows to access the referenced hfile regardless of
+ * the location where it is.
+ * <p>
+ * Searches for hfiles in the following order and locations:
 * <ul>
- *  <li>/hbase/table/region/cf/hfile</li>
- *  <li>/hbase/.archive/table/region/cf/hfile</li>
+ * <li>/hbase/table/region/cf/hfile</li>
+ * <li>/hbase/.archive/table/region/cf/hfile</li>
 * </ul>
      - * - * The link checks first in the original path if it is not present - * it fallbacks to the archived path. + * The link checks first in the original path if it is not present it fallbacks to the archived + * path. */ @InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", - justification="To be fixed but warning suppressed for now") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_DOESNT_OVERRIDE_EQUALS", + justification = "To be fixed but warning suppressed for now") public class HFileLink extends FileLink { private static final Logger LOG = LoggerFactory.getLogger(HFileLink.class); /** - * A non-capture group, for HFileLink, so that this can be embedded. - * The HFileLink describe a link to an hfile in a different table/region - * and the name is in the form: table=region-hfile. + * A non-capture group, for HFileLink, so that this can be embedded. The HFileLink describe a link + * to an hfile in a different table/region and the name is in the form: table=region-hfile. *

      * Table name is ([\p{IsAlphabetic}\p{Digit}][\p{IsAlphabetic}\p{Digit}.-]*), so '=' is an invalid - * character for the table name. - * Region name is ([a-f0-9]+), so '-' is an invalid character for the region name. - * HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid) - * and the bulk loaded (_SeqId_[0-9]+_) hfiles. - * - *

<p>Here is an example name: /hbase/test/0123/cf/testtb=4567-abcd where 'testtb' is table name
- * and '4567' is region name and 'abcd' is filename.
+ * character for the table name. Region name is ([a-f0-9]+), so '-' is an invalid character for
+ * the region name. HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid) and
+ * the bulk loaded (_SeqId_[0-9]+_) hfiles.
+ * <p>

      + * Here is an example name: /hbase/test/0123/cf/testtb=4567-abcd where 'testtb' is table name and + * '4567' is region name and 'abcd' is filename. */ - public static final String LINK_NAME_REGEX = - String.format("(?:(?:%s=)?)%s=%s-%s", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); + public static final String LINK_NAME_REGEX = String.format("(?:(?:%s=)?)%s=%s-%s", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, + RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); /** Define the HFile Link name parser in the form of: table=region-hfile */ public static final Pattern LINK_NAME_PATTERN = @@ -85,13 +79,12 @@ public class HFileLink extends FileLink { RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX)); /** - * The pattern should be used for hfile and reference links - * that can be found in /hbase/table/region/family/ + * The pattern should be used for hfile and reference links that can be found in + * /hbase/table/region/family/ */ private static final Pattern REF_OR_HFILE_LINK_PATTERN = - Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX)); + Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", TableName.VALID_NAMESPACE_REGEX, + TableName.VALID_TABLE_QUALIFIER_REGEX, RegionInfoBuilder.ENCODED_REGION_NAME_REGEX)); private final Path archivePath; private final Path originPath; @@ -102,7 +95,7 @@ public class HFileLink extends FileLink { * Dead simple hfile link constructor */ public HFileLink(final Path originPath, final Path tempPath, final Path mobPath, - final Path archivePath) { + final Path archivePath) { this.tempPath = tempPath; this.originPath = originPath; this.mobPath = mobPath; @@ -110,28 +103,24 @@ public HFileLink(final Path originPath, final Path tempPath, final Path mobPath, setLocations(originPath, tempPath, mobPath, archivePath); } - /** - * @param conf {@link Configuration} from which to extract specific archive locations + * @param conf {@link Configuration} from which to extract specific archive locations * @param hFileLinkPattern The path ending with a HFileLink pattern. (table=region-hfile) * @throws IOException on unexpected error. */ public static final HFileLink buildFromHFileLinkPattern(Configuration conf, Path hFileLinkPattern) - throws IOException { + throws IOException { return buildFromHFileLinkPattern(CommonFSUtils.getRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), hFileLinkPattern); + HFileArchiveUtil.getArchivePath(conf), hFileLinkPattern); } - - /** - * @param rootDir Path to the root directory where hbase files are stored - * @param archiveDir Path to the hbase archive directory + * @param rootDir Path to the root directory where hbase files are stored + * @param archiveDir Path to the hbase archive directory * @param hFileLinkPattern The path of the HFile Link. 
*/ - public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, - final Path archiveDir, - final Path hFileLinkPattern) { + public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir, + final Path hFileLinkPattern) { Path hfilePath = getHFileLinkPatternRelativePath(hFileLinkPattern); Path tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath); Path originPath = new Path(rootDir, hfilePath); @@ -142,14 +131,14 @@ public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, /** * Create an HFileLink relative path for the table/region/family/hfile location - * @param table Table name + * @param table Table name * @param region Region Name * @param family Family Name - * @param hfile HFile Name + * @param hfile HFile Name * @return the relative Path to open the specified table/region/family/hfile link */ - public static Path createPath(final TableName table, final String region, - final String family, final String hfile) { + public static Path createPath(final TableName table, final String region, final String family, + final String hfile) { if (HFileLink.isHFileLink(hfile)) { return new Path(family, hfile); } @@ -158,17 +147,16 @@ public static Path createPath(final TableName table, final String region, /** * Create an HFileLink instance from table/region/family/hfile location - * @param conf {@link Configuration} from which to extract specific archive locations - * @param table Table name + * @param conf {@link Configuration} from which to extract specific archive locations + * @param table Table name * @param region Region Name * @param family Family Name - * @param hfile HFile Name + * @param hfile HFile Name * @return Link to the file with the specified table/region/family/hfile location * @throws IOException on unexpected error. */ public static HFileLink build(final Configuration conf, final TableName table, - final String region, final String family, final String hfile) - throws IOException { + final String region, final String family, final String hfile) throws IOException { return HFileLink.buildFromHFileLinkPattern(conf, createPath(table, region, family, hfile)); } @@ -193,7 +181,7 @@ public Path getMobPath() { return this.mobPath; } - /** + /** * @param path Path to check. * @return True if the path is a HFileLink. */ @@ -214,10 +202,8 @@ public static boolean isHFileLink(String fileName) { } /** - * Convert a HFileLink path to a table relative path. - * e.g. the link: /hbase/test/0123/cf/testtb=4567-abcd - * becomes: /hbase/testtb/4567/cf/abcd - * + * Convert a HFileLink path to a table relative path. e.g. the link: + * /hbase/test/0123/cf/testtb=4567-abcd becomes: /hbase/testtb/4567/cf/abcd * @param path HFileLink path * @return Relative table path * @throws IOException on unexpected error. 
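The javadoc above defines the link-name form table=region-hfile and gives the example /hbase/test/0123/cf/testtb=4567-abcd. A minimal, self-contained sketch (not part of this change) of pulling such a name apart is shown below; the regex is a simplified stand-in for LINK_NAME_REGEX, it drops the optional namespace prefix, and the class name is hypothetical.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HFileLinkNameDemo {
  // Simplified stand-in for LINK_NAME_REGEX: table=region-hfile, namespace prefix omitted.
  private static final Pattern SIMPLE_LINK_NAME = Pattern.compile(
    "^([\\p{IsAlphabetic}\\p{Digit}][\\p{IsAlphabetic}\\p{Digit}.-]*)=([a-f0-9]+)-([0-9a-f]+(?:_SeqId_[0-9]+_)?)$");

  public static void main(String[] args) {
    // Example name from the javadoc: 'testtb' is the table, '4567' the region, 'abcd' the hfile.
    Matcher m = SIMPLE_LINK_NAME.matcher("testtb=4567-abcd");
    if (m.matches()) {
      System.out.println("table  = " + m.group(1)); // testtb
      System.out.println("region = " + m.group(2)); // 4567
      System.out.println("hfile  = " + m.group(3)); // abcd
    }
  }
}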
@@ -235,13 +221,11 @@ private static Path getHFileLinkPatternRelativePath(final Path path) { String hfileName = m.group(4); String familyName = path.getParent().getName(); Path tableDir = CommonFSUtils.getTableDir(new Path("./"), tableName); - return new Path(tableDir, new Path(regionName, new Path(familyName, - hfileName))); + return new Path(tableDir, new Path(regionName, new Path(familyName, hfileName))); } /** * Get the HFile name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced HFile */ @@ -250,12 +234,11 @@ public static String getReferencedHFileName(final String fileName) { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(4)); + return (m.group(4)); } /** * Get the Region name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced Region */ @@ -264,12 +247,11 @@ public static String getReferencedRegionName(final String fileName) { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(3)); + return (m.group(3)); } /** * Get the Table name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced Table */ @@ -278,76 +260,71 @@ public static TableName getReferencedTableName(final String fileName) { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(TableName.valueOf(m.group(1), m.group(2))); + return (TableName.valueOf(m.group(1), m.group(2))); } /** * Create a new HFileLink name - * * @param hfileRegionInfo - Linked HFile Region Info - * @param hfileName - Linked HFile name + * @param hfileName - Linked HFile name * @return file name of the HFile Link */ public static String createHFileLinkName(final RegionInfo hfileRegionInfo, - final String hfileName) { - return createHFileLinkName(hfileRegionInfo.getTable(), - hfileRegionInfo.getEncodedName(), hfileName); + final String hfileName) { + return createHFileLinkName(hfileRegionInfo.getTable(), hfileRegionInfo.getEncodedName(), + hfileName); } /** * Create a new HFileLink name - * - * @param tableName - Linked HFile table name + * @param tableName - Linked HFile table name * @param regionName - Linked HFile region name - * @param hfileName - Linked HFile name + * @param hfileName - Linked HFile name * @return file name of the HFile Link */ - public static String createHFileLinkName(final TableName tableName, - final String regionName, final String hfileName) { + public static String createHFileLinkName(final TableName tableName, final String regionName, + final String hfileName) { String s = String.format("%s=%s-%s", - tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), - regionName, hfileName); + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), regionName, hfileName); return s; } /** * Create a new HFileLink - * - *

<p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- *
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
- * @param dstFamilyPath - Destination path (table/region/cf/)
+ * <p>

      + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink + * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileRegionInfo - Linked HFile Region Info - * @param hfileName - Linked HFile name + * @param hfileName - Linked HFile name * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final RegionInfo hfileRegionInfo, - final String hfileName) throws IOException { + final Path dstFamilyPath, final RegionInfo hfileRegionInfo, final String hfileName) + throws IOException { return create(conf, fs, dstFamilyPath, hfileRegionInfo, hfileName, true); } /** * Create a new HFileLink - * - *

<p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- *
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
- * @param dstFamilyPath - Destination path (table/region/cf/)
+ * <p>

      + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink + * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileRegionInfo - Linked HFile Region Info - * @param hfileName - Linked HFile name - * @param createBackRef - Whether back reference should be created. Defaults to true. + * @param hfileName - Linked HFile name + * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final RegionInfo hfileRegionInfo, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final RegionInfo hfileRegionInfo, final String hfileName, + final boolean createBackRef) throws IOException { TableName linkedTable = hfileRegionInfo.getTable(); String linkedRegion = hfileRegionInfo.getEncodedName(); return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, createBackRef); @@ -355,49 +332,47 @@ public static String create(final Configuration conf, final FileSystem fs, /** * Create a new HFileLink - * - *

<p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- *
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
+ * <p>

      + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) - * @param linkedTable - Linked Table Name - * @param linkedRegion - Linked Region Name - * @param hfileName - Linked HFile name + * @param linkedTable - Linked Table Name + * @param linkedRegion - Linked Region Name + * @param hfileName - Linked HFile name * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, - final String hfileName) throws IOException { + final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, + final String hfileName) throws IOException { return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, true); } /** * Create a new HFileLink. In the event of link creation failure, this method throws an * IOException, so that the calling upper laying can decide on how to proceed with this. - * - *

<p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- *
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
+ * <p>

      + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) - * @param linkedTable - Linked Table Name - * @param linkedRegion - Linked Region Name - * @param hfileName - Linked HFile name + * @param linkedTable - Linked Table Name + * @param linkedRegion - Linked Region Name + * @param hfileName - Linked HFile name * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, + final String hfileName, final boolean createBackRef) throws IOException { String familyName = dstFamilyPath.getName(); String regionName = dstFamilyPath.getParent().getName(); - String tableName = CommonFSUtils.getTableName(dstFamilyPath.getParent().getParent()) - .getNameAsString(); + String tableName = + CommonFSUtils.getTableName(dstFamilyPath.getParent().getParent()).getNameAsString(); return create(conf, fs, dstFamilyPath, familyName, tableName, regionName, linkedTable, linkedRegion, hfileName, createBackRef); @@ -405,25 +380,25 @@ public static String create(final Configuration conf, final FileSystem fs, /** * Create a new HFileLink - * - *

<p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
+ * <p>

      + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) - * @param dstTableName - Destination table name + * @param dstTableName - Destination table name * @param dstRegionName - Destination region name - * @param linkedTable - Linked Table Name - * @param linkedRegion - Linked Region Name - * @param hfileName - Linked HFile name + * @param linkedTable - Linked Table Name + * @param linkedRegion - Linked Region Name + * @param hfileName - Linked HFile name * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final String familyName, final String dstTableName, - final String dstRegionName, final TableName linkedTable, final String linkedRegion, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final String familyName, final String dstTableName, + final String dstRegionName, final TableName linkedTable, final String linkedRegion, + final String hfileName, final boolean createBackRef) throws IOException { String name = createHFileLinkName(linkedTable, linkedRegion, hfileName); String refName = createBackReferenceName(dstTableName, dstRegionName); @@ -431,8 +406,8 @@ public static String create(final Configuration conf, final FileSystem fs, fs.mkdirs(dstFamilyPath); // Make sure the FileLink reference directory exists - Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - linkedTable, linkedRegion, familyName); + Path archiveStoreDir = + HFileArchiveUtil.getStoreArchivePath(conf, linkedTable, linkedRegion, familyName); Path backRefPath = null; if (createBackRef) { Path backRefssDir = getBackReferencesDir(archiveStoreDir, hfileName); @@ -455,18 +430,17 @@ public static String create(final Configuration conf, final FileSystem fs, } throw e; } - throw new IOException("File link=" + name + " already exists under " + - dstFamilyPath + " folder."); + throw new IOException( + "File link=" + name + " already exists under " + dstFamilyPath + " folder."); } /** * Create a new HFileLink starting from a hfileLink name - * - *

<p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- *
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
+ * <p>

      + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. + * @param conf {@link Configuration} to read for the archive directory name + * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileLinkName - HFileLink name (it contains hfile-region-table) * @param createBackRef - Whether back reference should be created. Defaults to true. @@ -474,30 +448,28 @@ public static String create(final Configuration conf, final FileSystem fs, * @throws IOException on file or parent directory creation failure. */ public static String createFromHFileLink(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef) - throws IOException { + final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef) + throws IOException { Matcher m = LINK_NAME_PATTERN.matcher(hfileLinkName); if (!m.matches()) { throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!"); } - return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), - m.group(3), m.group(4), createBackRef); + return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), m.group(3), + m.group(4), createBackRef); } /** * Create the back reference name */ - //package-private for testing - static String createBackReferenceName(final String tableNameStr, - final String regionName) { + // package-private for testing + static String createBackReferenceName(final String tableNameStr, final String regionName) { return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '='); } /** * Get the full path of the HFile referenced by the back reference - * - * @param rootDir root hbase directory + * @param rootDir root hbase directory * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile */ @@ -511,8 +483,8 @@ public static Path getHFileFromBackReference(final Path rootDir, final Path link Path regionPath = familyPath.getParent(); Path tablePath = regionPath.getParent(); - String linkName = createHFileLinkName(CommonFSUtils.getTableName(tablePath), - regionPath.getName(), hfileName); + String linkName = + createHFileLinkName(CommonFSUtils.getTableName(tablePath), regionPath.getName(), hfileName); Path linkTableDir = CommonFSUtils.getTableDir(rootDir, linkTableName); Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName); return new Path(new Path(regionDir, familyPath.getName()), linkName); @@ -521,22 +493,20 @@ public static Path getHFileFromBackReference(final Path rootDir, final Path link public static Pair parseBackReferenceName(String name) { int separatorIndex = name.indexOf('.'); String linkRegionName = name.substring(0, separatorIndex); - String tableSubstr = name.substring(separatorIndex + 1) - .replace('=', TableName.NAMESPACE_DELIM); + String tableSubstr = name.substring(separatorIndex + 1).replace('=', TableName.NAMESPACE_DELIM); TableName linkTableName = TableName.valueOf(tableSubstr); return new Pair<>(linkTableName, linkRegionName); } /** * Get the full path of the HFile referenced by the back reference - * - * @param conf {@link Configuration} to read for the archive directory name + * @param conf {@link Configuration} to read for the archive directory name * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile * @throws IOException on 
unexpected error. */ public static Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath) - throws IOException { + throws IOException { return getHFileFromBackReference(CommonFSUtils.getRootDir(conf), linkRefPath); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index ab293e36277f..95665391740e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.nio.ByteBuffer; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -40,25 +38,24 @@ import org.slf4j.LoggerFactory; /** - * A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up - * either the top or bottom half of a HFile where 'bottom' is the first half - * of the file containing the keys that sort lowest and 'top' is the second half - * of the file with keys that sort greater than those of the bottom half. - * The top includes the split files midkey, of the key that follows if it does + * A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up either the + * top or bottom half of a HFile where 'bottom' is the first half of the file containing the keys + * that sort lowest and 'top' is the second half of the file with keys that sort greater than those + * of the bottom half. The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * - *

<p>This type works in tandem with the {@link Reference} type. This class
- * is used reading while Reference is used writing.
- *
- * <p>This file is not splitable. Calls to {@link #midKey()} return null.
+ * <p>
+ * This type works in tandem with the {@link Reference} type. This class is used reading while
+ * Reference is used writing.
+ * <p>

      + * This file is not splitable. Calls to {@link #midKey()} return null. */ @InterfaceAudience.Private public class HalfStoreFileReader extends StoreFileReader { private static final Logger LOG = LoggerFactory.getLogger(HalfStoreFileReader.class); final boolean top; - // This is the key we split around. Its the first possible entry on a row: + // This is the key we split around. Its the first possible entry on a row: // i.e. empty column and a timestamp of LATEST_TIMESTAMP. - protected final byte [] splitkey; + protected final byte[] splitkey; private final Cell splitCell; @@ -68,20 +65,20 @@ public class HalfStoreFileReader extends StoreFileReader { /** * Creates a half file reader for a hfile referred to by an hfilelink. - * @param context Reader context info - * @param fileInfo HFile info + * @param context Reader context info + * @param fileInfo HFile info * @param cacheConf CacheConfig - * @param r original reference file (contains top or bottom) - * @param refCount reference count - * @param conf Configuration + * @param r original reference file (contains top or bottom) + * @param refCount reference count + * @param conf Configuration */ public HalfStoreFileReader(final ReaderContext context, final HFileInfo fileInfo, - final CacheConfig cacheConf, final Reference r, - AtomicInteger refCount, final Configuration conf) throws IOException { + final CacheConfig cacheConf, final Reference r, AtomicInteger refCount, + final Configuration conf) throws IOException { super(context, fileInfo, cacheConf, refCount, conf); // This is not actual midkey for this half-file; its just border - // around which we split top and bottom. Have to look in files to find - // actual last and first keys for bottom and top halves. Half-files don't + // around which we split top and bottom. Have to look in files to find + // actual last and first keys for bottom and top halves. Half-files don't // have an actual midkey themselves. No midkey is how we indicate file is // not splittable. 
this.splitkey = r.getSplitKey(); @@ -95,8 +92,8 @@ protected boolean isTop() { } @Override - public HFileScanner getScanner(final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { + public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, + final boolean isCompaction) { final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction); return new HFileScanner() { final HFileScanner delegate = s; @@ -202,8 +199,8 @@ public int seekTo(Cell key) throws IOException { boolean res = delegate.seekBefore(splitCell); if (!res) { throw new IOException( - "Seeking for a key in bottom of file, but key exists in top of file, " + - "failed on seekBefore(midkey)"); + "Seeking for a key in bottom of file, but key exists in top of file, " + + "failed on seekBefore(midkey)"); } return 1; } @@ -227,7 +224,7 @@ public int reseekTo(Cell key) throws IOException { boolean res = delegate.seekBefore(splitCell); if (!res) { throw new IOException("Seeking for a key in bottom of file, but" - + " key exists in top of file, failed on seekBefore(midkey)"); + + " key exists in top of file, failed on seekBefore(midkey)"); } return 1; } @@ -243,8 +240,10 @@ public int reseekTo(Cell key) throws IOException { public boolean seekBefore(Cell key) throws IOException { if (top) { Optional fk = getFirstKey(); - if (fk.isPresent() && - PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0) { + if ( + fk.isPresent() + && PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0 + ) { return false; } } else { @@ -281,12 +280,12 @@ public void shipped() throws IOException { } }; } - + @Override public boolean passesKeyRangeFilter(Scan scan) { return true; } - + @Override public Optional getLastKey() { if (top) { @@ -326,7 +325,7 @@ public Optional getFirstKey() { } catch (IOException e) { LOG.warn("Failed seekTo first KV in the file", e); } finally { - if(scanner != null) { + if (scanner != null) { scanner.close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java index 72da73e1e920..c2197cef9457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @@ -30,7 +29,7 @@ public class MetricsIO { public MetricsIO(MetricsIOWrapper wrapper) { this(CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createIO(wrapper), wrapper); + .createIO(wrapper), wrapper); } MetricsIO(MetricsIOSource source, MetricsIOWrapper wrapper) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java index 1ce762a0ad2e..687d58334582 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.io.hfile.HFile; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java index 845005f1bbd0..ed3986f58838 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,44 +23,42 @@ import java.io.IOException; import java.io.InputStream; import java.util.Arrays; - import org.apache.commons.io.IOUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * A reference to the top or bottom half of a store file where 'bottom' is the first half - * of the file containing the keys that sort lowest and 'top' is the second half - * of the file with keys that sort greater than those of the bottom half. The file referenced - * lives under a different region. References are made at region split time. - * - *

<p>References work with a special half store file type. References know how
- * to write out the reference format in the file system and are what is juggled
- * when references are mixed in with direct store files. The half store file
- * type is used reading the referred to file.
- *
- * <p>References to store files located over in some other region look like
- * this in the file system
- * 1278437856009925445.3323223323:
- * i.e. an id followed by hash of the referenced region.
- * Note, a region is itself not splittable if it has instances of store file
- * references. References are cleaned up by compactions.
+ * A reference to the top or bottom half of a store file where 'bottom' is the first half of the
+ * file containing the keys that sort lowest and 'top' is the second half of the file with keys that
+ * sort greater than those of the bottom half. The file referenced lives under a different region.
+ * References are made at region split time.
+ * <p>
+ * References work with a special half store file type. References know how to write out the
+ * reference format in the file system and are what is juggled when references are mixed in with
+ * direct store files. The half store file type is used reading the referred to file.
+ * <p>

      + * References to store files located over in some other region look like this in the file system + * 1278437856009925445.3323223323: i.e. an id followed by hash of the referenced + * region. Note, a region is itself not splittable if it has instances of store file references. + * References are cleaned up by compactions. */ @InterfaceAudience.Private public class Reference { - private byte [] splitkey; + private byte[] splitkey; private Range region; /** - * For split HStoreFiles, it specifies if the file covers the lower half or - * the upper half of the key range + * For split HStoreFiles, it specifies if the file covers the lower half or the upper half of the + * key range */ static enum Range { /** HStoreFile contains upper half of key range */ @@ -71,28 +68,25 @@ static enum Range { } /** - * @param splitRow - * @return A {@link Reference} that points at top half of a an hfile + * n * @return A {@link Reference} that points at top half of a an hfile */ - public static Reference createTopReference(final byte [] splitRow) { + public static Reference createTopReference(final byte[] splitRow) { return new Reference(splitRow, Range.top); } /** - * @param splitRow - * @return A {@link Reference} that points at the bottom half of a an hfile + * n * @return A {@link Reference} that points at the bottom half of a an hfile */ - public static Reference createBottomReference(final byte [] splitRow) { + public static Reference createBottomReference(final byte[] splitRow) { return new Reference(splitRow, Range.bottom); } /** * Constructor - * @param splitRow This is row we are splitting around. - * @param fr + * @param splitRow This is row we are splitting around. n */ - Reference(final byte [] splitRow, final Range fr) { - this.splitkey = splitRow == null? null: KeyValueUtil.createFirstOnRow(splitRow).getKey(); + Reference(final byte[] splitRow, final Range fr) { + this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey(); this.region = fr; } @@ -108,17 +102,16 @@ public Reference() { } /** - * - * @return Range + * n */ public Range getFileRegion() { return this.region; } /** - * @return splitKey + * n */ - public byte [] getSplitKey() { + public byte[] getSplitKey() { return splitkey; } @@ -135,20 +128,19 @@ public static boolean isTopFileRegion(final Range r) { } /** - * @deprecated Writables are going away. Use the pb serialization methods instead. - * Remove in a release after 0.96 goes out. This is here only to migrate - * old Reference files written with Writables before 0.96. + * @deprecated Writables are going away. Use the pb serialization methods instead. Remove in a + * release after 0.96 goes out. This is here only to migrate old Reference files + * written with Writables before 0.96. */ @Deprecated public void readFields(DataInput in) throws IOException { boolean tmp = in.readBoolean(); // If true, set region to top. - this.region = tmp? Range.top: Range.bottom; + this.region = tmp ? Range.top : Range.bottom; this.splitkey = Bytes.readByteArray(in); } - public Path write(final FileSystem fs, final Path p) - throws IOException { + public Path write(final FileSystem fs, final Path p) throws IOException { FSDataOutputStream out = fs.create(p, false); try { out.write(toByteArray()); @@ -159,26 +151,21 @@ public Path write(final FileSystem fs, final Path p) } /** - * Read a Reference from FileSystem. - * @param fs - * @param p - * @return New Reference made from passed p - * @throws IOException + * Read a Reference from FileSystem. 
nn * @return New Reference made from passed p n */ - public static Reference read(final FileSystem fs, final Path p) - throws IOException { + public static Reference read(final FileSystem fs, final Path p) throws IOException { InputStream in = fs.open(p); try { // I need to be able to move back in the stream if this is not a pb serialization so I can // do the Writable decoding instead. - in = in.markSupported()? in: new BufferedInputStream(in); + in = in.markSupported() ? in : new BufferedInputStream(in); int pblen = ProtobufUtil.lengthOfPBMagic(); in.mark(pblen); - byte [] pbuf = new byte[pblen]; - IOUtils.readFully(in, pbuf,0, pblen); + byte[] pbuf = new byte[pblen]; + IOUtils.readFully(in, pbuf, 0, pblen); // WATCHOUT! Return in middle of function!!! if (ProtobufUtil.isPBMagicPrefix(pbuf)) return convert(FSProtos.Reference.parseFrom(in)); - // Else presume Writables. Need to reset the stream since it didn't start w/ pb. + // Else presume Writables. Need to reset the stream since it didn't start w/ pb. // We won't bother rewriting thie Reference as a pb since Reference is transitory. in.reset(); Reference r = new Reference(); @@ -194,8 +181,9 @@ public static Reference read(final FileSystem fs, final Path p) public FSProtos.Reference convert() { FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder(); - builder.setRange(isTopFileRegion(getFileRegion())? - FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM); + builder.setRange(isTopFileRegion(getFileRegion()) + ? FSProtos.Reference.Range.TOP + : FSProtos.Reference.Range.BOTTOM); builder.setSplitkey(UnsafeByteOperations.unsafeWrap(getSplitKey())); return builder.build(); } @@ -203,17 +191,16 @@ public FSProtos.Reference convert() { public static Reference convert(final FSProtos.Reference r) { Reference result = new Reference(); result.splitkey = r.getSplitkey().toByteArray(); - result.region = r.getRange() == FSProtos.Reference.Range.TOP? Range.top: Range.bottom; + result.region = r.getRange() == FSProtos.Reference.Range.TOP ? Range.top : Range.bottom; return result; } /** - * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom - * (w/o the delimiter, pb reads to EOF which may not be what you want). - * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. - * @throws IOException + * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the + * delimiter, pb reads to EOF which may not be what you want). + * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n */ - byte [] toByteArray() throws IOException { + byte[] toByteArray() throws IOException { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java index c495201a45fd..9ad95ff98f6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,43 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * WALLink describes a link to a WAL. - * - * An wal can be in /hbase/.logs/<server>/<wal> - * or it can be in /hbase/.oldlogs/<wal> - * - * The link checks first in the original path, - * if it is not present it fallbacks to the archived path. + * WALLink describes a link to a WAL. An wal can be in /hbase/.logs/<server>/<wal> or it + * can be in /hbase/.oldlogs/<wal> The link checks first in the original path, if it is not + * present it fallbacks to the archived path. */ @InterfaceAudience.Private public class WALLink extends FileLink { /** - * @param conf {@link Configuration} from which to extract specific archive locations + * @param conf {@link Configuration} from which to extract specific archive locations * @param serverName Region Server owner of the log - * @param logName WAL file name + * @param logName WAL file name * @throws IOException on unexpected error. */ - public WALLink(final Configuration conf, - final String serverName, final String logName) throws IOException { + public WALLink(final Configuration conf, final String serverName, final String logName) + throws IOException { this(CommonFSUtils.getWALRootDir(conf), serverName, logName); } /** * @param walRootDir Path to the root directory where hbase files are stored * @param serverName Region Server owner of the log - * @param logName WAL file name + * @param logName WAL file name */ public WALLink(final Path walRootDir, final String serverName, final String logName) { final Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -60,7 +54,7 @@ public WALLink(final Path walRootDir, final String serverName, final String logN } /** - * @param originPath Path to the wal in the log directory + * @param originPath Path to the wal in the log directory * @param archivePath Path to the wal in the archived log directory */ public WALLink(final Path originPath, final Path archivePath) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java index 017c4d14b6e1..92b5e9fa67dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; @@ -27,11 +25,8 @@ @InterfaceAudience.Private public interface WritableWithSize { /** - * Provide a size hint to the caller. write() should ideally - * not go beyond this if at all possible. - * - * You can return 0 if there is no size hint. - * + * Provide a size hint to the caller. write() should ideally not go beyond this if at all + * possible. You can return 0 if there is no size hint. 
* @return the size of the writable */ long getWritableSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java index cd8932269c5a..187b0536a30b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Snapshot of block cache age in cache. - * This object is preferred because we can control how it is serialized out when JSON'ing. + * Snapshot of block cache age in cache. This object is preferred because we can control how it is + * serialized out when JSON'ing. */ @InterfaceAudience.Private public class AgeSnapshot { @@ -32,7 +32,7 @@ public class AgeSnapshot { AgeSnapshot(final FastLongHistogram ageHistogram) { this.ageHistogram = ageHistogram; - this.quantiles = ageHistogram.getQuantiles(new double[]{0.75, 0.95, 0.98, 0.99, 0.999}); + this.quantiles = ageHistogram.getQuantiles(new double[] { 0.75, 0.95, 0.98, 0.99, 0.999 }); } public double get75thPercentile() { @@ -55,7 +55,6 @@ public double get999thPercentile() { return quantiles[4]; } - public double getMean() { return this.ageHistogram.getMean(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 6f32d623c5ea..4c6d438ca8cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,19 +18,18 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - import org.apache.yetus.audience.InterfaceAudience; /** - * Block cache interface. Anything that implements the {@link Cacheable} - * interface can be put in the cache. + * Block cache interface. Anything that implements the {@link Cacheable} interface can be put in the + * cache. */ @InterfaceAudience.Private public interface BlockCache extends Iterable { /** * Add block to cache. * @param cacheKey The block's cache key. - * @param buf The block contents wrapped in a ByteBuffer. + * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory */ void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory); @@ -39,16 +37,16 @@ public interface BlockCache extends Iterable { /** * Add block to cache (defaults to not in-memory). * @param cacheKey The block's cache key. - * @param buf The object to cache. + * @param buf The object to cache. */ void cacheBlock(BlockCacheKey cacheKey, Cacheable buf); /** * Fetch block from cache. - * @param cacheKey Block to fetch. - * @param caching Whether this request has caching enabled (used for stats) - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check locking) + * @param cacheKey Block to fetch. 
+ * @param caching Whether this request has caching enabled (used for stats) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not * @return Block or null if block is not in 2 cache. */ @@ -57,16 +55,16 @@ Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, /** * Fetch block from cache. - * @param cacheKey Block to fetch. - * @param caching Whether this request has caching enabled (used for stats) - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check locking) + * @param cacheKey Block to fetch. + * @param caching Whether this request has caching enabled (used for stats) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * @param blockType BlockType + * @param blockType BlockType * @return Block or null if block is not in 2 cache. */ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics, BlockType blockType) { + boolean updateCacheMetrics, BlockType blockType) { return getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @@ -79,14 +77,12 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe /** * Evicts all blocks for the given HFile. - * * @return the number of blocks evicted */ int evictBlocksByHfileName(String hfileName); /** - * Get the statistics for this block cache. - * @return Stats + * Get the statistics for this block cache. n */ CacheStats getStats(); @@ -131,11 +127,11 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe */ long getBlockCount(); - /** - * Returns the number of data blocks currently cached in the block cache. - * @return number of blocks in the cache - */ - long getDataBlockCount(); + /** + * Returns the number of data blocks currently cached in the block cache. + * @return number of blocks in the cache + */ + long getDataBlockCount(); /** * @return Iterator over the blocks in the cache. @@ -146,7 +142,7 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe /** * @return The list of sub blockcaches that make up this one; returns null if no sub caches. */ - BlockCache [] getBlockCaches(); + BlockCache[] getBlockCaches(); /** * Check if block type is meta or index block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java index 12c769ec805a..38a296aad523 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.concurrent.ForkJoinPool; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; @@ -50,13 +49,13 @@ public final class BlockCacheFactory { /** * If the chosen ioengine can persist its state across restarts, the path to the file to persist - * to. This file is NOT the data file. It is a file into which we will serialize the map of - * what is in the data file. For example, if you pass the following argument as + * to. This file is NOT the data file. It is a file into which we will serialize the map of what + * is in the data file. For example, if you pass the following argument as * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file * /tmp/bucketcache.data but the metadata on where the data is in the supplied file - * is an in-memory map that needs to be persisted across restarts. Where to store this - * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. + * is an in-memory map that needs to be persisted across restarts. Where to store this in-memory + * state is what you supply here: e.g. /tmp/bucketcache.map. */ public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; @@ -103,16 +102,16 @@ public static BlockCache createBlockCache(Configuration conf) { boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); if (useExternal) { BlockCache l2CacheInstance = createExternalBlockcache(conf); - return l2CacheInstance == null ? - l1Cache : - new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance); + return l2CacheInstance == null + ? l1Cache + : new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance); } else { // otherwise use the bucket cache. BucketCache bucketCache = createBucketCache(conf); if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { // Non combined mode is off from 2.0 LOG.warn( - "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); + "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); } return bucketCache == null ? l1Cache : new CombinedBlockCache(l1Cache, bucketCache); } @@ -125,8 +124,8 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) } String policy = c.get(BLOCKCACHE_POLICY_KEY, BLOCKCACHE_POLICY_DEFAULT); int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - LOG.info("Allocating BlockCache size=" + - StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); + LOG.info("Allocating BlockCache size=" + StringUtils.byteDesc(cacheSize) + ", blockSize=" + + StringUtils.byteDesc(blockSize)); if (policy.equalsIgnoreCase("LRU")) { return new LruBlockCache(cacheSize, blockSize, true, c); } else if (policy.equalsIgnoreCase("IndexOnlyLRU")) { @@ -141,13 +140,14 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) } /** - * Enum of all built in external block caches. - * This is used for config. + * Enum of all built in external block caches. This is used for config. */ private static enum ExternalBlockCaches { memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"); + // TODO(eclark): Consider more. Redis, etc. 
Class clazz; + ExternalBlockCaches(String clazzName) { try { clazz = (Class) Class.forName(clazzName); @@ -155,6 +155,7 @@ private static enum ExternalBlockCaches { clazz = null; } } + ExternalBlockCaches(Class clazz) { this.clazz = clazz; } @@ -168,12 +169,11 @@ private static BlockCache createExternalBlockcache(Configuration c) { // Get the class, from the config. s try { - klass = ExternalBlockCaches - .valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; + klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; } catch (IllegalArgumentException exception) { try { - klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName( - "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); + klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, + Class.forName("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); } catch (ClassNotFoundException e) { return null; } @@ -191,7 +191,7 @@ private static BlockCache createExternalBlockcache(Configuration c) { } private static BucketCache createBucketCache(Configuration c) { - // Check for L2. ioengine name must be non-null. + // Check for L2. ioengine name must be non-null. String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) { return null; @@ -200,20 +200,19 @@ private static BucketCache createBucketCache(Configuration c) { int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c); if (bucketCacheSize <= 0) { - throw new IllegalStateException("bucketCacheSize <= 0; Check " + - BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size"); + throw new IllegalStateException("bucketCacheSize <= 0; Check " + BUCKET_CACHE_SIZE_KEY + + " setting and/or server java heap size"); } if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) { LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer " - + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note"); + + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note"); } - int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, - DEFAULT_BUCKET_CACHE_WRITER_THREADS); - int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, - DEFAULT_BUCKET_CACHE_WRITER_QUEUE); + int writerThreads = + c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS); + int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE); String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY); String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY); - int [] bucketSizes = null; + int[] bucketSizes = null; if (configuredBucketSizes != null) { bucketSizes = new int[configuredBucketSizes.length]; for (int i = 0; i < configuredBucketSizes.length; i++) { @@ -225,22 +224,22 @@ private static BucketCache createBucketCache(Configuration c) { // See BucketEntry where offset to each block is represented using 5 bytes (instead of 8 // bytes long). We would like to save heap overhead as less as possible. throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '" - + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256"); + + BUCKET_CACHE_BUCKETS_KEY + "'. 
All bucket sizes to be multiples of 256"); } bucketSizes[i] = bucketSize; } } BucketCache bucketCache = null; try { - int ioErrorsTolerationDuration = c.getInt( - "hbase.bucketcache.ioengine.errors.tolerated.duration", + int ioErrorsTolerationDuration = + c.getInt("hbase.bucketcache.ioengine.errors.tolerated.duration", BucketCache.DEFAULT_ERROR_TOLERATION_DURATION); // Bucket cache logs its stats on creation internal to the constructor. - bucketCache = new BucketCache(bucketCacheIOEngineName, - bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath, - ioErrorsTolerationDuration, c); + bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize, + bucketSizes, writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration, c); } catch (IOException ioex) { - LOG.error("Can't instantiate bucket cache", ioex); throw new RuntimeException(ioex); + LOG.error("Can't instantiate bucket cache", ioex); + throw new RuntimeException(ioex); } return bucketCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java index 4683c3520c1b..f9c52e4acbc1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; /** * Cache Key for use with implementations of {@link BlockCache} @@ -35,14 +35,14 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { /** * Construct a new BlockCacheKey * @param hfileName The name of the HFile this block belongs to. - * @param offset Offset of the block into the file + * @param offset Offset of the block into the file */ public BlockCacheKey(String hfileName, long offset) { this(hfileName, offset, true, BlockType.DATA); } public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica, - BlockType blockType) { + BlockType blockType) { this.isPrimaryReplicaBlock = isPrimaryReplica; this.hfileName = hfileName; this.offset = offset; @@ -59,8 +59,7 @@ public boolean equals(Object o) { if (o instanceof BlockCacheKey) { BlockCacheKey k = (BlockCacheKey) o; return offset == k.offset - && (hfileName == null ? k.hfileName == null : hfileName - .equals(k.hfileName)); + && (hfileName == null ? k.hfileName == null : hfileName.equals(k.hfileName)); } else { return false; } @@ -74,13 +73,12 @@ public String toString() { public static final long FIXED_OVERHEAD = ClassSize.estimateBase(BlockCacheKey.class, false); /** - * Strings have two bytes per character due to default Java Unicode encoding - * (hence length times 2). + * Strings have two bytes per character due to default Java Unicode encoding (hence length times + * 2). 
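The multiple-of-256 restriction enforced in createBucketCache() above exists because BucketEntry packs each block offset into 5 bytes; a standalone sketch of the same validation (method name is illustrative, not from this change):

  // Mirrors the createBucketCache() check: every configured bucket size must be a multiple of 256.
  static int[] parseBucketSizes(String[] configured) {
    if (configured == null) {
      return null;
    }
    int[] sizes = new int[configured.length];
    for (int i = 0; i < configured.length; i++) {
      int size = Integer.parseInt(configured[i].trim());
      if (size % 256 != 0) {
        throw new IllegalArgumentException(
          "Illegal value: " + size + "; bucket sizes must be multiples of 256");
      }
      sizes[i] = size;
    }
    return sizes;
  }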
*/ @Override public long heapSize() { - return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + - 2 * hfileName.length()); + return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + 2 * hfileName.length()); } // can't avoid this unfortunately diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index c2cf82148bee..2a2faae3eaef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,16 +75,14 @@ public FastLongHistogram read(JsonReader in) throws IOException { }).setPrettyPrinting().create(); /** - * @param cb - * @return The block content as String. + * n * @return The block content as String. */ public static String toString(final CachedBlock cb, final long now) { return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now); } /** - * Little data structure to hold counts for a file. - * Used doing a toJSON. + * Little data structure to hold counts for a file. Used doing a toJSON. */ static class CachedBlockCountsPerFile { private int count = 0; @@ -122,7 +120,7 @@ public String getFilename() { * @return A JSON String of filename and counts of blocks */ public static String toJSON(String filename, NavigableSet blocks) - throws IOException { + throws IOException { CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename); for (CachedBlock cb : blocks) { counts.count++; @@ -151,63 +149,59 @@ public static String toJSON(BlockCache bc) throws IOException { } /** - * @param cb - * @return The block content of bc as a String minus the filename. + * n * @return The block content of bc as a String minus the filename. */ public static String toStringMinusFileName(final CachedBlock cb, final long now) { - return "offset=" + cb.getOffset() + - ", size=" + cb.getSize() + - ", age=" + (now - cb.getCachedTime()) + - ", type=" + cb.getBlockType() + - ", priority=" + cb.getBlockPriority(); + return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age=" + + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority=" + + cb.getBlockPriority(); } /** * Get a {@link CachedBlocksByFile} instance and load it up by iterating content in * {@link BlockCache}. * @param conf Used to read configurations - * @param bc Block Cache to iterate. + * @param bc Block Cache to iterate. 
* @return Laoded up instance of CachedBlocksByFile */ public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf, - final BlockCache bc) { + final BlockCache bc) { CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf); - for (CachedBlock cb: bc) { + for (CachedBlock cb : bc) { if (cbsbf.update(cb)) break; } return cbsbf; } private static int compareCacheBlock(Cacheable left, Cacheable right, - boolean includeNextBlockMetadata) { + boolean includeNextBlockMetadata) { ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength()); left.serialize(l, includeNextBlockMetadata); ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength()); right.serialize(r, includeNextBlockMetadata); - return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), - r.array(), r.arrayOffset(), r.limit()); + return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(), + r.limit()); } /** * Validate that the existing and newBlock are the same without including the nextBlockMetadata, - * if not, throw an exception. If they are the same without the nextBlockMetadata, - * return the comparison. - * + * if not, throw an exception. If they are the same without the nextBlockMetadata, return the + * comparison. * @param existing block that is existing in the cache. * @param newBlock block that is trying to be cached. * @param cacheKey the cache key of the blocks. * @return comparison of the existing block to the newBlock. */ public static int validateBlockAddition(Cacheable existing, Cacheable newBlock, - BlockCacheKey cacheKey) { + BlockCacheKey cacheKey) { int comparison = compareCacheBlock(existing, newBlock, false); if (comparison != 0) { - throw new RuntimeException("Cached block contents differ, which should not have happened." - + "cacheKey:" + cacheKey); + throw new RuntimeException( + "Cached block contents differ, which should not have happened." + "cacheKey:" + cacheKey); } if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) { comparison = ((HFileBlock) existing).getNextBlockOnDiskSize() - - ((HFileBlock) newBlock).getNextBlockOnDiskSize(); + - ((HFileBlock) newBlock).getNextBlockOnDiskSize(); } return comparison; } @@ -221,13 +215,13 @@ public static int validateBlockAddition(Cacheable existing, Cacheable newBlock, * new block to cache has, then we can replace the existing block with the new block for better * performance.(HBASE-20447) * @param blockCache BlockCache to check - * @param cacheKey the block cache key - * @param newBlock the new block which try to put into the block cache. + * @param cacheKey the block cache key + * @param newBlock the new block which try to put into the block cache. * @return true means need to replace existing block with new block for the same block cache key. * false means just keep the existing block. */ public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache, - BlockCacheKey cacheKey, Cacheable newBlock) { + BlockCacheKey cacheKey, Cacheable newBlock) { // NOTICE: The getBlock has retained the existingBlock inside. Cacheable existingBlock = blockCache.getBlock(cacheKey, false, false, false); if (existingBlock == null) { @@ -237,16 +231,15 @@ public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache, int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, newBlock, cacheKey); if (comparison < 0) { LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the new block has " - + "nextBlockOnDiskSize set. 
Caching new block."); + + "nextBlockOnDiskSize set. Caching new block."); return true; } else if (comparison > 0) { LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the existing block has " - + "nextBlockOnDiskSize set, Keeping cached block."); + + "nextBlockOnDiskSize set, Keeping cached block."); return false; } else { LOG.debug("Caching an already cached block: {}. This is harmless and can happen in rare " - + "cases (see HBASE-8547)", - cacheKey); + + "cases (see HBASE-8547)", cacheKey); return false; } } finally { @@ -256,9 +249,9 @@ public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache, } /** - * Use one of these to keep a running account of cached blocks by file. Throw it away when done. - * This is different than metrics in that it is stats on current state of a cache. - * See getLoadedCachedBlocksByFile + * Use one of these to keep a running account of cached blocks by file. Throw it away when done. + * This is different than metrics in that it is stats on current state of a cache. See + * getLoadedCachedBlocksByFile */ public static class CachedBlocksByFile { private int count; @@ -267,11 +260,9 @@ public static class CachedBlocksByFile { private long dataSize; private final long now = System.nanoTime(); /** - * How many blocks to look at before we give up. - * There could be many millions of blocks. We don't want the - * ui to freeze while we run through 1B blocks... users will - * think hbase dead. UI displays warning in red when stats - * are incomplete. + * How many blocks to look at before we give up. There could be many millions of blocks. We + * don't want the ui to freeze while we run through 1B blocks... users will think hbase dead. UI + * displays warning in red when stats are incomplete. */ private final int max; public static final int DEFAULT_MAX = 1000000; @@ -281,7 +272,7 @@ public static class CachedBlocksByFile { } CachedBlocksByFile(final Configuration c) { - this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); + this.max = c == null ? DEFAULT_MAX : c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); } /** @@ -292,8 +283,7 @@ public static class CachedBlocksByFile { FastLongHistogram hist = new FastLongHistogram(); /** - * @param cb - * @return True if full.... if we won't be adding any more. + * n * @return True if full.... if we won't be adding any more. */ public boolean update(final CachedBlock cb) { if (isFull()) return true; @@ -310,15 +300,15 @@ public boolean update(final CachedBlock cb) { this.dataBlockCount++; this.dataSize += cb.getSize(); } - long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND; + long age = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND; this.hist.add(age, 1); return false; } /** - * @return True if full; i.e. there are more items in the cache but we only loaded up - * the maximum set in configuration hbase.ui.blockcache.by.file.max - * (Default: DEFAULT_MAX). + * @return True if full; i.e. there are more items in the cache but we only loaded up the + * maximum set in configuration hbase.ui.blockcache.by.file.max (Default: + * DEFAULT_MAX). 
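A caller-side sketch of how shouldReplaceExistingCacheBlock() above is meant to be used when a block is about to be cached again; the helper method name below is illustrative, not part of this change:

  // Overwrite an already-cached block only when the new copy is strictly better,
  // i.e. it carries nextBlockOnDiskSize and the cached copy does not (HBASE-20447).
  static void cacheIfBetter(BlockCache cache, BlockCacheKey key, Cacheable newBlock) {
    if (BlockCacheUtil.shouldReplaceExistingCacheBlock(cache, key, newBlock)) {
      cache.cacheBlock(key, newBlock);
    }
    // Otherwise keep the existing copy; re-caching attempts are harmless (HBASE-8547).
  }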
*/ public boolean isFull() { return this.count >= this.max; @@ -360,16 +350,12 @@ public AgeSnapshot getAgeInCacheSnapshot() { @Override public String toString() { AgeSnapshot snapshot = getAgeInCacheSnapshot(); - return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size + - ", dataSize=" + getDataSize() + - ", mean age=" + snapshot.getMean() + - ", min age=" + snapshot.getMin() + - ", max age=" + snapshot.getMax() + - ", 75th percentile age=" + snapshot.get75thPercentile() + - ", 95th percentile age=" + snapshot.get95thPercentile() + - ", 98th percentile age=" + snapshot.get98thPercentile() + - ", 99th percentile age=" + snapshot.get99thPercentile() + - ", 99.9th percentile age=" + snapshot.get99thPercentile(); + return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size + + ", dataSize=" + getDataSize() + ", mean age=" + snapshot.getMean() + ", min age=" + + snapshot.getMin() + ", max age=" + snapshot.getMax() + ", 75th percentile age=" + + snapshot.get75thPercentile() + ", 95th percentile age=" + snapshot.get95thPercentile() + + ", 98th percentile age=" + snapshot.get98thPercentile() + ", 99th percentile age=" + + snapshot.get99thPercentile() + ", 99.9th percentile age=" + snapshot.get99thPercentile(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java index 2d90a85d9fc7..43498b85f205 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -29,10 +26,10 @@ @InterfaceAudience.Private class BlockCachesIterator implements Iterator { int index = 0; - final BlockCache [] bcs; + final BlockCache[] bcs; Iterator current; - BlockCachesIterator(final BlockCache [] blockCaches) { + BlockCachesIterator(final BlockCache[] blockCaches) { this.bcs = blockCaches; this.current = this.bcs[this.index].iterator(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java index b8f83578d2f7..c340254e07c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,8 @@ public class BlockWithScanInfo { private final HFileBlock hFileBlock; /** - * The first key in the next block following this one in the HFile. - * If this key is unknown, this is reference-equal with HConstants.NO_NEXT_INDEXED_KEY + * The first key in the next block following this one in the HFile. 
If this key is unknown, this + * is reference-equal with HConstants.NO_NEXT_INDEXED_KEY */ private final Cell nextIndexedKey; @@ -42,7 +42,7 @@ public HFileBlock getHFileBlock() { return hFileBlock; } - public Cell getNextIndexedKey() { + public Cell getNextIndexedKey() { return nextIndexedKey; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index dcbb71582f44..42ddddda800f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -46,14 +45,13 @@ public class CacheConfig { public static final String CACHE_DATA_ON_READ_KEY = "hbase.block.data.cacheonread"; /** - * Configuration key to cache data blocks on write. There are separate - * switches for bloom blocks and non-root index blocks. + * Configuration key to cache data blocks on write. There are separate switches for bloom blocks + * and non-root index blocks. */ public static final String CACHE_BLOCKS_ON_WRITE_KEY = "hbase.rs.cacheblocksonwrite"; /** - * Configuration key to cache leaf and intermediate-level index blocks on - * write. + * Configuration key to cache leaf and intermediate-level index blocks on write. */ public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = "hfile.block.index.cacheonwrite"; @@ -68,14 +66,14 @@ public class CacheConfig { public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY = "hbase.block.data.cachecompressed"; /** - * Configuration key to evict all blocks of a given file from the block cache - * when the file is closed. + * Configuration key to evict all blocks of a given file from the block cache when the file is + * closed. */ public static final String EVICT_BLOCKS_ON_CLOSE_KEY = "hbase.rs.evictblocksonclose"; /** - * Configuration key to prefetch all blocks of a given file into the block cache - * when the file is opened. + * Configuration key to prefetch all blocks of a given file into the block cache when the file is + * opened. 
*/ public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = "hbase.rs.prefetchblocksonopen"; @@ -83,17 +81,17 @@ public class CacheConfig { * Configuration key to cache blocks when a compacted file is written */ public static final String CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY = - "hbase.rs.cachecompactedblocksonwrite"; + "hbase.rs.cachecompactedblocksonwrite"; /** * Configuration key to determine total size in bytes of compacted files beyond which we do not * cache blocks on compaction */ public static final String CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY = - "hbase.rs.cachecompactedblocksonwrite.threshold"; + "hbase.rs.cachecompactedblocksonwrite.threshold"; public static final String DROP_BEHIND_CACHE_COMPACTION_KEY = - "hbase.hfile.drop.behind.compaction"; + "hbase.hfile.drop.behind.compaction"; // Defaults public static final boolean DEFAULT_CACHE_DATA_ON_READ = true; @@ -109,10 +107,9 @@ public class CacheConfig { public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = Long.MAX_VALUE; /** - * Whether blocks should be cached on read (default is on if there is a - * cache but this can be turned off on a per-family or per-request basis). - * If off we will STILL cache meta blocks; i.e. INDEX and BLOOM types. - * This cannot be disabled. + * Whether blocks should be cached on read (default is on if there is a cache but this can be + * turned off on a per-family or per-request basis). If off we will STILL cache meta blocks; i.e. + * INDEX and BLOOM types. This cannot be disabled. */ private final boolean cacheDataOnRead; @@ -155,8 +152,8 @@ public class CacheConfig { private final ByteBuffAllocator byteBuffAllocator; /** - * Create a cache configuration using the specified configuration object and - * defaults for family level settings. Only use if no column family context. + * Create a cache configuration using the specified configuration object and defaults for family + * level settings. Only use if no column family context. * @param conf hbase configuration */ public CacheConfig(Configuration conf) { @@ -168,45 +165,42 @@ public CacheConfig(Configuration conf, BlockCache blockCache) { } /** - * Create a cache configuration using the specified configuration object and - * family descriptor. - * @param conf hbase configuration + * Create a cache configuration using the specified configuration object and family descriptor. + * @param conf hbase configuration * @param family column family configuration */ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache, - ByteBuffAllocator byteBuffAllocator) { - this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) && - (family == null ? true : family.isBlockCacheEnabled()); + ByteBuffAllocator byteBuffAllocator) { + this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) + && (family == null ? true : family.isBlockCacheEnabled()); this.inMemory = family == null ? 
DEFAULT_IN_MEMORY : family.isInMemory(); this.cacheDataCompressed = - conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); + conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); this.dropBehindCompaction = - conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); + conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); // For the following flags we enable them regardless of per-schema settings // if they are enabled in the global configuration. - this.cacheDataOnWrite = - conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) || - (family == null ? false : family.isCacheDataOnWrite()); + this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) + || (family == null ? false : family.isCacheDataOnWrite()); this.cacheIndexesOnWrite = - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) || - (family == null ? false : family.isCacheIndexesOnWrite()); + conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) + || (family == null ? false : family.isCacheIndexesOnWrite()); this.cacheBloomsOnWrite = - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) || - (family == null ? false : family.isCacheBloomsOnWrite()); - this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) || - (family == null ? false : family.isEvictBlocksOnClose()); - this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || - (family == null ? false : family.isPrefetchBlocksOnOpen()); - this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, - DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); + conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) + || (family == null ? false : family.isCacheBloomsOnWrite()); + this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) + || (family == null ? false : family.isEvictBlocksOnClose()); + this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) + || (family == null ? false : family.isPrefetchBlocksOnOpen()); + this.cacheCompactedDataOnWrite = + conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); this.blockCache = blockCache; this.byteBuffAllocator = byteBuffAllocator; } /** - * Constructs a cache configuration copied from the specified configuration. - * @param cacheConf + * Constructs a cache configuration copied from the specified configuration. n */ public CacheConfig(CacheConfig cacheConf) { this.cacheDataOnRead = cacheConf.cacheDataOnRead; @@ -240,8 +234,8 @@ private CacheConfig() { } /** - * Returns whether the DATA blocks of this HFile should be cached on read or not (we always - * cache the meta blocks, the INDEX and BLOOM blocks). + * Returns whether the DATA blocks of this HFile should be cached on read or not (we always cache + * the meta blocks, the INDEX and BLOOM blocks). * @return true if blocks should be cached on read, false if not */ public boolean shouldCacheDataOnRead() { @@ -253,13 +247,12 @@ public boolean shouldDropBehindCompaction() { } /** - * Should we cache a block of a particular category? We always cache - * important blocks such as index blocks, as long as the block cache is - * available. 
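A short usage sketch of the constructor behaviour above, where the global write-path flags are OR'd with any per-family settings; illustrative only, using the keys and accessors shown in this class:

  // Illustrative only.
  static CacheConfig writeThroughCacheConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);    // "hbase.rs.cacheblocksonwrite"
    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);  // "hbase.rs.prefetchblocksonopen"
    // Family-less constructor: per-family settings fall back to their defaults,
    // and the global flags above win regardless of schema.
    CacheConfig cacheConf = new CacheConfig(conf);
    assert cacheConf.shouldCacheDataOnWrite() && cacheConf.shouldPrefetchOnOpen();
    return cacheConf;
  }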
+ * Should we cache a block of a particular category? We always cache important blocks such as + * index blocks, as long as the block cache is available. */ public boolean shouldCacheBlockOnRead(BlockCategory category) { - return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM || - (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN)); + return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM + || (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN)); } /** @@ -270,26 +263,23 @@ public boolean isInMemory() { } /** - * @return true if data blocks should be written to the cache when an HFile is - * written, false if not + * @return true if data blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheDataOnWrite() { return this.cacheDataOnWrite; } /** - * @param cacheDataOnWrite whether data blocks should be written to the cache - * when an HFile is written + * @param cacheDataOnWrite whether data blocks should be written to the cache when an HFile is + * written */ public void setCacheDataOnWrite(boolean cacheDataOnWrite) { this.cacheDataOnWrite = cacheDataOnWrite; } /** - * Enable cache on write including: - * cacheDataOnWrite - * cacheIndexesOnWrite - * cacheBloomsOnWrite + * Enable cache on write including: cacheDataOnWrite cacheIndexesOnWrite cacheBloomsOnWrite */ public void enableCacheOnWrite() { this.cacheDataOnWrite = true; @@ -298,24 +288,24 @@ public void enableCacheOnWrite() { } /** - * @return true if index blocks should be written to the cache when an HFile - * is written, false if not + * @return true if index blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheIndexesOnWrite() { return this.cacheIndexesOnWrite; } /** - * @return true if bloom blocks should be written to the cache when an HFile - * is written, false if not + * @return true if bloom blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheBloomsOnWrite() { return this.cacheBloomsOnWrite; } /** - * @return true if blocks should be evicted from the cache when an HFile - * reader is closed, false if not + * @return true if blocks should be evicted from the cache when an HFile reader is closed, false + * if not */ public boolean shouldEvictOnClose() { return this.evictOnClose; @@ -323,8 +313,8 @@ public boolean shouldEvictOnClose() { /** * Only used for testing. - * @param evictOnClose whether blocks should be evicted from the cache when an - * HFile reader is closed + * @param evictOnClose whether blocks should be evicted from the cache when an HFile reader is + * closed */ public void setEvictOnClose(boolean evictOnClose) { this.evictOnClose = evictOnClose; @@ -369,6 +359,7 @@ public boolean shouldCacheCompactedBlocksOnWrite() { public long getCacheCompactedBlocksOnWriteThreshold() { return this.cacheCompactedDataOnWriteThreshold; } + /** * Return true if we may find this type of block in block cache. *
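A small illustration of the read-path decision in shouldCacheBlockOnRead() above: with hbase.block.data.cacheonread disabled (and prefetch-on-open left at its default of off), DATA blocks are skipped while INDEX and BLOOM blocks are still cached; BlockCategory here is the nested enum of BlockType:

  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false);  // "hbase.block.data.cacheonread"
  CacheConfig cc = new CacheConfig(conf);
  boolean cachesData  = cc.shouldCacheBlockOnRead(BlockType.BlockCategory.DATA);   // false
  boolean cachesIndex = cc.shouldCacheBlockOnRead(BlockType.BlockCategory.INDEX);  // true
  boolean cachesBloom = cc.shouldCacheBlockOnRead(BlockType.BlockCategory.BLOOM);  // true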

      @@ -390,16 +381,18 @@ public boolean shouldReadBlockFromCache(BlockType blockType) { if (blockType == null) { return true; } - if (blockType.getCategory() == BlockCategory.BLOOM || - blockType.getCategory() == BlockCategory.INDEX) { + if ( + blockType.getCategory() == BlockCategory.BLOOM + || blockType.getCategory() == BlockCategory.INDEX + ) { return true; } return false; } /** - * If we make sure the block could not be cached, we will not acquire the lock - * otherwise we will acquire lock + * If we make sure the block could not be cached, we will not acquire the lock otherwise we will + * acquire lock */ public boolean shouldLockOnCacheMiss(BlockType blockType) { if (blockType == null) { @@ -410,7 +403,6 @@ public boolean shouldLockOnCacheMiss(BlockType blockType) { /** * Returns the block cache. - * * @return the block cache, or null if caching is completely disabled */ public Optional getBlockCache() { @@ -426,8 +418,8 @@ public ByteBuffAllocator getByteBuffAllocator() { } private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) { - long cacheCompactedBlocksOnWriteThreshold = conf - .getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, + long cacheCompactedBlocksOnWriteThreshold = + conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD); if (cacheCompactedBlocksOnWriteThreshold < 0) { @@ -443,9 +435,9 @@ private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) { @Override public String toString() { return "cacheDataOnRead=" + shouldCacheDataOnRead() + ", cacheDataOnWrite=" - + shouldCacheDataOnWrite() + ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() - + ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + ", cacheEvictOnClose=" - + shouldEvictOnClose() + ", cacheDataCompressed=" + shouldCacheDataCompressed() - + ", prefetchOnOpen=" + shouldPrefetchOnOpen(); + + shouldCacheDataOnWrite() + ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + + ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + ", cacheEvictOnClose=" + + shouldEvictOnClose() + ", cacheDataCompressed=" + shouldCacheDataCompressed() + + ", prefetchOnOpen=" + shouldPrefetchOnOpen(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 7c5b56364098..c5a247dfce18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,15 +23,15 @@ import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.apache.yetus.audience.InterfaceAudience; - /** * Class that implements cache metrics. */ @InterfaceAudience.Private public class CacheStats { - /** Sliding window statistics. The number of metric periods to include in - * sliding window hit ratio calculations. + /** + * Sliding window statistics. The number of metric periods to include in sliding window hit ratio + * calculations. */ static final int DEFAULT_WINDOW_PERIODS = 5; @@ -43,10 +42,9 @@ public class CacheStats { private final LongAdder primaryHitCount = new LongAdder(); /** - * The number of getBlock requests that were cache hits, but only from - * requests that were set to use the block cache. 
This is because all reads - * attempt to read from the block cache even if they will not put new blocks - * into the block cache. See HBASE-2253 for more information. + * The number of getBlock requests that were cache hits, but only from requests that were set to + * use the block cache. This is because all reads attempt to read from the block cache even if + * they will not put new blocks into the block cache. See HBASE-2253 for more information. */ private final LongAdder hitCachingCount = new LongAdder(); @@ -56,8 +54,8 @@ public class CacheStats { /** The number of getBlock requests for primary replica that were cache misses */ private final LongAdder primaryMissCount = new LongAdder(); /** - * The number of getBlock requests that were cache misses, but only from - * requests that were set to use the block cache. + * The number of getBlock requests that were cache misses, but only from requests that were set to + * use the block cache. */ private final LongAdder missCachingCount = new LongAdder(); @@ -129,25 +127,22 @@ public CacheStats(final String name) { public CacheStats(final String name, int numPeriodsInWindow) { this.numPeriodsInWindow = numPeriodsInWindow; this.hitCounts = new long[numPeriodsInWindow]; - this.hitCachingCounts = new long[numPeriodsInWindow]; - this.requestCounts = new long[numPeriodsInWindow]; - this.requestCachingCounts = new long[numPeriodsInWindow]; + this.hitCachingCounts = new long[numPeriodsInWindow]; + this.requestCounts = new long[numPeriodsInWindow]; + this.requestCachingCounts = new long[numPeriodsInWindow]; this.ageAtEviction = new FastLongHistogram(); } @Override public String toString() { AgeSnapshot snapshot = getAgeAtEvictionSnapshot(); - return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + - ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + - ", evictionCount=" + getEvictionCount() + - ", evictedBlockCount=" + getEvictedCount() + - ", primaryMissCount=" + getPrimaryMissCount() + - ", primaryHitCount=" + getPrimaryHitCount() + - ", evictedAgeMean=" + snapshot.getMean(); + return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + + ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + + ", evictionCount=" + getEvictionCount() + ", evictedBlockCount=" + getEvictedCount() + + ", primaryMissCount=" + getPrimaryMissCount() + ", primaryHitCount=" + getPrimaryHitCount() + + ", evictedAgeMean=" + snapshot.getMean(); } - public void miss(boolean caching, boolean primary, BlockType type) { missCount.increment(); if (primary) primaryMissCount.increment(); @@ -199,7 +194,6 @@ public void hit(boolean caching, boolean primary, BlockType type) { if (primary) primaryHitCount.increment(); if (caching) hitCachingCount.increment(); - if (type == null) { return; } @@ -260,7 +254,6 @@ public long failInsert() { return failedInserts.incrementAndGet(); } - // All of the counts of misses and hits. 
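A hedged sketch of the sliding-window accounting in this class, using only the constructors and methods shown here (the exact ratio assumes getRequestCount() is hits plus misses):

  CacheStats stats = new CacheStats("example", 5);  // five metric periods in the window
  stats.hit(true, true, BlockType.DATA);            // a caching read that hit, primary replica
  stats.miss(true, true, BlockType.DATA);           // a caching read that missed
  stats.rollMetricsPeriod();                        // close out the current period
  // One hit out of two requests in the rolled period: ratio of 0.5 for this toy window.
  double windowHitRatio = stats.getHitRatioPastNPeriods();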
public long getDataMissCount() { return dataMissCount.sum(); @@ -443,13 +436,11 @@ public long getFailedInserts() { public void rollMetricsPeriod() { hitCounts[windowIndex] = getHitCount() - lastHitCount; lastHitCount = getHitCount(); - hitCachingCounts[windowIndex] = - getHitCachingCount() - lastHitCachingCount; + hitCachingCounts[windowIndex] = getHitCachingCount() - lastHitCachingCount; lastHitCachingCount = getHitCachingCount(); requestCounts[windowIndex] = getRequestCount() - lastRequestCount; lastRequestCount = getRequestCount(); - requestCachingCounts[windowIndex] = - getRequestCachingCount() - lastRequestCachingCount; + requestCachingCounts[windowIndex] = getRequestCachingCount() - lastRequestCachingCount; lastRequestCachingCount = getRequestCachingCount(); windowIndex = (windowIndex + 1) % numPeriodsInWindow; } @@ -471,14 +462,14 @@ public long getSumRequestCachingCountsPastNPeriods() { } public double getHitRatioPastNPeriods() { - double ratio = ((double)getSumHitCountsPastNPeriods() / - (double)getSumRequestCountsPastNPeriods()); + double ratio = + ((double) getSumHitCountsPastNPeriods() / (double) getSumRequestCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } public double getHitCachingRatioPastNPeriods() { - double ratio = ((double)getSumHitCachingCountsPastNPeriods() / - (double)getSumRequestCachingCountsPastNPeriods()); + double ratio = ((double) getSumHitCachingCountsPastNPeriods() + / (double) getSumRequestCachingCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java index 96c8e8275630..0a41b53a804c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,44 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.nio.HBaseReferenceCounted; import org.apache.yetus.audience.InterfaceAudience; /** - * Cacheable is an interface that allows for an object to be cached. If using an - * on heap cache, just use heapsize. If using an off heap cache, Cacheable - * provides methods for serialization of the object. - * - * Some objects cannot be moved off heap, those objects will return a - * getSerializedLength() of 0. - * + * Cacheable is an interface that allows for an object to be cached. If using an on heap cache, just + * use heapsize. If using an off heap cache, Cacheable provides methods for serialization of the + * object. Some objects cannot be moved off heap, those objects will return a getSerializedLength() + * of 0. */ @InterfaceAudience.Private public interface Cacheable extends HeapSize, HBaseReferenceCounted { /** - * Returns the length of the ByteBuffer required to serialized the object. If the - * object cannot be serialized, it should return 0. - * + * Returns the length of the ByteBuffer required to serialized the object. If the object cannot be + * serialized, it should return 0. 
* @return int length in bytes of the serialized form or 0 if the object cannot be cached. */ int getSerializedLength(); /** * Serializes its data into destination. - * @param destination Where to serialize to + * @param destination Where to serialize to * @param includeNextBlockMetadata Whether to include nextBlockMetadata in the Cache block. */ void serialize(ByteBuffer destination, boolean includeNextBlockMetadata); /** * Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer. - * * @return CacheableDeserialzer instance. */ CacheableDeserializer getDeserializer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java index e12173daba9e..2fe50381b77e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; @@ -30,10 +29,9 @@ @InterfaceAudience.Private public interface CacheableDeserializer { /** - * @param b ByteBuff to deserialize the Cacheable. + * @param b ByteBuff to deserialize the Cacheable. * @param allocator to manage NIO ByteBuffers for future allocation or de-allocation. - * @return T the deserialized object. - * @throws IOException + * @return T the deserialized object. n */ T deserialize(ByteBuff b, ByteBuffAllocator allocator) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java index 3f14f4ffeb2a..4e46b501ad70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; @@ -22,26 +21,24 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; - import org.apache.yetus.audience.InterfaceAudience; /** - * This class is used to manage the identifiers for {@link CacheableDeserializer}. - * All deserializers are registered with this Manager via the - * {@link #registerDeserializer(CacheableDeserializer)}}. On registration, we return an - * int *identifier* for this deserializer. The int identifier is passed to - * {@link #getDeserializer(int)}} to obtain the registered deserializer instance. + * This class is used to manage the identifiers for {@link CacheableDeserializer}. All deserializers + * are registered with this Manager via the {@link #registerDeserializer(CacheableDeserializer)}}. + * On registration, we return an int *identifier* for this deserializer. The int identifier is + * passed to {@link #getDeserializer(int)}} to obtain the registered deserializer instance. */ @InterfaceAudience.Private public class CacheableDeserializerIdManager { private static final Map> registeredDeserializers = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private static final AtomicInteger identifier = new AtomicInteger(0); /** - * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement - * the Cacheable Interface -- deserializer and generate a unique identifier id for it and return - * this as our result. + * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement the + * Cacheable Interface -- deserializer and generate a unique identifier id for it and return this + * as our result. * @return the identifier of given cacheable deserializer * @see #getDeserializer(int) */ @@ -61,14 +58,14 @@ public static CacheableDeserializer getDeserializer(int id) { } /** - * Snapshot a map of the current identifiers to class names for reconstruction on reading out - * of a file. + * Snapshot a map of the current identifiers to class names for reconstruction on reading out of a + * file. 
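A sketch of the registration round-trip described above; MyBlockDeserializer is a hypothetical CacheableDeserializer implementation, not an HBase class:

  // Usually done once, from a static initializer of the Cacheable implementation.
  CacheableDeserializer<Cacheable> deser = new MyBlockDeserializer();   // hypothetical
  int id = CacheableDeserializerIdManager.registerDeserializer(deser);  // unique int identifier
  // Later, e.g. while reading a persisted bucket cache back in:
  CacheableDeserializer<Cacheable> resolved = CacheableDeserializerIdManager.getDeserializer(id);
  // Snapshot of id -> deserializer class name, suitable for writing out with the cache state.
  java.util.Map<Integer, String> snapshot = CacheableDeserializerIdManager.save();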
*/ - public static Map save() { + public static Map save() { // No synchronization here because weakly consistent view should be good enough // The assumed risk is that we might not see a new serializer that comes in while iterating, // but with a synchronized block, we won't see it anyway return registeredDeserializers.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getClass().getName())); + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getClass().getName())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 0fcef862d42c..81823dd15dbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +22,14 @@ @InterfaceAudience.Private public interface CachedBlock extends Comparable { BlockPriority getBlockPriority(); + BlockType getBlockType(); + long getOffset(); + long getSize(); + long getCachedTime(); + String getFilename(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java index f2f9d58796ac..8447b3bdbd26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java @@ -19,15 +19,14 @@ import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; +import org.apache.hadoop.hbase.util.ChecksumType; +import org.apache.hadoop.util.DataChecksum; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.ChecksumType; -import org.apache.hadoop.util.DataChecksum; /** * Utility methods to compute and validate checksums. @@ -39,42 +38,38 @@ public class ChecksumUtil { public static final int CHECKSUM_BUF_SIZE = 256; /** - * This is used by unit tests to make checksum failures throw an - * exception instead of returning null. Returning a null value from - * checksum validation will cause the higher layer to retry that - * read with hdfs-level checksums. Instead, we would like checksum - * failures to cause the entire unit test to fail. + * This is used by unit tests to make checksum failures throw an exception instead of returning + * null. Returning a null value from checksum validation will cause the higher layer to retry that + * read with hdfs-level checksums. Instead, we would like checksum failures to cause the entire + * unit test to fail. */ private static boolean generateExceptions = false; /** - * Generates a checksum for all the data in indata. The checksum is - * written to outdata. 
- * @param indata input data stream - * @param startOffset starting offset in the indata stream from where to - * compute checkums from - * @param endOffset ending offset in the indata stream upto - * which checksums needs to be computed - * @param outdata the output buffer where checksum values are written - * @param outOffset the starting offset in the outdata where the - * checksum values are written - * @param checksumType type of checksum + * Generates a checksum for all the data in indata. The checksum is written to outdata. + * @param indata input data stream + * @param startOffset starting offset in the indata stream from where to compute checkums + * from + * @param endOffset ending offset in the indata stream upto which checksums needs to be + * computed + * @param outdata the output buffer where checksum values are written + * @param outOffset the starting offset in the outdata where the checksum values are + * written + * @param checksumType type of checksum * @param bytesPerChecksum number of bytes per checksum value */ - static void generateChecksums(byte[] indata, int startOffset, int endOffset, - byte[] outdata, int outOffset, ChecksumType checksumType, - int bytesPerChecksum) throws IOException { + static void generateChecksums(byte[] indata, int startOffset, int endOffset, byte[] outdata, + int outOffset, ChecksumType checksumType, int bytesPerChecksum) throws IOException { if (checksumType == ChecksumType.NULL) { return; // No checksum for this block. } - DataChecksum checksum = DataChecksum.newDataChecksum( - checksumType.getDataChecksumType(), bytesPerChecksum); + DataChecksum checksum = + DataChecksum.newDataChecksum(checksumType.getDataChecksumType(), bytesPerChecksum); - checksum.calculateChunkedSums( - ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), - ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset)); + checksum.calculateChunkedSums(ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), + ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset)); } /** @@ -82,24 +77,24 @@ static void generateChecksums(byte[] indata, int startOffset, int endOffset, * this method will also verify checksum of each chunk in data. the difference is: this method can * accept {@link ByteBuff} as arguments, we can not add it in hadoop-common so defined here. * @param dataChecksum to calculate the checksum. - * @param data as the input - * @param checksums to compare - * @param pathName indicate that the data is read from which file. + * @param data as the input + * @param checksums to compare + * @param pathName indicate that the data is read from which file. * @return a flag indicate the checksum match or mismatch. * @see org.apache.hadoop.util.DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String, * long) */ private static boolean verifyChunkedSums(DataChecksum dataChecksum, ByteBuff data, - ByteBuff checksums, String pathName) { + ByteBuff checksums, String pathName) { // Almost all of the HFile Block are about 64KB, and it would be a SingleByteBuff, use the // Hadoop's verify checksum directly, because it'll use the native checksum, which has no extra // byte[] allocation or copying. (HBASE-21917) if (data instanceof SingleByteBuff && checksums instanceof SingleByteBuff) { // the checksums ByteBuff must also be an SingleByteBuff because it's duplicated from data. 
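As a rough worked example of the chunked layout used by generateChecksums and verifyChunkedSums: each bytesPerChecksum bytes of data gets one stored sum, reconstructed 4 bytes at a time further down, so a 64 KiB block with a (commonly default) 16 KiB chunk size carries 4 sums, i.e. 16 checksum bytes. A standalone sketch of the chunk-count arithmetic (illustrative; the real helpers are numChunks/numBytes below):

  static long chunksFor(long datasize, int bytesPerChecksum) {
    long chunks = datasize / bytesPerChecksum;
    if (datasize % bytesPerChecksum != 0) {
      chunks++;  // a partial trailing chunk still gets its own checksum
    }
    return chunks;
  }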
ByteBuffer dataBB = (ByteBuffer) (data.nioByteBuffers()[0]).duplicate() - .position(data.position()).limit(data.limit()); + .position(data.position()).limit(data.limit()); ByteBuffer checksumBB = (ByteBuffer) (checksums.nioByteBuffers()[0]).duplicate() - .position(checksums.position()).limit(checksums.limit()); + .position(checksums.position()).limit(checksums.limit()); try { dataChecksum.verifyChunkedSums(dataBB, checksumBB, pathName, 0); return true; @@ -142,7 +137,7 @@ private static boolean verifyChunkedSums(DataChecksum dataChecksum, ByteBuff dat } int calculated = (int) dataChecksum.getValue(); int stored = (sum[0] << 24 & 0xff000000) | (sum[1] << 16 & 0xff0000) - | (sum[2] << 8 & 0xff00) | (sum[3] & 0xff); + | (sum[2] << 8 & 0xff00) | (sum[3] & 0xff); if (calculated != stored) { if (LOG.isTraceEnabled()) { long errPos = data.position() - startDataPos - n; @@ -162,10 +157,10 @@ private static boolean verifyChunkedSums(DataChecksum dataChecksum, ByteBuff dat /** * Validates that the data in the specified HFileBlock matches the checksum. Generates the * checksums for the data and then validate that it matches those stored in the end of the data. - * @param buf Contains the data in following order: HFileBlock header, data, checksums. + * @param buf Contains the data in following order: HFileBlock header, data, checksums. * @param pathName Path of the HFile to which the {@code data} belongs. Only used for logging. - * @param offset offset of the data being validated. Only used for logging. - * @param hdrSize Size of the block header in {@code data}. Only used for logging. + * @param offset offset of the data being validated. Only used for logging. + * @param hdrSize Size of the block header in {@code data}. Only used for logging. * @return True if checksum matches, else false. */ static boolean validateChecksum(ByteBuff buf, String pathName, long offset, int hdrSize) { @@ -177,22 +172,23 @@ static boolean validateChecksum(ByteBuff buf, String pathName, long offset, int // read in the stored value of the checksum size from the header. 
int bytesPerChecksum = buf.getInt(HFileBlock.Header.BYTES_PER_CHECKSUM_INDEX); DataChecksum dataChecksum = - DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum); + DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum); assert dataChecksum != null; int onDiskDataSizeWithHeader = buf.getInt(HFileBlock.Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); - LOG.trace("dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " - + "offset={}, headerSize={}, bytesPerChecksum={}", buf.capacity(), onDiskDataSizeWithHeader, - ctype.getName(), pathName, offset, hdrSize, bytesPerChecksum); + LOG.trace( + "dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " + + "offset={}, headerSize={}, bytesPerChecksum={}", + buf.capacity(), onDiskDataSizeWithHeader, ctype.getName(), pathName, offset, hdrSize, + bytesPerChecksum); ByteBuff data = buf.duplicate().position(0).limit(onDiskDataSizeWithHeader); ByteBuff checksums = buf.duplicate().position(onDiskDataSizeWithHeader).limit(buf.limit()); return verifyChunkedSums(dataChecksum, data, checksums, pathName); } /** - * Returns the number of bytes needed to store the checksums for - * a specified data size - * @param datasize number of bytes of data + * Returns the number of bytes needed to store the checksums for a specified data size + * @param datasize number of bytes of data * @param bytesPerChecksum number of bytes in a checksum chunk * @return The number of bytes needed to store the checksum values */ @@ -201,14 +197,13 @@ static long numBytes(long datasize, int bytesPerChecksum) { } /** - * Returns the number of checksum chunks needed to store the checksums for - * a specified data size - * @param datasize number of bytes of data + * Returns the number of checksum chunks needed to store the checksums for a specified data size + * @param datasize number of bytes of data * @param bytesPerChecksum number of bytes in a checksum chunk * @return The number of checksum chunks */ static long numChunks(long datasize, int bytesPerChecksum) { - long numChunks = datasize/bytesPerChecksum; + long numChunks = datasize / bytesPerChecksum; if (datasize % bytesPerChecksum != 0) { numChunks++; } @@ -216,13 +211,12 @@ static long numChunks(long datasize, int bytesPerChecksum) { } /** - * Mechanism to throw an exception in case of hbase checksum - * failure. This is used by unit tests only. - * @param value Setting this to true will cause hbase checksum - * verification failures to generate exceptions. + * Mechanism to throw an exception in case of hbase checksum failure. This is used by unit tests + * only. + * @param value Setting this to true will cause hbase checksum verification failures to generate + * exceptions. */ public static void generateExceptionForChecksumFailureForTest(boolean value) { generateExceptions = value; } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index dc4f697bae94..69a70600a6c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -1,37 +1,34 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.yetus.audience.InterfaceAudience; /** - * CombinedBlockCache is an abstraction layer that combines - * {@link FirstLevelBlockCache} and {@link BucketCache}. The smaller lruCache is used - * to cache bloom blocks and index blocks. The larger Cache is used to - * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads - * first from the smaller l1Cache before looking for the block in the l2Cache. Blocks evicted - * from l1Cache are put into the bucket cache. - * Metrics are the combined size and hits and misses of both caches. + * CombinedBlockCache is an abstraction layer that combines {@link FirstLevelBlockCache} and + * {@link BucketCache}. The smaller lruCache is used to cache bloom blocks and index blocks. The + * larger Cache is used to cache data blocks. + * {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads first from the smaller l1Cache + * before looking for the block in the l2Cache. Blocks evicted from l1Cache are put into the bucket + * cache. Metrics are the combined size and hits and misses of both caches. 
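                  The lookup order this javadoc describes is easy to lose in the getter-heavy code that follows, so here is a minimal two-tier sketch; the class and field names are hypothetical, not the HBase types:

                  import java.util.Map;
                  import java.util.concurrent.ConcurrentHashMap;

                  final class TwoTierCacheSketch<K, V> {
                    private final Map<K, V> l1 = new ConcurrentHashMap<>(); // small cache for index/bloom blocks
                    private final Map<K, V> l2 = new ConcurrentHashMap<>(); // large cache for data blocks
                    private long l1Misses;

                    V getBlock(K key) {
                      V block = l1.get(key);
                      if (block != null) {
                        return block;      // served from the first level
                      }
                      l1Misses++;          // absence in L1 is counted as an L1 miss, as in the patch
                      return l2.get(key);  // fall back to the second (bucket-style) level
                    }

                    void cacheIndexOrBloom(K key, V block) { l1.put(key, block); }
                    void cacheData(K key, V block) { l2.put(key, block); }
                  }
                  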
*/ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @@ -42,8 +39,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) { this.l1Cache = l1Cache; this.l2Cache = l2Cache; - this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), - l2Cache.getStats()); + this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), l2Cache.getStats()); } @Override @@ -71,8 +67,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { // We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting // passed always. boolean existInL1 = l1Cache.containsBlock(cacheKey); @@ -80,14 +76,14 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, // If the block does not exist in L1, the containsBlock should be counted as one miss. l1Cache.getStats().miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); } - return existInL1 ? - l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): - l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + return existInL1 + ? l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics) + : l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics, BlockType blockType) { + boolean updateCacheMetrics, BlockType blockType) { if (blockType == null) { return getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @@ -106,8 +102,7 @@ public boolean evictBlock(BlockCacheKey cacheKey) { @Override public int evictBlocksByHfileName(String hfileName) { - return l1Cache.evictBlocksByHfileName(hfileName) - + l2Cache.evictBlocksByHfileName(hfileName); + return l1Cache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName); } @Override @@ -201,8 +196,8 @@ public long getRootIndexMissCount() { @Override public long getIntermediateIndexMissCount() { - return lruCacheStats.getIntermediateIndexMissCount() + - bucketCacheStats.getIntermediateIndexMissCount(); + return lruCacheStats.getIntermediateIndexMissCount() + + bucketCacheStats.getIntermediateIndexMissCount(); } @Override @@ -212,14 +207,14 @@ public long getFileInfoMissCount() { @Override public long getGeneralBloomMetaMissCount() { - return lruCacheStats.getGeneralBloomMetaMissCount() + - bucketCacheStats.getGeneralBloomMetaMissCount(); + return lruCacheStats.getGeneralBloomMetaMissCount() + + bucketCacheStats.getGeneralBloomMetaMissCount(); } @Override public long getDeleteFamilyBloomMissCount() { - return lruCacheStats.getDeleteFamilyBloomMissCount() + - bucketCacheStats.getDeleteFamilyBloomMissCount(); + return lruCacheStats.getDeleteFamilyBloomMissCount() + + bucketCacheStats.getDeleteFamilyBloomMissCount(); } @Override @@ -254,8 +249,8 @@ public long getRootIndexHitCount() { @Override public long getIntermediateIndexHitCount() { - return lruCacheStats.getIntermediateIndexHitCount() + - bucketCacheStats.getIntermediateIndexHitCount(); + return lruCacheStats.getIntermediateIndexHitCount() + + bucketCacheStats.getIntermediateIndexHitCount(); } @Override @@ -265,14 +260,14 @@ public long 
getFileInfoHitCount() { @Override public long getGeneralBloomMetaHitCount() { - return lruCacheStats.getGeneralBloomMetaHitCount() + - bucketCacheStats.getGeneralBloomMetaHitCount(); + return lruCacheStats.getGeneralBloomMetaHitCount() + + bucketCacheStats.getGeneralBloomMetaHitCount(); } @Override public long getDeleteFamilyBloomHitCount() { - return lruCacheStats.getDeleteFamilyBloomHitCount() + - bucketCacheStats.getDeleteFamilyBloomHitCount(); + return lruCacheStats.getDeleteFamilyBloomHitCount() + + bucketCacheStats.getDeleteFamilyBloomHitCount(); } @Override @@ -282,14 +277,12 @@ public long getTrailerHitCount() { @Override public long getRequestCount() { - return lruCacheStats.getRequestCount() - + bucketCacheStats.getRequestCount(); + return lruCacheStats.getRequestCount() + bucketCacheStats.getRequestCount(); } @Override public long getRequestCachingCount() { - return lruCacheStats.getRequestCachingCount() - + bucketCacheStats.getRequestCachingCount(); + return lruCacheStats.getRequestCachingCount() + bucketCacheStats.getRequestCachingCount(); } @Override @@ -304,8 +297,7 @@ public long getPrimaryMissCount() { @Override public long getMissCachingCount() { - return lruCacheStats.getMissCachingCount() - + bucketCacheStats.getMissCachingCount(); + return lruCacheStats.getMissCachingCount() + bucketCacheStats.getMissCachingCount(); } @Override @@ -317,28 +309,25 @@ public long getHitCount() { public long getPrimaryHitCount() { return lruCacheStats.getPrimaryHitCount() + bucketCacheStats.getPrimaryHitCount(); } + @Override public long getHitCachingCount() { - return lruCacheStats.getHitCachingCount() - + bucketCacheStats.getHitCachingCount(); + return lruCacheStats.getHitCachingCount() + bucketCacheStats.getHitCachingCount(); } @Override public long getEvictionCount() { - return lruCacheStats.getEvictionCount() - + bucketCacheStats.getEvictionCount(); + return lruCacheStats.getEvictionCount() + bucketCacheStats.getEvictionCount(); } @Override public long getEvictedCount() { - return lruCacheStats.getEvictedCount() - + bucketCacheStats.getEvictedCount(); + return lruCacheStats.getEvictedCount() + bucketCacheStats.getEvictedCount(); } @Override public long getPrimaryEvictedCount() { - return lruCacheStats.getPrimaryEvictedCount() - + bucketCacheStats.getPrimaryEvictedCount(); + return lruCacheStats.getPrimaryEvictedCount() + bucketCacheStats.getPrimaryEvictedCount(); } @Override @@ -355,25 +344,25 @@ public long getFailedInserts() { @Override public long getSumHitCountsPastNPeriods() { return lruCacheStats.getSumHitCountsPastNPeriods() - + bucketCacheStats.getSumHitCountsPastNPeriods(); + + bucketCacheStats.getSumHitCountsPastNPeriods(); } @Override public long getSumRequestCountsPastNPeriods() { return lruCacheStats.getSumRequestCountsPastNPeriods() - + bucketCacheStats.getSumRequestCountsPastNPeriods(); + + bucketCacheStats.getSumRequestCountsPastNPeriods(); } @Override public long getSumHitCachingCountsPastNPeriods() { return lruCacheStats.getSumHitCachingCountsPastNPeriods() - + bucketCacheStats.getSumHitCachingCountsPastNPeriods(); + + bucketCacheStats.getSumHitCachingCountsPastNPeriods(); } @Override public long getSumRequestCachingCountsPastNPeriods() { return lruCacheStats.getSumRequestCachingCountsPastNPeriods() - + bucketCacheStats.getSumRequestCachingCountsPastNPeriods(); + + bucketCacheStats.getSumRequestCachingCountsPastNPeriods(); } } @@ -384,7 +373,7 @@ public Iterator iterator() { @Override public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.l1Cache, 
this.l2Cache}; + return new BlockCache[] { this.l1Cache, this.l2Cache }; } @Override @@ -394,8 +383,8 @@ public void setMaxSize(long size) { public int getRpcRefCount(BlockCacheKey cacheKey) { return (this.l2Cache instanceof BucketCache) - ? ((BucketCache) this.l2Cache).getRpcRefCount(cacheKey) - : 0; + ? ((BucketCache) this.l2Cache).getRpcRefCount(cacheKey) + : 0; } public FirstLevelBlockCache getFirstLevelCache() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java index 29f29e15a8c0..0d39d09c9691 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,32 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.io.DataInput; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Hash; +import org.apache.yetus.audience.InterfaceAudience; /** - * A Bloom filter implementation built on top of - * {@link org.apache.hadoop.hbase.util.BloomFilterChunk}, encapsulating - * a set of fixed-size Bloom filters written out at the time of - * {@link org.apache.hadoop.hbase.io.hfile.HFile} generation into the data - * block stream, and loaded on demand at query time. This class only provides - * reading capabilities. + * A Bloom filter implementation built on top of + * {@link org.apache.hadoop.hbase.util.BloomFilterChunk}, encapsulating a set of fixed-size Bloom + * filters written out at the time of {@link org.apache.hadoop.hbase.io.hfile.HFile} generation into + * the data block stream, and loaded on demand at query time. This class only provides reading + * capabilities. */ @InterfaceAudience.Private -public class CompoundBloomFilter extends CompoundBloomFilterBase - implements BloomFilter { +public class CompoundBloomFilter extends CompoundBloomFilterBase implements BloomFilter { /** Used to load chunks on demand */ private HFile.Reader reader; @@ -55,14 +50,11 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase private long[] numPositivesPerChunk; /** - * De-serialization for compound Bloom filter metadata. Must be consistent - * with what {@link CompoundBloomFilterWriter} does. - * - * @param meta serialized Bloom filter metadata without any magic blocks - * @throws IOException + * De-serialization for compound Bloom filter metadata. Must be consistent with what + * {@link CompoundBloomFilterWriter} does. 
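                  The constructor that follows reads the Bloom metadata in a fixed field order that has to mirror what the writer emitted. A generic round-trip sketch of that symmetry; the field names and their order here are illustrative only, not the exact HFile layout:

                  import java.io.ByteArrayInputStream;
                  import java.io.ByteArrayOutputStream;
                  import java.io.DataInputStream;
                  import java.io.DataOutputStream;
                  import java.io.IOException;

                  public final class BloomMetaRoundTrip {
                    public static void main(String[] args) throws IOException {
                      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
                      try (DataOutputStream out = new DataOutputStream(bytes)) {
                        out.writeInt(3);          // version
                        out.writeLong(65536L);    // total byte size
                        out.writeInt(4);          // hash count
                        out.writeInt(1);          // hash type
                        out.writeLong(120000L);   // total key count
                        out.writeLong(150000L);   // total max keys
                        out.writeInt(8);          // number of chunks
                      }
                      try (DataInputStream in =
                          new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                        int version = in.readInt();   // fields must be consumed in the order they were written
                        long totalByteSize = in.readLong();
                        int hashCount = in.readInt();
                        int hashType = in.readInt();
                        long totalKeyCount = in.readLong();
                        long totalMaxKeys = in.readLong();
                        int numChunks = in.readInt();
                        System.out.println("version=" + version + ", chunks=" + numChunks);
                      }
                    }
                  }
                  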
+ * @param meta serialized Bloom filter metadata without any magic blocks n */ - public CompoundBloomFilter(DataInput meta, HFile.Reader reader) - throws IOException { + public CompoundBloomFilter(DataInput meta, HFile.Reader reader) throws IOException { this.reader = reader; totalByteSize = meta.readLong(); @@ -72,8 +64,8 @@ public CompoundBloomFilter(DataInput meta, HFile.Reader reader) totalMaxKeys = meta.readLong(); numChunks = meta.readInt(); byte[] comparatorClassName = Bytes.readByteArray(meta); - // The writer would have return 0 as the vint length for the case of - // Bytes.BYTES_RAWCOMPARATOR. In such cases do not initialize comparator, it can be + // The writer would have return 0 as the vint length for the case of + // Bytes.BYTES_RAWCOMPARATOR. In such cases do not initialize comparator, it can be // null if (comparatorClassName.length != 0) { comparator = FixedFileTrailer.createComparator(Bytes.toString(comparatorClassName)); @@ -84,7 +76,7 @@ public CompoundBloomFilter(DataInput meta, HFile.Reader reader) throw new IllegalArgumentException("Invalid hash type: " + hashType); } // We will pass null for ROW block - if(comparator == null) { + if (comparator == null) { index = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); } else { index = new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, 1); @@ -103,7 +95,7 @@ public boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuff bloom try { ByteBuff bloomBuf = bloomBlock.getBufferReadOnly(); result = BloomFilterUtil.contains(key, keyOffset, keyLength, bloomBuf, - bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount); + bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount); } finally { // After the use, should release the block to deallocate byte buffers. bloomBlock.release(); @@ -120,7 +112,7 @@ private HFileBlock getBloomBlock(int block) { try { // We cache the block and use a positional read. bloomBlock = reader.readBlock(index.getRootBlockOffset(block), - index.getRootBlockDataSize(block), true, true, false, true, BlockType.BLOOM_CHUNK, null); + index.getRootBlockDataSize(block), true, true, false, true, BlockType.BLOOM_CHUNK, null); } catch (IOException ex) { // The Bloom filter is broken, turn it off. throw new IllegalArgumentException("Failed to load Bloom block", ex); @@ -198,12 +190,10 @@ public long getNumPositivesForTesting(int chunk) { public String toString() { StringBuilder sb = new StringBuilder(); sb.append(BloomFilterUtil.formatStats(this)); - sb.append(BloomFilterUtil.STATS_RECORD_SEP + - "Number of chunks: " + numChunks); - sb.append(BloomFilterUtil.STATS_RECORD_SEP + - ((comparator != null) ? "Comparator: " - + comparator.getClass().getSimpleName() : "Comparator: " - + Bytes.BYTES_RAWCOMPARATOR.getClass().getSimpleName())); + sb.append(BloomFilterUtil.STATS_RECORD_SEP + "Number of chunks: " + numChunks); + sb.append(BloomFilterUtil.STATS_RECORD_SEP + ((comparator != null) + ? 
"Comparator: " + comparator.getClass().getSimpleName() + : "Comparator: " + Bytes.BYTES_RAWCOMPARATOR.getClass().getSimpleName())); return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java index efc21c641408..199223882724 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,27 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.util.BloomFilterBase; - import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.util.BloomFilterBase; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class CompoundBloomFilterBase implements BloomFilterBase { /** - * At read time, the total number of chunks. At write time, the number of - * chunks created so far. The first chunk has an ID of 0, and the current - * chunk has the ID of numChunks - 1. + * At read time, the total number of chunks. At write time, the number of chunks created so far. + * The first chunk has an ID of 0, and the current chunk has the ID of numChunks - 1. */ protected int numChunks; /** - * The Bloom filter version. There used to be a DynamicByteBloomFilter which - * had version 2. + * The Bloom filter version. There used to be a DynamicByteBloomFilter which had version 2. */ public static final int VERSION = 3; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index 228b54c7ab00..1748ec56bc74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -23,33 +23,30 @@ import java.util.ArrayDeque; import java.util.Objects; import java.util.Queue; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilterChunk; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Adds methods required for writing a compound Bloom filter to the data - * section of an {@link org.apache.hadoop.hbase.io.hfile.HFile} to the - * {@link CompoundBloomFilter} class. 
+ * Adds methods required for writing a compound Bloom filter to the data section of an + * {@link org.apache.hadoop.hbase.io.hfile.HFile} to the {@link CompoundBloomFilter} class. */ @InterfaceAudience.Private public class CompoundBloomFilterWriter extends CompoundBloomFilterBase - implements BloomFilterWriter, InlineBlockWriter { + implements BloomFilterWriter, InlineBlockWriter { - private static final Logger LOG = - LoggerFactory.getLogger(CompoundBloomFilterWriter.class); + private static final Logger LOG = LoggerFactory.getLogger(CompoundBloomFilterWriter.class); /** The current chunk being written to */ private BloomFilterChunk chunk; @@ -62,7 +59,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase /** The size of individual Bloom filter chunks to create */ private int chunkByteSize; - /** The prev Cell that was processed */ + /** The prev Cell that was processed */ private Cell prevCell; /** A Bloom filter chunk enqueued for writing */ @@ -78,7 +75,7 @@ private static class ReadyChunk { private byte[] firstKeyInChunk = null; private HFileBlockIndex.BlockIndexWriter bloomBlockIndexWriter = - new HFileBlockIndex.BlockIndexWriter(); + new HFileBlockIndex.BlockIndexWriter(); /** Whether to cache-on-write compound Bloom filter chunks */ private boolean cacheOnWrite; @@ -86,23 +83,13 @@ private static class ReadyChunk { private BloomType bloomType; /** - * @param chunkByteSizeHint - * each chunk's size in bytes. The real chunk size might be different - * as required by the fold factor. - * @param errorRate - * target false positive rate - * @param hashType - * hash function type to use - * @param maxFold - * maximum degree of folding allowed - * @param bloomType - * the bloom type + * n * each chunk's size in bytes. The real chunk size might be different as required by the fold + * factor. n * target false positive rate n * hash function type to use n * maximum degree of + * folding allowed n * the bloom type */ - public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, - int hashType, int maxFold, boolean cacheOnWrite, - CellComparator comparator, BloomType bloomType) { - chunkByteSize = BloomFilterUtil.computeFoldableByteSize( - chunkByteSizeHint * 8L, maxFold); + public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType, + int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) { + chunkByteSize = BloomFilterUtil.computeFoldableByteSize(chunkByteSizeHint * 8L, maxFold); this.errorRate = errorRate; this.hashType = hashType; @@ -120,20 +107,17 @@ public boolean shouldWriteBlock(boolean closing) { /** * Enqueue the current chunk if it is ready to be written out. 
- * - * @param closing true if we are closing the file, so we do not expect new - * keys to show up + * @param closing true if we are closing the file, so we do not expect new keys to show up */ private void enqueueReadyChunk(boolean closing) { - if (chunk == null || - (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { + if (chunk == null || (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { return; } if (firstKeyInChunk == null) { - throw new NullPointerException("Trying to enqueue a chunk, " + - "but first key is null: closing=" + closing + ", keyCount=" + - chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); + throw new NullPointerException( + "Trying to enqueue a chunk, " + "but first key is null: closing=" + closing + ", keyCount=" + + chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); } ReadyChunk readyChunk = new ReadyChunk(); @@ -148,10 +132,9 @@ private void enqueueReadyChunk(boolean closing) { chunk.compactBloom(); if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) { - LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" - + prevMaxKeys + " max keys, " + prevByteSize + " bytes] to [" - + chunk.getMaxKeys() + " max keys, " + chunk.getByteSize() - + " bytes]"); + LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" + prevMaxKeys + + " max keys, " + prevByteSize + " bytes] to [" + chunk.getMaxKeys() + " max keys, " + + chunk.getByteSize() + " bytes]"); } totalMaxKeys += chunk.getMaxKeys(); @@ -170,14 +153,13 @@ public void append(Cell cell) throws IOException { if (chunk == null) { if (firstKeyInChunk != null) { - throw new IllegalStateException("First key in chunk already set: " - + Bytes.toStringBinary(firstKeyInChunk)); + throw new IllegalStateException( + "First key in chunk already set: " + Bytes.toStringBinary(firstKeyInChunk)); } // This will be done only once per chunk if (bloomType == BloomType.ROWCOL) { - firstKeyInChunk = - PrivateCellUtil - .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); + firstKeyInChunk = PrivateCellUtil + .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); } else { firstKeyInChunk = CellUtil.copyRow(cell); } @@ -204,8 +186,7 @@ public Cell getPrevCell() { private void allocateNewChunk() { if (prevChunk == null) { // First chunk - chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate, - hashType, maxFold, bloomType); + chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate, hashType, maxFold, bloomType); } else { // Use the same parameters as the last chunk, but a new array and // a zero key count. @@ -213,13 +194,13 @@ private void allocateNewChunk() { } if (chunk.getKeyCount() != 0) { - throw new IllegalStateException("keyCount=" + chunk.getKeyCount() - + " > 0"); + throw new IllegalStateException("keyCount=" + chunk.getKeyCount() + " > 0"); } chunk.allocBloom(); ++numChunks; } + @Override public void writeInlineBlock(DataOutput out) throws IOException { // We don't remove the chunk from the queue here, because we might need it @@ -242,7 +223,8 @@ public BlockType getInlineBlockType() { } private class MetaWriter implements Writable { - protected MetaWriter() {} + protected MetaWriter() { + } @Override public void readFields(DataInput in) throws IOException { @@ -250,11 +232,11 @@ public void readFields(DataInput in) throws IOException { } /** - * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, - * although the two metadata formats do not have to be consistent. 
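                  On the write side, the flow shown above is: allocate a chunk lazily, remember its first key for the block index, and queue the chunk once it is full. A compact sketch of that loop with hypothetical types; a BitSet stands in for BloomFilterChunk, and folding/compaction is omitted:

                  import java.util.ArrayDeque;
                  import java.util.ArrayList;
                  import java.util.BitSet;
                  import java.util.List;
                  import java.util.Queue;

                  final class CompoundBloomWriterSketch {
                    private static final int MAX_KEYS_PER_CHUNK = 4;   // tiny value for illustration

                    static final class ReadyChunk {
                      final String firstKey;
                      final BitSet bits;
                      ReadyChunk(String firstKey, BitSet bits) { this.firstKey = firstKey; this.bits = bits; }
                    }

                    private final Queue<ReadyChunk> readyChunks = new ArrayDeque<>();
                    private final List<String> chunkIndexKeys = new ArrayList<>();
                    private BitSet chunk;            // current chunk's bits
                    private String firstKeyInChunk;
                    private int keyCount;

                    void append(String key) {
                      if (chunk == null) {
                        chunk = new BitSet(1024);    // allocate a new chunk lazily, as allocateNewChunk() does
                        firstKeyInChunk = key;       // first key of the chunk feeds the block index
                        keyCount = 0;
                      }
                      chunk.set(Math.abs(key.hashCode() % 1024));
                      keyCount++;
                      if (keyCount >= MAX_KEYS_PER_CHUNK) {
                        enqueueReadyChunk();
                      }
                    }

                    private void enqueueReadyChunk() {
                      readyChunks.add(new ReadyChunk(firstKeyInChunk, chunk));
                      chunkIndexKeys.add(firstKeyInChunk);
                      chunk = null;                  // force a fresh chunk on the next append
                      firstKeyInChunk = null;
                    }
                  }
                  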
This - * does have to be consistent with how {@link - * CompoundBloomFilter#CompoundBloomFilter(DataInput, - * org.apache.hadoop.hbase.io.hfile.HFile.Reader)} reads fields. + * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, although + * the two metadata formats do not have to be consistent. This does have to be consistent with + * how + * {@link CompoundBloomFilter#CompoundBloomFilter(DataInput, org.apache.hadoop.hbase.io.hfile.HFile.Reader)} + * reads fields. */ @Override public void write(DataOutput out) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java index 28516c6bab4e..6a40ab1b15b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.yetus.audience.InterfaceAudience; /** * This exception is thrown when attempts to read an HFile fail due to corruption or truncation diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java index 7f7cc3e41b27..2e4a55bc7d76 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,11 +37,11 @@ public class ExclusiveMemHFileBlock extends HFileBlock { ExclusiveMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, - HFileContext fileContext, ByteBuffAllocator alloc) { + int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator alloc) { super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf, - fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); + fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java index a0c34c9fe3ef..34d6c8d926b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { /** * Whether the cache contains the block with specified cacheKey - * * @param cacheKey cache key for the block * @return true if it contains the block */ @@ -37,7 +36,6 @@ public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { /** * Specifies the secondary cache. An entry that is evicted from this cache due to a size * constraint will be inserted into the victim cache. - * * @param victimCache the second level cache * @throws IllegalArgumentException if the victim cache had already been set */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 6a2dcf926a4f..b358ad606893 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.io.hfile; - import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInput; @@ -36,20 +34,20 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; /** - * The {@link HFile} has a fixed trailer which contains offsets to other - * variable parts of the file. Also includes basic metadata on this file. The - * trailer size is fixed within a given {@link HFile} format version only, but - * we always store the version number as the last four-byte integer of the file. - * The version number itself is split into two portions, a major - * version and a minor version. The last three bytes of a file are the major - * version and a single preceding byte is the minor number. The major version - * determines which readers/writers to use to read/write a hfile while a minor - * version determines smaller changes in hfile format that do not need a new - * reader/writer type. + * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the file. + * Also includes basic metadata on this file. The trailer size is fixed within a given {@link HFile} + * format version only, but we always store the version number as the last four-byte integer of the + * file. The version number itself is split into two portions, a major version and a minor version. + * The last three bytes of a file are the major version and a single preceding byte is the minor + * number. The major version determines which readers/writers to use to read/write a hfile while a + * minor version determines smaller changes in hfile format that do not need a new reader/writer + * type. */ @InterfaceAudience.Private public class FixedFileTrailer { @@ -61,17 +59,16 @@ public class FixedFileTrailer { private static final int MAX_COMPARATOR_NAME_LENGTH = 128; /** - * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but - * only potentially useful for pretty-printing in v2. + * Offset to the fileinfo data, a small block of vitals. 
Necessary in v1 but only potentially + * useful for pretty-printing in v2. */ private long fileInfoOffset; /** - * In version 1, the offset to the data block index. Starting from version 2, - * the meaning of this field is the offset to the section of the file that - * should be loaded at the time the file is being opened: i.e. on open we load - * the root index, file info, etc. See http://hbase.apache.org/book.html#_hfile_format_2 - * in the reference guide. + * In version 1, the offset to the data block index. Starting from version 2, the meaning of this + * field is the offset to the section of the file that should be loaded at the time the file is + * being opened: i.e. on open we load the root index, file info, etc. See + * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide. */ private long loadOnOpenDataOffset; @@ -96,8 +93,7 @@ public class FixedFileTrailer { private long totalUncompressedBytes; /** - * The number of key/value pairs in the file. This field was int in version 1, - * but is now long. + * The number of key/value pairs in the file. This field was int in version 1, but is now long. */ private long entryCount; @@ -107,8 +103,7 @@ public class FixedFileTrailer { private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE; /** - * The number of levels in the potentially multi-level data index. Used from - * version 2 onwards. + * The number of levels in the potentially multi-level data index. Used from version 2 onwards. */ private int numDataIndexLevels; @@ -118,8 +113,7 @@ public class FixedFileTrailer { private long firstDataBlockOffset; /** - * It is guaranteed that no key/value data blocks start after this offset in - * the file. + * It is guaranteed that no key/value data blocks start after this offset in the file. */ private long lastDataBlockOffset; @@ -185,9 +179,8 @@ public int getTrailerSize() { } /** - * Write the trailer to a data stream. We support writing version 1 for - * testing and for determining version 1 trailer size. It is also easy to see - * what fields changed in version 2. + * Write the trailer to a data stream. We support writing version 1 for testing and for + * determining version 1 trailer size. It is also easy to see what fields changed in version 2. 
*/ void serialize(DataOutputStream outputStream) throws IOException { HFile.checkFormatVersion(majorVersion); @@ -206,15 +199,11 @@ void serialize(DataOutputStream outputStream) throws IOException { HFileProtos.FileTrailerProto toProtobuf() { HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder() - .setFileInfoOffset(fileInfoOffset) - .setLoadOnOpenDataOffset(loadOnOpenDataOffset) + .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset) .setUncompressedDataIndexSize(uncompressedDataIndexSize) - .setTotalUncompressedBytes(totalUncompressedBytes) - .setDataIndexCount(dataIndexCount) - .setMetaIndexCount(metaIndexCount) - .setEntryCount(entryCount) - .setNumDataIndexLevels(numDataIndexLevels) - .setFirstDataBlockOffset(firstDataBlockOffset) + .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount) + .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount) + .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset) .setLastDataBlockOffset(lastDataBlockOffset) .setComparatorClassName(getHBase1CompatibleName(comparatorClassName)) .setCompressionCodec(compressionCodec.ordinal()); @@ -225,9 +214,8 @@ HFileProtos.FileTrailerProto toProtobuf() { } /** - * Write trailer data as protobuf. - * NOTE: we run a translation on the comparator name and will serialize the old hbase-1.x where - * it makes sense. See {@link #getHBase1CompatibleName(String)}. + * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will + * serialize the old hbase-1.x where it makes sense. See {@link #getHBase1CompatibleName(String)}. */ void serializeAsPB(DataOutputStream output) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -249,17 +237,18 @@ void serializeAsPB(DataOutputStream output) throws IOException { } /** - * Deserialize the fixed file trailer from the given stream. The version needs - * to already be specified. Make sure this is consistent with - * {@link #serialize(DataOutputStream)}. + * Deserialize the fixed file trailer from the given stream. The version needs to already be + * specified. Make sure this is consistent with {@link #serialize(DataOutputStream)}. */ void deserialize(DataInputStream inputStream) throws IOException { HFile.checkFormatVersion(majorVersion); BlockType.TRAILER.readAndCheck(inputStream); - if (majorVersion > 2 - || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) { + if ( + majorVersion > 2 + || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION) + ) { deserializeFromPB(inputStream); } else { deserializeFromWritable(inputStream); @@ -342,10 +331,10 @@ void deserializeFromWritable(DataInput input) throws IOException { numDataIndexLevels = input.readInt(); firstDataBlockOffset = input.readLong(); lastDataBlockOffset = input.readLong(); - // TODO this is a classname encoded into an HFile's trailer. We are going to need to have + // TODO this is a classname encoded into an HFile's trailer. We are going to need to have // some compat code here. - setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input, - MAX_COMPARATOR_NAME_LENGTH))); + setComparatorClass( + getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH))); } private void append(StringBuilder sb, String s) { @@ -381,19 +370,16 @@ public String toString() { /** * Reads a file trailer from the given file. 
- * - * @param istream the input stream with the ability to seek. Does not have to - * be buffered, as only one read operation is made. + * @param istream the input stream with the ability to seek. Does not have to be buffered, as + * only one read operation is made. * @param fileSize the file size. Can be obtained using - * {@link org.apache.hadoop.fs.FileSystem#getFileStatus( - *org.apache.hadoop.fs.Path)}. + * {@link org.apache.hadoop.fs.FileSystem#getFileStatus( org.apache.hadoop.fs.Path)}. * @return the fixed file trailer read - * @throws IOException if failed to read from the underlying stream, or the - * trailer is corrupted, or the version of the trailer is - * unsupported + * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted, + * or the version of the trailer is unsupported */ - public static FixedFileTrailer readFromStream(FSDataInputStream istream, - long fileSize) throws IOException { + public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize) + throws IOException { int bufferSize = MAX_TRAILER_SIZE; long seekPoint = fileSize - bufferSize; if (seekPoint < 0) { @@ -405,8 +391,7 @@ public static FixedFileTrailer readFromStream(FSDataInputStream istream, HFileUtil.seekOnMultipleSources(istream, seekPoint); ByteBuffer buf = ByteBuffer.allocate(bufferSize); - istream.readFully(buf.array(), buf.arrayOffset(), - buf.arrayOffset() + buf.limit()); + istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit()); // Read the version from the last int of the file. buf.position(buf.limit() - Bytes.SIZEOF_INT); @@ -428,23 +413,21 @@ public static FixedFileTrailer readFromStream(FSDataInputStream istream, public void expectMajorVersion(int expected) { if (majorVersion != expected) { - throw new IllegalArgumentException("Invalid HFile major version: " - + majorVersion - + " (expected: " + expected + ")"); + throw new IllegalArgumentException( + "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")"); } } public void expectMinorVersion(int expected) { if (minorVersion != expected) { - throw new IllegalArgumentException("Invalid HFile minor version: " - + minorVersion + " (expected: " + expected + ")"); + throw new IllegalArgumentException( + "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")"); } } public void expectAtLeastMajorVersion(int lowerBound) { if (majorVersion < lowerBound) { - throw new IllegalArgumentException("Invalid HFile major version: " - + majorVersion + throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion + " (expected: " + lowerBound + " or higher)."); } } @@ -569,21 +552,20 @@ public void setComparatorClass(Class klass) { } /** - * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather - * than the new name; writing the new name will make it so newly-written hfiles are not parseable - * by hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters + * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than + * the new name; writing the new name will make it so newly-written hfiles are not parseable by + * hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters * reading hbase-2.x produce. *
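                  readFromStream() above works backwards from the end of the file: seek to at most MAX_TRAILER_SIZE before EOF, read one buffer that must contain the whole trailer, and take the version word from the last four bytes. A standalone sketch of that pattern using RandomAccessFile; the constant value here is assumed, not the real HBase limit:

                  import java.io.IOException;
                  import java.io.RandomAccessFile;
                  import java.nio.ByteBuffer;

                  final class TrailerReadSketch {
                    static final int MAX_TRAILER_SIZE = 4 * 1024;     // assumed upper bound across versions

                    static int readVersionWord(String path) throws IOException {
                      try (RandomAccessFile file = new RandomAccessFile(path, "r")) {
                        long fileSize = file.length();
                        int bufferSize = (int) Math.min(fileSize, MAX_TRAILER_SIZE);
                        long seekPoint = fileSize - bufferSize;       // never negative because of the min()
                        file.seek(seekPoint);
                        byte[] tail = new byte[bufferSize];
                        file.readFully(tail);
                        ByteBuffer buf = ByteBuffer.wrap(tail);
                        return buf.getInt(bufferSize - 4);            // last 4 bytes encode major+minor version
                      }
                    }
                  }
                  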

      - * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare - * KeyValues. In hbase-2.x they were renamed making use of the more generic 'Cell' - * nomenclature to indicate that we intend to move away from KeyValues post hbase-2. A naming - * change is not reason enough to make it so hbase-1.x cannot read hbase-2.x files given the - * structure goes unchanged (hfile v3). So, lets write the old names for Comparators into the - * hfile tails in hbase-2. Here is where we do the translation. - * {@link #getComparatorClass(String)} does translation going the other way. - * - *

      The translation is done on the serialized Protobuf only.

      - * + * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In + * hbase-2.x they were renamed making use of the more generic 'Cell' nomenclature to indicate that + * we intend to move away from KeyValues post hbase-2. A naming change is not reason enough to + * make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile v3). + * So, lets write the old names for Comparators into the hfile tails in hbase-2. Here is where we + * do the translation. {@link #getComparatorClass(String)} does translation going the other way. + *

      + * The translation is done on the serialized Protobuf only. + *

      * @param comparator String class name of the Comparator used in this hfile. * @return What to store in the trailer as our comparator name. * @see #getComparatorClass(String) @@ -606,18 +588,25 @@ private static Class getComparatorClass(String compara throws IOException { Class comparatorKlass; // for BC - if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) { + if ( + comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName()) + || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator")) + ) { comparatorKlass = CellComparatorImpl.class; - } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) - || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { + } else if ( + comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) + || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + || (comparatorClassName + .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) + || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator")) + ) { comparatorKlass = MetaCellComparator.class; - } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") - || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) { + } else if ( + comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") + || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator") + ) { // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from here // Bytes.BYTES_RAWCOMPARATOR is not a CellComparator comparatorKlass = null; @@ -635,10 +624,10 @@ private static Class getComparatorClass(String compara static CellComparator createComparator(String comparatorClassName) throws IOException { if (comparatorClassName.equals(CellComparatorImpl.COMPARATOR.getClass().getName())) { return CellComparatorImpl.COMPARATOR; - } else if (comparatorClassName.equals( - MetaCellComparator.META_COMPARATOR.getClass().getName())) { - return MetaCellComparator.META_COMPARATOR; - } + } else + if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) { + return MetaCellComparator.META_COMPARATOR; + } try { Class comparatorClass = getComparatorClass(comparatorClassName); if (comparatorClass != null) { @@ -660,8 +649,7 @@ public long getUncompressedDataIndexSize() { return uncompressedDataIndexSize; } - public void setUncompressedDataIndexSize( - long uncompressedDataIndexSize) { + public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) { expectAtLeastMajorVersion(2); this.uncompressedDataIndexSize = uncompressedDataIndexSize; } @@ -678,24 +666,23 @@ public void setEncryptionKey(byte[] keyBytes) { } /** - * Extracts the major version for a 4-byte serialized version data. 
- * The major version is the 3 least significant bytes + * Extracts the major version for a 4-byte serialized version data. The major version is the 3 + * least significant bytes */ private static int extractMajorVersion(int serializedVersion) { return (serializedVersion & 0x00ffffff); } /** - * Extracts the minor version for a 4-byte serialized version data. - * The major version are the 3 the most significant bytes + * Extracts the minor version for a 4-byte serialized version data. The major version are the 3 + * the most significant bytes */ private static int extractMinorVersion(int serializedVersion) { return (serializedVersion >>> 24); } /** - * Create a 4 byte serialized version number by combining the - * minor and major version numbers. + * Create a 4 byte serialized version number by combining the minor and major version numbers. */ static int materializeVersion(int majorVersion, int minorVersion) { return ((majorVersion & 0x00ffffff) | (minorVersion << 24)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 0cb100eefb59..ba7ff84996f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,68 +54,66 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * File format for hbase. - * A file of sorted key/value pairs. Both keys and values are byte arrays. + * File format for hbase. A file of sorted key/value pairs. Both keys and values are byte arrays. *
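                  The three helpers above pack the 3-byte major version and the 1-byte minor version (the single most significant byte) into the int stored as the file's last four bytes. A quick round-trip check of that packing:

                  public final class VersionWordDemo {
                    static int materializeVersion(int major, int minor) {
                      return (major & 0x00ffffff) | (minor << 24);
                    }

                    public static void main(String[] args) {
                      int word = materializeVersion(3, 5);
                      int major = word & 0x00ffffff;             // low three bytes -> 3
                      int minor = word >>> 24;                   // high byte -> 5
                      System.out.println(major + "." + minor);   // prints 3.5
                    }
                  }
                  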

      - * The memory footprint of a HFile includes the following (below is taken from the - * TFile documentation - * but applies also to HFile): + * The memory footprint of a HFile includes the following (below is taken from the TFile documentation but applies also + * to HFile): *

        *
      • Some constant overhead of reading or writing a compressed block. *
          - *
        • Each compressed block requires one compression/decompression codec for - * I/O. + *
        • Each compressed block requires one compression/decompression codec for I/O. *
        • Temporary space to buffer the key. *
        • Temporary space to buffer the value. *
        - *
      • HFile index, which is proportional to the total number of Data Blocks. - * The total amount of memory needed to hold the index can be estimated as - * (56+AvgKeySize)*NumBlocks. + *
      • HFile index, which is proportional to the total number of Data Blocks. The total amount of + * memory needed to hold the index can be estimated as (56+AvgKeySize)*NumBlocks. *
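                  A worked instance of the (56 + AvgKeySize) * NumBlocks estimate, with hypothetical numbers:

                  public final class IndexMemoryEstimate {
                    public static void main(String[] args) {
                      long fileBytes = 10L * 1024 * 1024 * 1024; // hypothetical 10 GB of key/value data
                      long blockSize = 64L * 1024;               // hypothetical 64 KB data blocks
                      long numBlocks = fileBytes / blockSize;    // 163,840 blocks
                      long avgKeySize = 50;                      // hypothetical average key length in bytes
                      long indexBytes = (56 + avgKeySize) * numBlocks;
                      System.out.println(indexBytes);            // 17,367,040 bytes, roughly 16.6 MB of index
                    }
                  }
                  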
      * Suggestions on performance optimization. *
        - *
      • Minimum block size. We recommend a setting of minimum block size between - * 8KB to 1MB for general usage. Larger block size is preferred if files are - * primarily for sequential access. However, it would lead to inefficient random - * access (because there are more data to decompress). Smaller blocks are good - * for random access, but require more memory to hold the block index, and may - * be slower to create (because we must flush the compressor stream at the - * conclusion of each data block, which leads to an FS I/O flush). Further, due - * to the internal caching in Compression codec, the smallest possible block - * size would be around 20KB-30KB. - *
      • The current implementation does not offer true multi-threading for - * reading. The implementation uses FSDataInputStream seek()+read(), which is - * shown to be much faster than positioned-read call in single thread mode. - * However, it also means that if multiple threads attempt to access the same - * HFile (using multiple scanners) simultaneously, the actual I/O is carried out - * sequentially even if they access different DFS blocks (Reexamine! pread seems - * to be 10% faster than seek+read in my testing -- stack). - *
      • Compression codec. Use "none" if the data is not very compressable (by - * compressable, I mean a compression ratio at least 2:1). Generally, use "lzo" - * as the starting point for experimenting. "gz" overs slightly better - * compression ratio over "lzo" but requires 4x CPU to compress and 2x CPU to - * decompress, comparing to "lzo". + *
      • Minimum block size. We recommend a setting of minimum block size between 8KB to 1MB for + * general usage. Larger block size is preferred if files are primarily for sequential access. + * However, it would lead to inefficient random access (because there are more data to decompress). + * Smaller blocks are good for random access, but require more memory to hold the block index, and + * may be slower to create (because we must flush the compressor stream at the conclusion of each + * data block, which leads to an FS I/O flush). Further, due to the internal caching in Compression + * codec, the smallest possible block size would be around 20KB-30KB. + *
      • The current implementation does not offer true multi-threading for reading. The + * implementation uses FSDataInputStream seek()+read(), which is shown to be much faster than + * positioned-read call in single thread mode. However, it also means that if multiple threads + * attempt to access the same HFile (using multiple scanners) simultaneously, the actual I/O is + * carried out sequentially even if they access different DFS blocks (Reexamine! pread seems to be + * 10% faster than seek+read in my testing -- stack). + *
      • Compression codec. Use "none" if the data is not very compressable (by compressable, I mean a + * compression ratio at least 2:1). Generally, use "lzo" as the starting point for experimenting. + * "gz" overs slightly better compression ratio over "lzo" but requires 4x CPU to compress and 2x + * CPU to decompress, comparing to "lzo". *
      - * * For more on the background behind HFile, see HBASE-61. *

      - * File is made of data blocks followed by meta data blocks (if any), a fileinfo - * block, data block index, meta data block index, and a fixed size trailer - * which records the offsets at which file changes content type. - *

      <data blocks><meta blocks><fileinfo><
      - * data index><meta index><trailer>
      - * Each block has a bit of magic at its start. Block are comprised of - * key/values. In data blocks, they are both byte arrays. Metadata blocks are - * a String key and a byte array value. An empty file looks like this: - *
      <fileinfo><trailer>
      . That is, there are not data nor meta - * blocks present. + * File is made of data blocks followed by meta data blocks (if any), a fileinfo block, data block + * index, meta data block index, and a fixed size trailer which records the offsets at which file + * changes content type. + * + *
      + * <data blocks><meta blocks><fileinfo><
      + * data index><meta index><trailer>
      + * 
      + * + * Each block has a bit of magic at its start. Block are comprised of key/values. In data blocks, + * they are both byte arrays. Metadata blocks are a String key and a byte array value. An empty file + * looks like this: + * + *
      + * <fileinfo><trailer>
      + * 
                        + * + * That is, neither data nor meta blocks are present.
                  

      - * TODO: Do scanners need to be able to take a start and end row? - * TODO: Should BlockIndex know the name of its file? Should it have a Path - * that points at its file say for the case where an index lives apart from - * an HFile instance? + * TODO: Do scanners need to be able to take a start and end row? TODO: Should BlockIndex know the + * name of its file? Should it have a Path that points at its file say for the case where an index + * lives apart from an HFile instance? */ @InterfaceAudience.Private public final class HFile { @@ -137,7 +134,8 @@ public final class HFile { /** Minimum supported HFile format version */ public static final int MIN_FORMAT_VERSION = 2; - /** Maximum supported HFile format version + /** + * Maximum supported HFile format version */ public static final int MAX_FORMAT_VERSION = 3; @@ -147,17 +145,15 @@ public final class HFile { public static final int MIN_FORMAT_VERSION_WITH_TAGS = 3; /** Default compression name: none. */ - public final static String DEFAULT_COMPRESSION = - DEFAULT_COMPRESSION_ALGORITHM.getName(); + public final static String DEFAULT_COMPRESSION = DEFAULT_COMPRESSION_ALGORITHM.getName(); /** Meta data block name for bloom filter bits. */ public static final String BLOOM_FILTER_DATA_KEY = "BLOOM_FILTER_DATA"; /** - * We assume that HFile path ends with - * ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at least this - * many levels of nesting. This is needed for identifying table and CF name - * from an HFile path. + * We assume that HFile path ends with ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at + * least this many levels of nesting. This is needed for identifying table and CF name from an + * HFile path. */ public final static int MIN_NUM_HFILE_PATH_LEVELS = 5; @@ -178,19 +174,18 @@ public final class HFile { /** * Shutdown constructor. */ - private HFile() {} + private HFile() { + } /** - * Number of checksum verification failures. It also - * clears the counter. + * Number of checksum verification failures. It also clears the counter. */ public static final long getAndResetChecksumFailuresCount() { return CHECKSUM_FAILURES.sumThenReset(); } /** - * Number of checksum verification failures. It also - * clears the counter. + * Number of checksum verification failures. It also clears the counter. */ public static final long getChecksumFailuresCount() { return CHECKSUM_FAILURES.sum(); @@ -211,7 +206,7 @@ public static final void updateWriteLatency(long latencyMillis) { /** API required to write an {@link HFile} */ public interface Writer extends Closeable, CellSink, ShipperListener { /** Max memstore (mvcc) timestamp in FileInfo */ - public static final byte [] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); + public static final byte[] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); /** Add an element to the file info map. */ void appendFileInfo(byte[] key, byte[] value) throws IOException; @@ -220,29 +215,27 @@ public interface Writer extends Closeable, CellSink, ShipperListener { Path getPath(); /** - * Adds an inline block writer such as a multi-level block index writer or - * a compound Bloom filter writer. + * Adds an inline block writer such as a multi-level block index writer or a compound Bloom + * filter writer. */ void addInlineBlockWriter(InlineBlockWriter bloomWriter); - // The below three methods take Writables. We'd like to undo Writables but undoing the below - // would be pretty painful. 
Could take a byte [] or a Message but we want to be backward + // The below three methods take Writables. We'd like to undo Writables but undoing the below + // would be pretty painful. Could take a byte [] or a Message but we want to be backward // compatible around hfiles so would need to map between Message and Writable or byte [] and - // current Writable serialization. This would be a bit of work to little gain. Thats my - // thinking at moment. St.Ack 20121129 + // current Writable serialization. This would be a bit of work to little gain. Thats my + // thinking at moment. St.Ack 20121129 void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter); /** - * Store general Bloom filter in the file. This does not deal with Bloom filter - * internals but is necessary, since Bloom filters are stored differently - * in HFile version 1 and version 2. + * Store general Bloom filter in the file. This does not deal with Bloom filter internals but is + * necessary, since Bloom filters are stored differently in HFile version 1 and version 2. */ void addGeneralBloomFilter(BloomFilterWriter bfw); /** - * Store delete family Bloom filter in the file, which is only supported in - * HFile V2. + * Store delete family Bloom filter in the file, which is only supported in HFile V2. */ void addDeleteFamilyBloomFilter(BloomFilterWriter bfw) throws IOException; @@ -253,8 +246,8 @@ public interface Writer extends Closeable, CellSink, ShipperListener { } /** - * This variety of ways to construct writers is used throughout the code, and - * we want to be able to swap writer implementations. + * This variety of ways to construct writers is used throughout the code, and we want to be able + * to swap writer implementations. */ public static class WriterFactory { protected final Configuration conf; @@ -301,11 +294,9 @@ public WriterFactory withShouldDropCacheBehind(boolean shouldDropBehind) { return this; } - public Writer create() throws IOException { if ((path != null ? 1 : 0) + (ostream != null ? 1 : 0) != 1) { - throw new AssertionError("Please specify exactly one of " + - "filesystem/path or path"); + throw new AssertionError("Please specify exactly one of " + "filesystem/path or path"); } if (path != null) { ostream = HFileWriterImpl.createOutputStream(conf, fs, path, favoredNodes); @@ -330,68 +321,65 @@ public static int getFormatVersion(Configuration conf) { } /** - * Returns the factory to be used to create {@link HFile} writers. - * Disables block cache access for all writers created through the - * returned factory. + * Returns the factory to be used to create {@link HFile} writers. Disables block cache access for + * all writers created through the returned factory. */ - public static final WriterFactory getWriterFactoryNoCache(Configuration - conf) { + public static final WriterFactory getWriterFactoryNoCache(Configuration conf) { return HFile.getWriterFactory(conf, CacheConfig.DISABLED); } /** * Returns the factory to be used to create {@link HFile} writers */ - public static final WriterFactory getWriterFactory(Configuration conf, - CacheConfig cacheConf) { + public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) { int version = getFormatVersion(conf); switch (version) { case 2: - throw new IllegalArgumentException("This should never happen. " + - "Did you change hfile.format.version to read v2? 
This version of the software writes v3" + - " hfiles only (but it can read v2 files without having to update hfile.format.version " + - "in hbase-site.xml)"); + throw new IllegalArgumentException("This should never happen. " + + "Did you change hfile.format.version to read v2? This version of the software writes v3" + + " hfiles only (but it can read v2 files without having to update hfile.format.version " + + "in hbase-site.xml)"); case 3: return new HFile.WriterFactory(conf, cacheConf); default: - throw new IllegalArgumentException("Cannot create writer for HFile " + - "format version " + version); + throw new IllegalArgumentException( + "Cannot create writer for HFile " + "format version " + version); } } /** - * An abstraction used by the block index. - * Implementations will check cache for any asked-for block and return cached block if found. - * Otherwise, after reading from fs, will try and put block into cache before returning. + * An abstraction used by the block index. Implementations will check cache for any asked-for + * block and return cached block if found. Otherwise, after reading from fs, will try and put + * block into cache before returning. */ public interface CachingBlockReader { /** * Read in a file block. - * @param offset offset to read. - * @param onDiskBlockSize size of the block - * @param isCompaction is this block being read as part of a compaction - * @param expectedBlockType the block type we are expecting to read with this read operation, - * or null to read whatever block type is available and avoid checking (that might reduce - * caching efficiency of encoded data blocks) + * @param offset offset to read. + * @param onDiskBlockSize size of the block + * @param isCompaction is this block being read as part of a compaction + * @param expectedBlockType the block type we are expecting to read with this read + * operation, or null to read whatever block type is available + * and avoid checking (that might reduce caching efficiency of + * encoded data blocks) * @param expectedDataBlockEncoding the data block encoding the caller is expecting data blocks - * to be in, or null to not perform this check and return the block irrespective of the - * encoding. This check only applies to data blocks and can be set to null when the caller is - * expecting to read a non-data block and has set expectedBlockType accordingly. + * to be in, or null to not perform this check and return the + * block irrespective of the encoding. This check only applies + * to data blocks and can be set to null when the caller is + * expecting to read a non-data block and has set + * expectedBlockType accordingly. * @return Block wrapped in a ByteBuffer. */ - HFileBlock readBlock(long offset, long onDiskBlockSize, - boolean cacheBlock, final boolean pread, final boolean isCompaction, - final boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException; + HFileBlock readBlock(long offset, long onDiskBlockSize, boolean cacheBlock, final boolean pread, + final boolean isCompaction, final boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException; } /** An interface used by clients to open and iterate an {@link HFile}. */ public interface Reader extends Closeable, CachingBlockReader { /** - * Returns this reader's "name". Usually the last component of the path. - * Needs to be constant as the file is being moved to support caching on - * write. 
+ * Returns this reader's "name". Usually the last component of the path. Needs to be constant as + * the file is being moved to support caching on write. */ String getName(); @@ -421,23 +409,23 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, FixedFileTrailer getTrailer(); void setDataBlockIndexReader(HFileBlockIndex.CellBasedKeyBlockIndexReader reader); + HFileBlockIndex.CellBasedKeyBlockIndexReader getDataBlockIndexReader(); void setMetaBlockIndexReader(HFileBlockIndex.ByteArrayKeyBlockIndexReader reader); + HFileBlockIndex.ByteArrayKeyBlockIndexReader getMetaBlockIndexReader(); HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread); /** - * Retrieves general Bloom filter metadata as appropriate for each - * {@link HFile} version. - * Knows nothing about how that metadata is structured. + * Retrieves general Bloom filter metadata as appropriate for each {@link HFile} version. Knows + * nothing about how that metadata is structured. */ DataInput getGeneralBloomFilterMetadata() throws IOException; /** - * Retrieves delete family Bloom filter metadata as appropriate for each - * {@link HFile} version. + * Retrieves delete family Bloom filter metadata as appropriate for each {@link HFile} version. * Knows nothing about how that metadata is structured. */ DataInput getDeleteBloomFilterMetadata() throws IOException; @@ -471,25 +459,26 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, void unbufferStream(); ReaderContext getContext(); + HFileInfo getHFileInfo(); + void setDataBlockEncoder(HFileDataBlockEncoder dataBlockEncoder); } /** - * Method returns the reader given the specified arguments. - * TODO This is a bad abstraction. See HBASE-6635. - * - * @param context Reader context info - * @param fileInfo HFile info + * Method returns the reader given the specified arguments. TODO This is a bad abstraction. See + * HBASE-6635. + * @param context Reader context info + * @param fileInfo HFile info * @param cacheConf Cache configuation values, cannot be null. 
- * @param conf Configuration + * @param conf Configuration * @return an appropriate instance of HFileReader * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", + justification = "Intentional") public static Reader createReader(ReaderContext context, HFileInfo fileInfo, - CacheConfig cacheConf, Configuration conf) throws IOException { + CacheConfig cacheConf, Configuration conf) throws IOException { try { if (context.getReaderType() == ReaderType.STREAM) { // stream reader will share trailer with pread reader, see HFileStreamReader#copyFields @@ -508,8 +497,8 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); - throw new CorruptHFileException("Problem reading HFile Trailer from file " - + context.getFilePath(), t); + throw new CorruptHFileException( + "Problem reading HFile Trailer from file " + context.getFilePath(), t); } finally { context.getInputStreamWrapper().unbuffer(); } @@ -517,43 +506,39 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, /** * Creates reader with cache configuration disabled - * @param fs filesystem + * @param fs filesystem * @param path Path to file to read * @param conf Configuration * @return an active Reader instance - * @throws IOException Will throw a CorruptHFileException - * (DoNotRetryIOException subtype) if hfile is corrupt/invalid. + * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile + * is corrupt/invalid. */ public static Reader createReader(FileSystem fs, Path path, Configuration conf) - throws IOException { + throws IOException { // The primaryReplicaReader is mainly used for constructing block cache key, so if we do not use // block cache then it is OK to set it as any value. We use true here. return createReader(fs, path, CacheConfig.DISABLED, true, conf); } /** - * @param fs filesystem - * @param path Path to file to read - * @param cacheConf This must not be null. + * @param fs filesystem + * @param path Path to file to read + * @param cacheConf This must not be null. * @param primaryReplicaReader true if this is a reader for primary replica - * @param conf Configuration + * @param conf Configuration * @return an active Reader instance * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile - * is corrupt/invalid. + * is corrupt/invalid. 
* @see CacheConfig#CacheConfig(Configuration) */ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf, - boolean primaryReplicaReader, Configuration conf) throws IOException { + boolean primaryReplicaReader, Configuration conf) throws IOException { Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf"); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path); - ReaderContext context = new ReaderContextBuilder() - .withFilePath(path) - .withInputStreamWrapper(stream) - .withFileSize(fs.getFileStatus(path).getLen()) - .withFileSystem(stream.getHfs()) - .withPrimaryReplicaReader(primaryReplicaReader) - .withReaderType(ReaderType.PREAD) - .build(); + ReaderContext context = + new ReaderContextBuilder().withFilePath(path).withInputStreamWrapper(stream) + .withFileSize(fs.getFileStatus(path).getLen()).withFileSystem(stream.getHfs()) + .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD).build(); HFileInfo fileInfo = new HFileInfo(context, conf); Reader reader = createReader(context, fileInfo, cacheConf, conf); fileInfo.initMetaAndIndex(reader); @@ -562,7 +547,7 @@ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheCon /** * Returns true if the specified file has a valid HFile Trailer. - * @param fs filesystem + * @param fs filesystem * @param path Path to file to verify * @return true if the file has a valid HFile Trailer, otherwise false * @throws IOException if failed to read from the underlying stream @@ -573,13 +558,13 @@ public static boolean isHFileFormat(final FileSystem fs, final Path path) throws /** * Returns true if the specified file has a valid HFile Trailer. - * @param fs filesystem + * @param fs filesystem * @param fileStatus the file to verify * @return true if the file has a valid HFile Trailer, otherwise false * @throws IOException if failed to read from the underlying stream */ public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileStatus) - throws IOException { + throws IOException { final Path path = fileStatus.getPath(); final long size = fileStatus.getLen(); try (FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path)) { @@ -593,12 +578,9 @@ public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileSt } /** - * Get names of supported compression algorithms. The names are acceptable by - * HFile.Writer. - * - * @return Array of strings, each represents a supported compression - * algorithm. Currently, the following compression algorithms are - * supported. + * Get names of supported compression algorithms. The names are acceptable by HFile.Writer. + * @return Array of strings, each represents a supported compression algorithm. Currently, the + * following compression algorithms are supported. *

        *
      • "none" - No compression. *
      • "gz" - GZIP compression. @@ -616,29 +598,28 @@ public static String[] getSupportedCompressionAlgorithms() { static int longToInt(final long l) { // Expecting the size() of a block not exceeding 4GB. Assuming the // size() will wrap to negative integer if it exceeds 2GB (From tfile). - return (int)(l & 0x00000000ffffffffL); + return (int) (l & 0x00000000ffffffffL); } /** - * Returns all HFiles belonging to the given region directory. Could return an - * empty list. - * - * @param fs The file system reference. - * @param regionDir The region directory to scan. + * Returns all HFiles belonging to the given region directory. Could return an empty list. + * @param fs The file system reference. + * @param regionDir The region directory to scan. * @return The list of files found. * @throws IOException When scanning the files fails. */ - public static List getStoreFiles(FileSystem fs, Path regionDir) - throws IOException { + public static List getStoreFiles(FileSystem fs, Path regionDir) throws IOException { List regionHFiles = new ArrayList<>(); PathFilter dirFilter = new FSUtils.DirFilter(fs); FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter); - for(FileStatus dir : familyDirs) { + for (FileStatus dir : familyDirs) { FileStatus[] files = fs.listStatus(dir.getPath()); for (FileStatus file : files) { - if (!file.isDirectory() && - (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) && - (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) { + if ( + !file.isDirectory() + && (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) + && (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR)) + ) { regionHFiles.add(file.getPath()); } } @@ -647,32 +628,28 @@ public static List getStoreFiles(FileSystem fs, Path regionDir) } /** - * Checks the given {@link HFile} format version, and throws an exception if - * invalid. Note that if the version number comes from an input file and has - * not been verified, the caller needs to re-throw an {@link IOException} to - * indicate that this is not a software error, but corrupted input. - * + * Checks the given {@link HFile} format version, and throws an exception if invalid. Note that if + * the version number comes from an input file and has not been verified, the caller needs to + * re-throw an {@link IOException} to indicate that this is not a software error, but corrupted + * input. 
* @param version an HFile version * @throws IllegalArgumentException if the version is invalid */ - public static void checkFormatVersion(int version) - throws IllegalArgumentException { + public static void checkFormatVersion(int version) throws IllegalArgumentException { if (version < MIN_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { - throw new IllegalArgumentException("Invalid HFile version: " + version - + " (expected to be " + "between " + MIN_FORMAT_VERSION + " and " - + MAX_FORMAT_VERSION + ")"); + throw new IllegalArgumentException("Invalid HFile version: " + version + " (expected to be " + + "between " + MIN_FORMAT_VERSION + " and " + MAX_FORMAT_VERSION + ")"); } } - public static void checkHFileVersion(final Configuration c) { int version = c.getInt(FORMAT_VERSION_KEY, MAX_FORMAT_VERSION); if (version < MAX_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { - throw new IllegalArgumentException("The setting for " + FORMAT_VERSION_KEY + - " (in your hbase-*.xml files) is " + version + " which does not match " + - MAX_FORMAT_VERSION + - "; are you running with a configuration from an older or newer hbase install (an " + - "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); + throw new IllegalArgumentException( + "The setting for " + FORMAT_VERSION_KEY + " (in your hbase-*.xml files) is " + version + + " which does not match " + MAX_FORMAT_VERSION + + "; are you running with a configuration from an older or newer hbase install (an " + + "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 7c7fa4ef8c36..99a952b52ee0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile; import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP; + import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; @@ -28,7 +29,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -62,56 +62,51 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Cacheable Blocks of an {@link HFile} version 2 file. - * Version 2 was introduced in hbase-0.92.0. - * - *

        Version 1 was the original file block. Version 2 was introduced when we changed the hbase file - * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support - * for Version 1 was removed in hbase-1.3.0. - * - *

        HFileBlock: Version 2

        - * In version 2, a block is structured as follows:
        + * Cacheable Blocks of an {@link HFile} version 2 file. Version 2 was introduced in hbase-0.92.0.
        + *

        + * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file + * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support for + * Version 1 was removed in hbase-1.3.0. + *

        HFileBlock: Version 2

        In version 2, a block is structured as follows: *
          *
        • Header: See Writer#putHeader() for where header is written; header total size is * HFILEBLOCK_HEADER_SIZE *
            - *
          • 0. blockType: Magic record identifying the {@link BlockType} (8 bytes): - * e.g. DATABLK* + *
          • 0. blockType: Magic record identifying the {@link BlockType} (8 bytes): e.g. + * DATABLK* *
          • 1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header, * but including tailing checksum bytes (4 bytes) *
          • 2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding * checksum bytes (4 bytes) - *
          • 3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is - * used to navigate to the previous block without having to go to the block index + *
          • 3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is used + * to navigate to the previous block without having to go to the block index *
          • 4: For minorVersions >=1, the ordinal describing checksum type (1 byte) *
          • 5: For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes) *
          • 6: onDiskDataSizeWithHeader: For minorVersions >=1, the size of data 'on disk', including * header, excluding checksums (4 bytes) *
          *
        • - *
        • Raw/Compressed/Encrypted/Encoded data: The compression - * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is - * just raw, serialized Cells. - *
        • Tail: For minorVersions >=1, a series of 4 byte checksums, one each for - * the number of bytes specified by bytesPerChecksum. + *
        • Raw/Compressed/Encrypted/Encoded data: The compression algorithm is the same for all + * the blocks in an {@link HFile}. If compression is NONE, this is just raw, serialized Cells. + *
        • Tail: For minorVersions >=1, a series of 4 byte checksums, one each for the number + * of bytes specified by bytesPerChecksum. *
        - * - *

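The header fields enumerated in the list above fix a simple byte layout: an 8-byte magic, two 4-byte sizes, an 8-byte previous-block offset, and, for minor versions >= 1, a 1-byte checksum type, a 4-byte bytesPerChecksum and a 4-byte onDiskDataSizeWithHeader, 33 bytes in total. The following standalone sketch decodes such a header with plain java.nio.ByteBuffer arithmetic; it only models the layout described in this list and is not the HBase implementation.

    import java.nio.ByteBuffer;

    // Decodes the version 2 block header fields listed above from a buffer
    // positioned at the start of a block. Illustrative sketch only.
    public final class V2BlockHeaderSketch {
      static final int MAGIC_LENGTH = 8; // e.g. "DATABLK*"

      public static void main(String[] args) {
        // Build a fake header so the sketch runs end to end.
        ByteBuffer header = ByteBuffer.allocate(33);
        header.put("DATABLK*".getBytes());   // 0. blockType magic (8 bytes)
        header.putInt(4096);                 // 1. onDiskSizeWithoutHeader
        header.putInt(8192);                 // 2. uncompressedSizeWithoutHeader
        header.putLong(-1L);                 // 3. prevBlockOffset (-1 = unknown)
        header.put((byte) 1);                // 4. checksum type ordinal
        header.putInt(16 * 1024);            // 5. bytesPerChecksum
        header.putInt(4096 + 33);            // 6. onDiskDataSizeWithHeader
        header.flip();

        byte[] magic = new byte[MAGIC_LENGTH];
        header.get(magic);
        int onDiskSizeWithoutHeader = header.getInt();
        int uncompressedSizeWithoutHeader = header.getInt();
        long prevBlockOffset = header.getLong();
        byte checksumType = header.get();
        int bytesPerChecksum = header.getInt();
        int onDiskDataSizeWithHeader = header.getInt();

        System.out.printf("magic=%s onDisk=%d uncompressed=%d prev=%d%n",
          new String(magic), onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset);
        System.out.printf("checksumType=%d bytesPerChecksum=%d onDiskDataSizeWithHeader=%d%n",
          checksumType, bytesPerChecksum, onDiskDataSizeWithHeader);
      }
    }

The 33-byte total matches the checksummed HFILEBLOCK_HEADER_SIZE referenced elsewhere in this diff.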
        Caching

        - * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the - * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase' - * checksums and then the offset into the file which is needed when we re-make a cache key - * when we return the block to the cache as 'done'. - * See {@link Cacheable#serialize(ByteBuffer, boolean)} and {@link Cacheable#getDeserializer()}. - * - *

        TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where - * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the - * only place we get blocks to cache. We also will cache the raw return from an hdfs read. In this - * case, the checksums may be present. If the cache is backed by something that doesn't do ECC, - * say an SSD, we might want to preserve checksums. For now this is open question. - *

        TODO: Over in BucketCache, we save a block allocation by doing a custom serialization. - * Be sure to change it if serialization changes in here. Could we add a method here that takes an - * IOEngine and that then serializes to it rather than expose our internals over in BucketCache? - * IOEngine is in the bucket subpackage. Pull it up? Then this class knows about bucketcache. Ugh. + *

        Caching

        Caches cache whole blocks with trailing checksums if any. We then tag on some + * metadata, the content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase' + * checksums and then the offset into the file which is needed when we re-make a cache key when we + * return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer, boolean)} and + * {@link Cacheable#getDeserializer()}. + *

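To make the paragraph above concrete: a cached block is the on-disk bytes (checksums included) plus BLOCK_METADATA_SPACE trailing bytes, which the constant defined later in this diff sizes as one flag byte, an 8-byte long and a 4-byte int (13 bytes). The sketch below models that tail; the field order is an assumption for illustration, since the serializer itself is not shown in this hunk.

    import java.nio.ByteBuffer;

    // Illustrative model of the 13-byte metadata appended to a cached block:
    // BLOCK_METADATA_SPACE = SIZEOF_BYTE + SIZEOF_LONG + SIZEOF_INT.
    // The field order here is an assumption for illustration only.
    public final class CachedBlockMetadataSketch {
      static final int BLOCK_METADATA_SPACE = 1 + 8 + 4; // 13 bytes

      static ByteBuffer appendMetadata(ByteBuffer blockWithChecksums,
          boolean usesHBaseChecksum, long fileOffset, int nextBlockOnDiskSize) {
        ByteBuffer out = ByteBuffer.allocate(blockWithChecksums.remaining() + BLOCK_METADATA_SPACE);
        out.put(blockWithChecksums.duplicate());        // whole block, checksums included
        out.put((byte) (usesHBaseChecksum ? 1 : 0));    // hbase-checksum flag
        out.putLong(fileOffset);                        // offset used to remake the cache key
        out.putInt(nextBlockOnDiskSize);                // hint for the next sequential read
        out.flip();
        return out;
      }

      public static void main(String[] args) {
        ByteBuffer block = ByteBuffer.wrap(new byte[64]); // stand-in for header + data + checksums
        ByteBuffer cached = appendMetadata(block, true, 12345L, 4096);
        System.out.println("cached size = " + cached.remaining()); // 64 + 13 = 77
      }
    }

On deserialization the same 13 bytes are peeled off the end, which is why BlockDeserializer in this diff sets the buffer limit to just before BLOCK_METADATA_SPACE before rewinding.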
        + * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where we make + * a block to cache-on-write, there is an attempt at turning off checksums. This is not the only + * place we get blocks to cache. We also will cache the raw return from an hdfs read. In this case, + * the checksums may be present. If the cache is backed by something that doesn't do ECC, say an + * SSD, we might want to preserve checksums. For now this is open question. + *

        + * TODO: Over in BucketCache, we save a block allocation by doing a custom serialization. Be sure to + * change it if serialization changes in here. Could we add a method here that takes an IOEngine and + * that then serializes to it rather than expose our internals over in BucketCache? IOEngine is in + * the bucket subpackage. Pull it up? Then this class knows about bucketcache. Ugh. */ @InterfaceAudience.Private public class HFileBlock implements Cacheable { @@ -162,48 +157,47 @@ static class Header { private long prevBlockOffset; /** - * Size on disk of header + data. Excludes checksum. Header field 6, - * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum. + * Size on disk of header + data. Excludes checksum. Header field 6, OR calculated from + * {@link #onDiskSizeWithoutHeader} when using HDFS checksum. * @see Writer#putHeader(byte[], int, int, int, int) */ private int onDiskDataSizeWithHeader; // End of Block Header fields. /** - * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by - * a single ByteBuffer or by many. Make no assumptions. - * - *

        Be careful reading from this buf. Duplicate and work on the duplicate or if - * not, be sure to reset position and limit else trouble down the road. - * - *

        TODO: Make this read-only once made. - * - *

        We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have - * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. - * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be - * good if could be confined to cache-use only but hard-to-do. + * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by a + * single ByteBuffer or by many. Make no assumptions. + *

        + * Be careful reading from this buf. Duplicate and work on the duplicate or if not, + * be sure to reset position and limit else trouble down the road. + *

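The warning above is the usual shared-buffer discipline; the sanityCheck() and toString() changes later in this hunk follow it by calling buf.duplicate() before reading. A minimal java.nio.ByteBuffer illustration (the HBase ByteBuff abstraction offers the same duplicate semantics):

    import java.nio.ByteBuffer;

    // Demonstrates the duplicate-before-read discipline the comment above asks for.
    public final class DuplicateBeforeReadSketch {
      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });

        // Risky: reading the shared buffer directly moves its position for every user.
        // byte first = buf.get();

        // Preferred: a duplicate shares the bytes but has independent position/limit.
        ByteBuffer dup = buf.duplicate();
        byte first = dup.get();

        System.out.println("first=" + first + " shared position=" + buf.position()); // still 0
      }
    }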
        + * TODO: Make this read-only once made. + *

        + * We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have a + * ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. So, + * we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be good if + * could be confined to cache-use only but hard-to-do. */ private ByteBuff buf; - /** Meta data that holds meta information on the hfileblock. + /** + * Meta data that holds meta information on the hfileblock. */ private HFileContext fileContext; /** - * The offset of this block in the file. Populated by the reader for - * convenience of access. This offset is not part of the block header. + * The offset of this block in the file. Populated by the reader for convenience of access. This + * offset is not part of the block header. */ private long offset = UNSET; /** - * The on-disk size of the next block, including the header and checksums if present. - * UNSET if unknown. - * - * Blocks try to carry the size of the next block to read in this data member. Usually - * we get block sizes from the hfile index but sometimes the index is not available: - * e.g. when we read the indexes themselves (indexes are stored in blocks, we do not - * have an index for the indexes). Saves seeks especially around file open when - * there is a flurry of reading in hfile metadata. + * The on-disk size of the next block, including the header and checksums if present. UNSET if + * unknown. Blocks try to carry the size of the next block to read in this data member. Usually we + * get block sizes from the hfile index but sometimes the index is not available: e.g. when we + * read the indexes themselves (indexes are stored in blocks, we do not have an index for the + * indexes). Saves seeks especially around file open when there is a flurry of reading in hfile + * metadata. */ private int nextBlockOnDiskSize = UNSET; @@ -221,19 +215,18 @@ static class Header { // How to get the estimate correctly? if it is a singleBB? public static final int MULTI_BYTE_BUFFER_HEAP_SIZE = - (int)ClassSize.estimateBase(MultiByteBuff.class, false); + (int) ClassSize.estimateBase(MultiByteBuff.class, false); /** - * Space for metadata on a block that gets stored along with the block when we cache it. - * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS. - * 8 bytes are for the offset of this block (long) in the file. Offset is important because is is - * used when we remake the CacheKey when we return block to the cache when done. There is also - * a flag on whether checksumming is being done by hbase or not. See class comment for note on - * uncertain state of checksumming of blocks that come out of cache (should we or should we not?). - * Finally there are 4 bytes to hold the length of the next block which can save a seek on - * occasion if available. - * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was - * formerly known as EXTRA_SERIALIZATION_SPACE). + * Space for metadata on a block that gets stored along with the block when we cache it. There are + * a few bytes stuck on the end of the HFileBlock that we pull in from HDFS. 8 bytes are for the + * offset of this block (long) in the file. Offset is important because is is used when we remake + * the CacheKey when we return block to the cache when done. There is also a flag on whether + * checksumming is being done by hbase or not. 
See class comment for note on uncertain state of + * checksumming of blocks that come out of cache (should we or should we not?). Finally there are + * 4 bytes to hold the length of the next block which can save a seek on occasion if available. + * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was formerly + * known as EXTRA_SERIALIZATION_SPACE). */ public static final int BLOCK_METADATA_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; @@ -244,12 +237,10 @@ static class Header { static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT; static final byte[] DUMMY_HEADER_NO_CHECKSUM = - new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; + new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; /** - * Used deserializing blocks from Cache. - * - * + * Used deserializing blocks from Cache. * ++++++++++++++ * + HFileBlock + * ++++++++++++++ @@ -267,8 +258,7 @@ private BlockDeserializer() { } @Override - public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc) - throws IOException { + public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc) throws IOException { // The buf has the file block followed by block metadata. // Set limit to just before the BLOCK_METADATA_SPACE then rewind. buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind(); @@ -292,33 +282,36 @@ public int getDeserializerIdentifier() { private static final int DESERIALIZER_IDENTIFIER; static { DESERIALIZER_IDENTIFIER = - CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER); + CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER); } /** - * Creates a new {@link HFile} block from the given fields. This constructor - * is used only while writing blocks and caching, - * and is sitting in a byte buffer and we want to stuff the block into cache. - * - *

        TODO: The caller presumes no checksumming - *

        TODO: HFile block writer can also off-heap ?

        - * required of this block instance since going into cache; checksum already verified on - * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD? - * - * @param blockType the type of this block, see {@link BlockType} - * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader} + * Creates a new {@link HFile} block from the given fields. This constructor is used only while + * writing blocks and caching, and is sitting in a byte buffer and we want to stuff the block into + * cache. + *

        + * TODO: The caller presumes no checksumming + *

        + * TODO: HFile block writer can also off-heap ? + *

        + * required of this block instance since going into cache; checksum already verified on underlying + * block data pulled in from filesystem. Is that correct? What if cache is SSD? + * @param blockType the type of this block, see {@link BlockType} + * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader} * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader} - * @param prevBlockOffset see {@link #prevBlockOffset} - * @param buf block buffer with header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) - * @param fillHeader when true, write the first 4 header fields into passed buffer. - * @param offset the file offset the block was read from - * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} - * @param fileContext HFile meta data + * @param prevBlockOffset see {@link #prevBlockOffset} + * @param buf block buffer with header + * ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) + * @param fillHeader when true, write the first 4 header fields into passed + * buffer. + * @param offset the file offset the block was read from + * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} + * @param fileContext HFile meta data */ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, - ByteBuffAllocator allocator) { + int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator allocator) { this.blockType = blockType; this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader; this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader; @@ -336,25 +329,24 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, } /** - * Creates a block from an existing buffer starting with a header. Rewinds - * and takes ownership of the buffer. By definition of rewind, ignores the - * buffer position, but if you slice the buffer beforehand, it will rewind - * to that point. + * Creates a block from an existing buffer starting with a header. Rewinds and takes ownership of + * the buffer. By definition of rewind, ignores the buffer position, but if you slice the buffer + * beforehand, it will rewind to that point. * @param buf Has header, content, and trailing checksums if present. */ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final long offset, - final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator) - throws IOException { + final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator) + throws IOException { buf.rewind(); final BlockType blockType = BlockType.read(buf); final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX); final int uncompressedSizeWithoutHeader = - buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); + buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX); // This constructor is called when we deserialize a block from cache and when we read a block in // from the fs. fileCache is null when deserialized from cache so need to make up one. - HFileContextBuilder fileContextBuilder = fileContext != null ? 
- new HFileContextBuilder(fileContext) : new HFileContextBuilder(); + HFileContextBuilder fileContextBuilder = + fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder(); fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum); int onDiskDataSizeWithHeader; if (usesHBaseChecksum) { @@ -372,36 +364,30 @@ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final } fileContext = fileContextBuilder.build(); assert usesHBaseChecksum == fileContext.isUseHBaseChecksum(); - return new HFileBlockBuilder() - .withBlockType(blockType) - .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader) - .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader) - .withPrevBlockOffset(prevBlockOffset) - .withOffset(offset) - .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader) - .withNextBlockOnDiskSize(nextBlockOnDiskSize) - .withHFileContext(fileContext) - .withByteBuffAllocator(allocator) - .withByteBuff(buf.rewind()) - .withShared(!buf.hasArray()) - .build(); + return new HFileBlockBuilder().withBlockType(blockType) + .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader) + .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader) + .withPrevBlockOffset(prevBlockOffset).withOffset(offset) + .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader) + .withNextBlockOnDiskSize(nextBlockOnDiskSize).withHFileContext(fileContext) + .withByteBuffAllocator(allocator).withByteBuff(buf.rewind()).withShared(!buf.hasArray()) + .build(); } /** * Parse total on disk size including header and checksum. - * @param headerBuf Header ByteBuffer. Presumed exact size of header. + * @param headerBuf Header ByteBuffer. Presumed exact size of header. * @param verifyChecksum true if checksum verification is in use. * @return Size of the block with header included. */ - private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, - boolean verifyChecksum) { + private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean verifyChecksum) { return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(verifyChecksum); } /** * @return the on-disk size of the next block (including the header size and any checksums if - * present) read by peeking into the next block's header; use as a hint when doing - * a read of the next block when scanning or running over a file. + * present) read by peeking into the next block's header; use as a hint when doing a read + * of the next block when scanning or running over a file. */ int getNextBlockOnDiskSize() { return nextBlockOnDiskSize; @@ -435,8 +421,8 @@ public boolean release() { /** @return get data block encoding id that was used to encode this block */ short getDataBlockEncodingId() { if (blockType != BlockType.ENCODED_DATA) { - throw new IllegalArgumentException("Querying encoder ID of a block " + - "of type other than " + BlockType.ENCODED_DATA + ": " + blockType); + throw new IllegalArgumentException("Querying encoder ID of a block " + "of type other than " + + BlockType.ENCODED_DATA + ": " + blockType); } return buf.getShort(headerSize()); } @@ -463,16 +449,15 @@ int getUncompressedSizeWithoutHeader() { } /** - * @return the offset of the previous block of the same type in the file, or - * -1 if unknown + * @return the offset of the previous block of the same type in the file, or -1 if unknown */ long getPrevBlockOffset() { return prevBlockOffset; } /** - * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position - * is modified as side-effect. 
+ * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position is modified as + * side-effect. */ private void overwriteHeader() { buf.rewind(); @@ -510,10 +495,9 @@ public ByteBuff getBufferWithoutHeader(boolean withChecksum) { * Returns a read-only duplicate of the buffer this block stores internally ready to be read. * Clients must not modify the buffer object though they may set position and limit on the * returned buffer since we pass back a duplicate. This method has to be public because it is used - * in {@link CompoundBloomFilter} to avoid object creation on every Bloom - * filter lookup, but has to be used with caution. Buffer holds header, block content, - * and any follow-on checksums if present. - * + * in {@link CompoundBloomFilter} to avoid object creation on every Bloom filter lookup, but has + * to be used with caution. Buffer holds header, block content, and any follow-on checksums if + * present. * @return the buffer of this block for read-only operations */ public ByteBuff getBufferReadOnly() { @@ -527,29 +511,28 @@ public ByteBuffAllocator getByteBuffAllocator() { return this.allocator; } - private void sanityCheckAssertion(long valueFromBuf, long valueFromField, - String fieldName) throws IOException { + private void sanityCheckAssertion(long valueFromBuf, long valueFromField, String fieldName) + throws IOException { if (valueFromBuf != valueFromField) { throw new AssertionError(fieldName + " in the buffer (" + valueFromBuf - + ") is different from that in the field (" + valueFromField + ")"); + + ") is different from that in the field (" + valueFromField + ")"); } } private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField) - throws IOException { + throws IOException { if (valueFromBuf != valueFromField) { - throw new IOException("Block type stored in the buffer: " + - valueFromBuf + ", block type field: " + valueFromField); + throw new IOException("Block type stored in the buffer: " + valueFromBuf + + ", block type field: " + valueFromField); } } /** * Checks if the block is internally consistent, i.e. the first - * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a - * valid header consistent with the fields. Assumes a packed block structure. - * This function is primary for testing and debugging, and is not - * thread-safe, because it alters the internal buffer pointer. - * Used by tests only. + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a valid header consistent + * with the fields. Assumes a packed block structure. This function is primary for testing and + * debugging, and is not thread-safe, because it alters the internal buffer pointer. Used by tests + * only. 
*/ void sanityCheck() throws IOException { // Duplicate so no side-effects @@ -559,13 +542,13 @@ void sanityCheck() throws IOException { sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader"); sanityCheckAssertion(dup.getInt(), uncompressedSizeWithoutHeader, - "uncompressedSizeWithoutHeader"); + "uncompressedSizeWithoutHeader"); sanityCheckAssertion(dup.getLong(), prevBlockOffset, "prevBlockOffset"); if (this.fileContext.isUseHBaseChecksum()) { sanityCheckAssertion(dup.get(), this.fileContext.getChecksumType().getCode(), "checksumType"); sanityCheckAssertion(dup.getInt(), this.fileContext.getBytesPerChecksum(), - "bytesPerChecksum"); + "bytesPerChecksum"); sanityCheckAssertion(dup.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader"); } @@ -580,50 +563,44 @@ void sanityCheck() throws IOException { int hdrSize = headerSize(); dup.rewind(); if (dup.remaining() != expectedBufLimit && dup.remaining() != expectedBufLimit + hdrSize) { - throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + - ", expected " + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); + throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + ", expected " + + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); } } @Override public String toString() { - StringBuilder sb = new StringBuilder() - .append("[") - .append("blockType=").append(blockType) - .append(", fileOffset=").append(offset) - .append(", headerSize=").append(headerSize()) + StringBuilder sb = new StringBuilder().append("[").append("blockType=").append(blockType) + .append(", fileOffset=").append(offset).append(", headerSize=").append(headerSize()) .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader) .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader) - .append(", prevBlockOffset=").append(prevBlockOffset) - .append(", isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum()); + .append(", prevBlockOffset=").append(prevBlockOffset).append(", isUseHBaseChecksum=") + .append(fileContext.isUseHBaseChecksum()); if (fileContext.isUseHBaseChecksum()) { sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24))) .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1)) .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); } else { - sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader) - .append("(").append(onDiskSizeWithoutHeader) - .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); + sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader).append("(") + .append(onDiskSizeWithoutHeader).append("+") + .append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); } String dataBegin; if (buf.hasArray()) { dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(), - Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); + Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); } else { ByteBuff bufWithoutHeader = getBufferWithoutHeader(); - byte[] dataBeginBytes = new byte[Math.min(32, - bufWithoutHeader.limit() - bufWithoutHeader.position())]; + byte[] dataBeginBytes = + new byte[Math.min(32, bufWithoutHeader.limit() - bufWithoutHeader.position())]; bufWithoutHeader.get(dataBeginBytes); dataBegin = Bytes.toStringBinary(dataBeginBytes); } sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader()) - .append(", totalChecksumBytes=").append(totalChecksumBytes()) - 
.append(", isUnpacked=").append(isUnpacked()) - .append(", buf=[").append(buf).append("]") - .append(", dataBeginsWith=").append(dataBegin) - .append(", fileContext=").append(fileContext) - .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize) - .append("]"); + .append(", totalChecksumBytes=").append(totalChecksumBytes()).append(", isUnpacked=") + .append(isUnpacked()).append(", buf=[").append(buf).append("]").append(", dataBeginsWith=") + .append(dataBegin).append(", fileContext=").append(fileContext) + .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize).append("]"); return sb.toString(); } @@ -644,7 +621,8 @@ HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException boolean succ = false; try { HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA - ? reader.getBlockDecodingContext() : reader.getDefaultBlockDecodingContext(); + ? reader.getBlockDecodingContext() + : reader.getDefaultBlockDecodingContext(); // Create a duplicated buffer without the header part. ByteBuff dup = this.buf.duplicate(); dup.position(this.headerSize()); @@ -662,9 +640,8 @@ HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException } /** - * Always allocates a new buffer of the correct size. Copies header bytes - * from the existing buffer. Does not change header fields. - * Reserve room to keep checksum bytes too. + * Always allocates a new buffer of the correct size. Copies header bytes from the existing + * buffer. Does not change header fields. Reserve room to keep checksum bytes too. */ private void allocateBuffer() { int cksumBytes = totalChecksumBytes(); @@ -740,14 +717,13 @@ public boolean isSharedMem() { } /** - * Unified version 2 {@link HFile} block writer. The intended usage pattern - * is as follows: + * Unified version 2 {@link HFile} block writer. The intended usage pattern is as follows: *
          *
        1. Construct an {@link HFileBlock.Writer}, providing a compression algorithm. *
        2. Call {@link Writer#startWriting} and get a data stream to write to. *
        3. Write your data into the stream. - *
        4. Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to. - * store the serialized block into an external stream. + *
        5. Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to. store the + * serialized block into an external stream. *
        6. Repeat to write more blocks. *
        *

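A schematic driver for the usage pattern in the list above, built from the constructor and method signatures that appear in this diff. The block-writing methods are package-private, so real callers live in org.apache.hadoop.hbase.io.hfile (e.g. HFileWriterImpl); the HFileContext construction and block type chosen here are simplified and should be treated as assumptions.

    package org.apache.hadoop.hbase.io.hfile;

    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;

    // Schematic driver for the block writer usage pattern described above.
    // Placed in the io.hfile package because startWriting/write/writeHeaderAndData
    // are package-private; an illustrative sketch, not shipped code.
    final class BlockWriterUsageSketch {
      static void writeOneBlock(Configuration conf, FSDataOutputStream out) throws IOException {
        HFileContext ctx = new HFileContextBuilder().build();        // 1. compression etc. come from the context
        HFileBlock.Writer writer =
          new HFileBlock.Writer(conf, NoOpDataBlockEncoder.INSTANCE, ctx);
        DataOutputStream dos = writer.startWriting(BlockType.META);  // 2. obtain the block's stream
        dos.write("example payload".getBytes());                     // 3. write your data into the stream
        writer.writeHeaderAndData(out);                               // 4. header + data + checksums to the file
        // Repeat startWriting/write/writeHeaderAndData for further blocks.
      }
    }

Reusing the same Writer for consecutive blocks matters: writeHeaderAndData records each block's start offset, and startWriting saves it per block type so a later block can carry it as prevBlockOffset, as the code in this diff shows.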
        @@ -767,42 +743,39 @@ private enum State { private HFileBlockEncodingContext dataBlockEncodingCtx; - /** block encoding context for non-data blocks*/ + /** block encoding context for non-data blocks */ private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx; /** - * The stream we use to accumulate data into a block in an uncompressed format. - * We reset this stream at the end of each block and reuse it. The - * header is written as the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this - * stream. + * The stream we use to accumulate data into a block in an uncompressed format. We reset this + * stream at the end of each block and reuse it. The header is written as the first + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this stream. */ private ByteArrayOutputStream baosInMemory; /** - * Current block type. Set in {@link #startWriting(BlockType)}. Could be - * changed in {@link #finishBlock()} from {@link BlockType#DATA} - * to {@link BlockType#ENCODED_DATA}. + * Current block type. Set in {@link #startWriting(BlockType)}. Could be changed in + * {@link #finishBlock()} from {@link BlockType#DATA} to {@link BlockType#ENCODED_DATA}. */ private BlockType blockType; /** - * A stream that we write uncompressed bytes to, which compresses them and - * writes them to {@link #baosInMemory}. + * A stream that we write uncompressed bytes to, which compresses them and writes them to + * {@link #baosInMemory}. */ private DataOutputStream userDataStream; /** - * Bytes to be written to the file system, including the header. Compressed - * if compression is turned on. It also includes the checksum data that - * immediately follows the block data. (header + data + checksums) + * Bytes to be written to the file system, including the header. Compressed if compression is + * turned on. It also includes the checksum data that immediately follows the block data. + * (header + data + checksums) */ private ByteArrayOutputStream onDiskBlockBytesWithHeader; /** - * The size of the checksum data on disk. It is used only if data is - * not compressed. If data is compressed, then the checksums are already - * part of onDiskBytesWithHeader. If data is uncompressed, then this - * variable stores the checksum data for this block. + * The size of the checksum data on disk. It is used only if data is not compressed. If data is + * compressed, then the checksums are already part of onDiskBytesWithHeader. If data is + * uncompressed, then this variable stores the checksum data for this block. */ private byte[] onDiskChecksum = HConstants.EMPTY_BYTE_ARRAY; @@ -813,14 +786,13 @@ private enum State { private long startOffset; /** - * Offset of previous block by block type. Updated when the next block is - * started. + * Offset of previous block by block type. Updated when the next block is started. 
*/ private long[] prevOffsetByType; /** The offset of the previous block of the same type */ private long prevOffset; - /** Meta data that holds information about the hfileblock**/ + /** Meta data that holds information about the hfileblock **/ private HFileContext fileContext; private final ByteBuffAllocator allocator; @@ -840,20 +812,20 @@ EncodingState getEncodingState() { * @param dataBlockEncoder data block encoding algorithm to use */ public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, - HFileContext fileContext) { + HFileContext fileContext) { this(conf, dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP); } public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, - HFileContext fileContext, ByteBuffAllocator allocator) { + HFileContext fileContext, ByteBuffAllocator allocator) { if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) { - throw new RuntimeException("Unsupported value of bytesPerChecksum. " + - " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + - fileContext.getBytesPerChecksum()); + throw new RuntimeException("Unsupported value of bytesPerChecksum. " + " Minimum is " + + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + + fileContext.getBytesPerChecksum()); } this.allocator = allocator; - this.dataBlockEncoder = dataBlockEncoder != null? - dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE; + this.dataBlockEncoder = + dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE; this.dataBlockEncodingCtx = this.dataBlockEncoder.newDataBlockEncodingContext(conf, HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); // TODO: This should be lazily instantiated @@ -872,11 +844,9 @@ public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, /** * Starts writing into the block. The previous block's data is discarded. - * * @return the stream the user can write their data into */ - DataOutputStream startWriting(BlockType newBlockType) - throws IOException { + DataOutputStream startWriting(BlockType newBlockType) throws IOException { if (state == State.BLOCK_READY && startOffset != -1) { // We had a previous block that was written to a stream at a specific // offset. Save that offset as the last offset of a block of that type. @@ -902,18 +872,17 @@ DataOutputStream startWriting(BlockType newBlockType) /** * Writes the Cell to this block */ - void write(Cell cell) throws IOException{ + void write(Cell cell) throws IOException { expectState(State.WRITING); this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream); } /** - * Transitions the block writer from the "writing" state to the "block - * ready" state. Does nothing if a block is already finished. + * Transitions the block writer from the "writing" state to the "block ready" state. Does + * nothing if a block is already finished. */ void ensureBlockReady() throws IOException { - Preconditions.checkState(state != State.INIT, - "Unexpected state: " + state); + Preconditions.checkState(state != State.INIT, "Unexpected state: " + state); if (state == State.BLOCK_READY) { return; @@ -924,15 +893,14 @@ void ensureBlockReady() throws IOException { } /** - * Finish up writing of the block. - * Flushes the compressing stream (if using compression), fills out the header, - * does any compression/encryption of bytes to flush out to disk, and manages + * Finish up writing of the block. 
Flushes the compressing stream (if using compression), fills + * out the header, does any compression/encryption of bytes to flush out to disk, and manages * the cache on write content, if applicable. Sets block write state to "block ready". */ private void finishBlock() throws IOException { if (blockType == BlockType.DATA) { this.dataBlockEncoder.endBlockEncoding(dataBlockEncodingCtx, userDataStream, - baosInMemory.getBuffer(), blockType); + baosInMemory.getBuffer(), blockType); blockType = dataBlockEncodingCtx.getBlockType(); } userDataStream.flush(); @@ -943,11 +911,11 @@ private void finishBlock() throws IOException { state = State.BLOCK_READY; Bytes compressAndEncryptDat; if (blockType == BlockType.DATA || blockType == BlockType.ENCODED_DATA) { - compressAndEncryptDat = dataBlockEncodingCtx. - compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); + compressAndEncryptDat = + dataBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); } else { - compressAndEncryptDat = defaultBlockEncodingCtx. - compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); + compressAndEncryptDat = defaultBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), + 0, baosInMemory.size()); } if (compressAndEncryptDat == null) { compressAndEncryptDat = new Bytes(baosInMemory.getBuffer(), 0, baosInMemory.size()); @@ -957,34 +925,32 @@ private void finishBlock() throws IOException { } onDiskBlockBytesWithHeader.reset(); onDiskBlockBytesWithHeader.write(compressAndEncryptDat.get(), - compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); + compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); // Calculate how many bytes we need for checksum on the tail of the block. - int numBytes = (int) ChecksumUtil.numBytes( - onDiskBlockBytesWithHeader.size(), - fileContext.getBytesPerChecksum()); + int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), + fileContext.getBytesPerChecksum()); // Put the header for the on disk bytes; header currently is unfilled-out - putHeader(onDiskBlockBytesWithHeader, - onDiskBlockBytesWithHeader.size() + numBytes, - baosInMemory.size(), onDiskBlockBytesWithHeader.size()); + putHeader(onDiskBlockBytesWithHeader, onDiskBlockBytesWithHeader.size() + numBytes, + baosInMemory.size(), onDiskBlockBytesWithHeader.size()); if (onDiskChecksum.length != numBytes) { onDiskChecksum = new byte[numBytes]; } - ChecksumUtil.generateChecksums( - onDiskBlockBytesWithHeader.getBuffer(), 0,onDiskBlockBytesWithHeader.size(), - onDiskChecksum, 0, fileContext.getChecksumType(), fileContext.getBytesPerChecksum()); + ChecksumUtil.generateChecksums(onDiskBlockBytesWithHeader.getBuffer(), 0, + onDiskBlockBytesWithHeader.size(), onDiskChecksum, 0, fileContext.getChecksumType(), + fileContext.getBytesPerChecksum()); } /** * Put the header into the given byte array at the given offset. 
- * @param onDiskSize size of the block on disk header + data + checksum - * @param uncompressedSize size of the block after decompression (but - * before optional data block decoding) including header - * @param onDiskDataSize size of the block on disk with header - * and data but not including the checksums + * @param onDiskSize size of the block on disk header + data + checksum + * @param uncompressedSize size of the block after decompression (but before optional data block + * decoding) including header + * @param onDiskDataSize size of the block on disk with header and data but not including the + * checksums */ - private void putHeader(byte[] dest, int offset, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { + private void putHeader(byte[] dest, int offset, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { offset = blockType.put(dest, offset); offset = Bytes.putInt(dest, offset, onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE); offset = Bytes.putInt(dest, offset, uncompressedSize - HConstants.HFILEBLOCK_HEADER_SIZE); @@ -994,8 +960,8 @@ private void putHeader(byte[] dest, int offset, int onDiskSize, Bytes.putInt(dest, offset, onDiskDataSize); } - private void putHeader(ByteBuff buff, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { + private void putHeader(ByteBuff buff, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { buff.rewind(); blockType.write(buff); buff.putInt(onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE); @@ -1006,36 +972,33 @@ private void putHeader(ByteBuff buff, int onDiskSize, buff.putInt(onDiskDataSize); } - private void putHeader(ByteArrayOutputStream dest, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { - putHeader(dest.getBuffer(),0, onDiskSize, uncompressedSize, onDiskDataSize); + private void putHeader(ByteArrayOutputStream dest, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { + putHeader(dest.getBuffer(), 0, onDiskSize, uncompressedSize, onDiskDataSize); } /** - * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records - * the offset of this block so that it can be referenced in the next block - * of the same type. + * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records the offset of this + * block so that it can be referenced in the next block of the same type. */ void writeHeaderAndData(FSDataOutputStream out) throws IOException { long offset = out.getPos(); if (startOffset != UNSET && offset != startOffset) { throw new IOException("A " + blockType + " block written to a " - + "stream twice, first at offset " + startOffset + ", then at " - + offset); + + "stream twice, first at offset " + startOffset + ", then at " + offset); } startOffset = offset; finishBlockAndWriteHeaderAndData(out); } /** - * Writes the header and the compressed data of this block (or uncompressed - * data when not using compression) into the given stream. Can be called in - * the "writing" state or in the "block ready" state. If called in the - * "writing" state, transitions the writer to the "block ready" state. + * Writes the header and the compressed data of this block (or uncompressed data when not using + * compression) into the given stream. Can be called in the "writing" state or in the "block + * ready" state. If called in the "writing" state, transitions the writer to the "block ready" + * state. 
* @param out the output stream to write the */ - protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) - throws IOException { + protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException { ensureBlockReady(); long startTime = EnvironmentEdgeManager.currentTime(); out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size()); @@ -1044,25 +1007,21 @@ protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) } /** - * Returns the header or the compressed data (or uncompressed data when not - * using compression) as a byte array. Can be called in the "writing" state - * or in the "block ready" state. If called in the "writing" state, - * transitions the writer to the "block ready" state. This returns - * the header + data + checksums stored on disk. - * + * Returns the header or the compressed data (or uncompressed data when not using compression) + * as a byte array. Can be called in the "writing" state or in the "block ready" state. If + * called in the "writing" state, transitions the writer to the "block ready" state. This + * returns the header + data + checksums stored on disk. * @return header and data as they would be stored on disk in a byte array */ byte[] getHeaderAndDataForTest() throws IOException { ensureBlockReady(); // This is not very optimal, because we are doing an extra copy. // But this method is used only by unit tests. - byte[] output = - new byte[onDiskBlockBytesWithHeader.size() - + onDiskChecksum.length]; + byte[] output = new byte[onDiskBlockBytesWithHeader.size() + onDiskChecksum.length]; System.arraycopy(onDiskBlockBytesWithHeader.getBuffer(), 0, output, 0, - onDiskBlockBytesWithHeader.size()); - System.arraycopy(onDiskChecksum, 0, output, - onDiskBlockBytesWithHeader.size(), onDiskChecksum.length); + onDiskBlockBytesWithHeader.size()); + System.arraycopy(onDiskChecksum, 0, output, onDiskBlockBytesWithHeader.size(), + onDiskChecksum.length); return output; } @@ -1081,25 +1040,21 @@ void release() { } /** - * Returns the on-disk size of the data portion of the block. This is the - * compressed size if compression is enabled. Can only be called in the - * "block ready" state. Header is not compressed, and its size is not - * included in the return value. - * + * Returns the on-disk size of the data portion of the block. This is the compressed size if + * compression is enabled. Can only be called in the "block ready" state. Header is not + * compressed, and its size is not included in the return value. * @return the on-disk size of the block, not including the header. */ int getOnDiskSizeWithoutHeader() { expectState(State.BLOCK_READY); - return onDiskBlockBytesWithHeader.size() + - onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE; + return onDiskBlockBytesWithHeader.size() + onDiskChecksum.length + - HConstants.HFILEBLOCK_HEADER_SIZE; } /** - * Returns the on-disk size of the block. Can only be called in the - * "block ready" state. - * - * @return the on-disk size of the block ready to be written, including the - * header size, the data and the checksum data. + * Returns the on-disk size of the block. Can only be called in the "block ready" state. + * @return the on-disk size of the block ready to be written, including the header size, the + * data and the checksum data. 
*/ int getOnDiskSizeWithHeader() { expectState(State.BLOCK_READY); @@ -1122,16 +1077,14 @@ int getUncompressedSizeWithHeader() { return baosInMemory.size(); } - /** @return true if a block is being written */ + /** @return true if a block is being written */ boolean isWriting() { return state == State.WRITING; } /** - * Returns the number of bytes written into the current block so far, or - * zero if not writing the block at the moment. Note that this will return - * zero in the "block ready" state as well. - * + * Returns the number of bytes written into the current block so far, or zero if not writing the + * block at the moment. Note that this will return zero in the "block ready" state as well. * @return the number of bytes written */ public int encodedBlockSizeWritten() { @@ -1139,10 +1092,8 @@ public int encodedBlockSizeWritten() { } /** - * Returns the number of bytes written into the current block so far, or - * zero if not writing the block at the moment. Note that this will return - * zero in the "block ready" state as well. - * + * Returns the number of bytes written into the current block so far, or zero if not writing the + * block at the moment. Note that this will return zero in the "block ready" state as well. * @return the number of bytes written */ int blockSizeWritten() { @@ -1150,22 +1101,20 @@ int blockSizeWritten() { } /** - * Clones the header followed by the uncompressed data, even if using - * compression. This is needed for storing uncompressed blocks in the block - * cache. Can be called in the "writing" state or the "block ready" state. - * Returns only the header and data, does not include checksum data. - * + * Clones the header followed by the uncompressed data, even if using compression. This is + * needed for storing uncompressed blocks in the block cache. Can be called in the "writing" + * state or the "block ready" state. Returns only the header and data, does not include checksum + * data. * @return Returns an uncompressed block ByteBuff for caching on write */ ByteBuff cloneUncompressedBufferWithHeader() { expectState(State.BLOCK_READY); ByteBuff bytebuff = allocator.allocate(baosInMemory.size()); baosInMemory.toByteBuff(bytebuff); - int numBytes = (int) ChecksumUtil.numBytes( - onDiskBlockBytesWithHeader.size(), - fileContext.getBytesPerChecksum()); - putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, - baosInMemory.size(), onDiskBlockBytesWithHeader.size()); + int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), + fileContext.getBytesPerChecksum()); + putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, baosInMemory.size(), + onDiskBlockBytesWithHeader.size()); bytebuff.rewind(); return bytebuff; } @@ -1186,53 +1135,45 @@ private ByteBuff cloneOnDiskBufferWithHeader() { private void expectState(State expectedState) { if (state != expectedState) { - throw new IllegalStateException("Expected state: " + expectedState + - ", actual state: " + state); + throw new IllegalStateException( + "Expected state: " + expectedState + ", actual state: " + state); } } /** - * Takes the given {@link BlockWritable} instance, creates a new block of - * its appropriate type, writes the writable into this block, and flushes - * the block into the output stream. The writer is instructed not to buffer - * uncompressed bytes for cache-on-write. 
- * - * @param bw the block-writable object to write as a block + * Takes the given {@link BlockWritable} instance, creates a new block of its appropriate type, + * writes the writable into this block, and flushes the block into the output stream. The writer + * is instructed not to buffer uncompressed bytes for cache-on-write. + * @param bw the block-writable object to write as a block * @param out the file system output stream */ - void writeBlock(BlockWritable bw, FSDataOutputStream out) - throws IOException { + void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException { bw.writeToBlock(startWriting(bw.getBlockType())); writeHeaderAndData(out); } /** - * Creates a new HFileBlock. Checksums have already been validated, so - * the byte buffer passed into the constructor of this newly created - * block does not have checksum data even though the header minor - * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a - * 0 value in bytesPerChecksum. This method copies the on-disk or - * uncompressed data to build the HFileBlock which is used only - * while writing blocks and caching. - * - *

        TODO: Should there be an option where a cache can ask that hbase preserve block - * checksums for checking after a block comes out of the cache? Otehrwise, cache is responsible - * for blocks being wholesome (ECC memory or if file-backed, it does checksumming). + * Creates a new HFileBlock. Checksums have already been validated, so the byte buffer passed + * into the constructor of this newly created block does not have checksum data even though the + * header minor version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a 0 value + * in bytesPerChecksum. This method copies the on-disk or uncompressed data to build the + * HFileBlock which is used only while writing blocks and caching. + *

        + * TODO: Should there be an option where a cache can ask that hbase preserve block checksums for + * checking after a block comes out of the cache? Otehrwise, cache is responsible for blocks + * being wholesome (ECC memory or if file-backed, it does checksumming). */ HFileBlock getBlockForCaching(CacheConfig cacheConf) { - HFileContext newContext = new HFileContextBuilder() - .withBlockSize(fileContext.getBlocksize()) - .withBytesPerCheckSum(0) - .withChecksumType(ChecksumType.NULL) // no checksums in cached data - .withCompression(fileContext.getCompression()) - .withDataBlockEncoding(fileContext.getDataBlockEncoding()) - .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) - .withCompressTags(fileContext.isCompressTags()) - .withIncludesMvcc(fileContext.isIncludesMvcc()) - .withIncludesTags(fileContext.isIncludesTags()) - .withColumnFamily(fileContext.getColumnFamily()) - .withTableName(fileContext.getTableName()) - .build(); + HFileContext newContext = new HFileContextBuilder().withBlockSize(fileContext.getBlocksize()) + .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data + .withCompression(fileContext.getCompression()) + .withDataBlockEncoding(fileContext.getDataBlockEncoding()) + .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) + .withCompressTags(fileContext.isCompressTags()) + .withIncludesMvcc(fileContext.isIncludesMvcc()) + .withIncludesTags(fileContext.isIncludesTags()) + .withColumnFamily(fileContext.getColumnFamily()).withTableName(fileContext.getTableName()) + .build(); // Build the HFileBlock. HFileBlockBuilder builder = new HFileBlockBuilder(); ByteBuff buff; @@ -1242,18 +1183,13 @@ HFileBlock getBlockForCaching(CacheConfig cacheConf) { buff = cloneUncompressedBufferWithHeader(); } return builder.withBlockType(blockType) - .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader()) - .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader()) - .withPrevBlockOffset(prevOffset) - .withByteBuff(buff) - .withFillHeader(FILL_HEADER) - .withOffset(startOffset) - .withNextBlockOnDiskSize(UNSET) - .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length) - .withHFileContext(newContext) - .withByteBuffAllocator(cacheConf.getByteBuffAllocator()) - .withShared(!buff.hasArray()) - .build(); + .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader()) + .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader()) + .withPrevBlockOffset(prevOffset).withByteBuff(buff).withFillHeader(FILL_HEADER) + .withOffset(startOffset).withNextBlockOnDiskSize(UNSET) + .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length) + .withHFileContext(newContext).withByteBuffAllocator(cacheConf.getByteBuffAllocator()) + .withShared(!buff.hasArray()).build(); } } @@ -1263,9 +1199,7 @@ interface BlockWritable { BlockType getBlockType(); /** - * Writes the block to the provided stream. Must not write any magic - * records. - * + * Writes the block to the provided stream. Must not write any magic records. * @param out a stream to write uncompressed data into */ void writeToBlock(DataOutput out) throws IOException; @@ -1303,37 +1237,37 @@ interface FSReader { /** * Reads the block at the given offset in the file with the given on-disk size and uncompressed * size. - * @param offset of the file to read - * @param onDiskSize the on-disk size of the entire block, including all applicable headers, or - * -1 if unknown - * @param pread true to use pread, otherwise use the stream read. 
+ * @param offset of the file to read + * @param onDiskSize the on-disk size of the entire block, including all applicable headers, + * or -1 if unknown + * @param pread true to use pread, otherwise use the stream read. * @param updateMetrics update the metrics or not. - * @param intoHeap allocate the block's ByteBuff by {@link ByteBuffAllocator} or JVM heap. For - * LRUBlockCache, we must ensure that the block to cache is an heap one, because the - * memory occupation is based on heap now, also for {@link CombinedBlockCache}, we use - * the heap LRUBlockCache as L1 cache to cache small blocks such as IndexBlock or - * MetaBlock for faster access. So introduce an flag here to decide whether allocate - * from JVM heap or not so that we can avoid an extra off-heap to heap memory copy when - * using LRUBlockCache. For most cases, we known what's the expected block type we'll - * read, while for some special case (Example: HFileReaderImpl#readNextDataBlock()), we - * cannot pre-decide what's the expected block type, then we can only allocate block's - * ByteBuff from {@link ByteBuffAllocator} firstly, and then when caching it in - * {@link LruBlockCache} we'll check whether the ByteBuff is from heap or not, if not - * then we'll clone it to an heap one and cache it. + * @param intoHeap allocate the block's ByteBuff by {@link ByteBuffAllocator} or JVM heap. + * For LRUBlockCache, we must ensure that the block to cache is an heap + * one, because the memory occupation is based on heap now, also for + * {@link CombinedBlockCache}, we use the heap LRUBlockCache as L1 cache to + * cache small blocks such as IndexBlock or MetaBlock for faster access. So + * introduce an flag here to decide whether allocate from JVM heap or not + * so that we can avoid an extra off-heap to heap memory copy when using + * LRUBlockCache. For most cases, we known what's the expected block type + * we'll read, while for some special case (Example: + * HFileReaderImpl#readNextDataBlock()), we cannot pre-decide what's the + * expected block type, then we can only allocate block's ByteBuff from + * {@link ByteBuffAllocator} firstly, and then when caching it in + * {@link LruBlockCache} we'll check whether the ByteBuff is from heap or + * not, if not then we'll clone it to an heap one and cache it. * @return the newly read block */ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean updateMetrics, - boolean intoHeap) throws IOException; + boolean intoHeap) throws IOException; /** - * Creates a block iterator over the given portion of the {@link HFile}. - * The iterator returns blocks starting with offset such that offset <= - * startOffset < endOffset. Returned blocks are always unpacked. - * Used when no hfile index available; e.g. reading in the hfile index - * blocks themselves on file open. - * + * Creates a block iterator over the given portion of the {@link HFile}. The iterator returns + * blocks starting with offset such that offset <= startOffset < endOffset. Returned + * blocks are always unpacked. Used when no hfile index available; e.g. reading in the hfile + * index blocks themselves on file open. 
* @param startOffset the offset of the block to start iteration with - * @param endOffset the offset to end iteration at (exclusive) + * @param endOffset the offset to end iteration at (exclusive) * @return an iterator of blocks between the two given offsets */ BlockIterator blockRange(long startOffset, long endOffset); @@ -1348,6 +1282,7 @@ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean up HFileBlockDecodingContext getDefaultBlockDecodingContext(); void setIncludesMemStoreTS(boolean includesMemstoreTS); + void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf); /** @@ -1358,12 +1293,10 @@ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean up } /** - * Data-structure to use caching the header of the NEXT block. Only works if next read - * that comes in here is next in sequence in this block. - * - * When we read, we read current block and the next blocks' header. We do this so we have - * the length of the next block to read if the hfile index is not available (rare, at - * hfile open only). + * Data-structure to use caching the header of the NEXT block. Only works if next read that comes + * in here is next in sequence in this block. When we read, we read current block and the next + * blocks' header. We do this so we have the length of the next block to read if the hfile index + * is not available (rare, at hfile open only). */ private static class PrefetchedHeader { long offset = -1; @@ -1380,8 +1313,10 @@ public String toString() { * Reads version 2 HFile blocks from the filesystem. */ static class FSReaderImpl implements FSReader { - /** The file system stream of the underlying {@link HFile} that - * does or doesn't do checksum validations in the filesystem */ + /** + * The file system stream of the underlying {@link HFile} that does or doesn't do checksum + * validations in the filesystem + */ private FSDataInputStreamWrapper streamWrapper; private HFileBlockDecodingContext encodedBlockDecodingCtx; @@ -1390,10 +1325,9 @@ static class FSReaderImpl implements FSReader { private final HFileBlockDefaultDecodingContext defaultDecodingCtx; /** - * Cache of the NEXT header after this. Check it is indeed next blocks header - * before using it. TODO: Review. This overread into next block to fetch - * next blocks header seems unnecessary given we usually get the block size - * from the hfile index. Review! + * Cache of the NEXT header after this. Check it is indeed next blocks header before using it. + * TODO: Review. This overread into next block to fetch next blocks header seems unnecessary + * given we usually get the block size from the hfile index. Review! 
*/ private AtomicReference prefetchedHeader = new AtomicReference<>(new PrefetchedHeader()); @@ -1415,8 +1349,8 @@ static class FSReaderImpl implements FSReader { private final Lock streamLock = new ReentrantLock(); - FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, - ByteBuffAllocator allocator, Configuration conf) throws IOException { + FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, ByteBuffAllocator allocator, + Configuration conf) throws IOException { this.fileSize = readerContext.getFileSize(); this.hfs = readerContext.getFileSystem(); if (readerContext.getFilePath() != null) { @@ -1465,7 +1399,7 @@ public HFileBlock nextBlockWithBlockType(BlockType blockType) throws IOException HFileBlock blk = nextBlock(); if (blk.getBlockType() != blockType) { throw new IOException( - "Expected block of type " + blockType + " but found " + blk.getBlockType()); + "Expected block of type " + blockType + " but found " + blk.getBlockType()); } return blk; } @@ -1486,25 +1420,25 @@ public void freeBlocks() { * Does a positional read or a seek and read into the given byte buffer. We need take care that * we will call the {@link ByteBuff#release()} for every exit to deallocate the ByteBuffers, * otherwise the memory leak may happen. - * @param dest destination buffer - * @param size size of read + * @param dest destination buffer + * @param size size of read * @param peekIntoNextBlock whether to read the next block's on-disk size - * @param fileOffset position in the stream to read at - * @param pread whether we should do a positional read - * @param istream The input source of data + * @param fileOffset position in the stream to read at + * @param pread whether we should do a positional read + * @param istream The input source of data * @return true to indicate the destination buffer include the next block header, otherwise only * include the current block data without the next block header. * @throws IOException if any IO error happen. */ protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int size, - boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException { + boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException { if (!pread) { // Seek + read. Better for scanning. HFileUtil.seekOnMultipleSources(istream, fileOffset); long realOffset = istream.getPos(); if (realOffset != fileOffset) { throw new IOException("Tried to seek to " + fileOffset + " to read " + size - + " bytes, but pos=" + realOffset + " after seek"); + + " bytes, but pos=" + realOffset + " after seek"); } if (!peekIntoNextBlock) { BlockIOUtils.readFully(dest, istream, size); @@ -1531,18 +1465,19 @@ protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int siz /** * Reads a version 2 block (version 1 blocks not supported and not expected). Tries to do as * little memory allocation as possible, using the provided on-disk size. - * @param offset the offset in the stream to read at + * @param offset the offset in the stream to read at * @param onDiskSizeWithHeaderL the on-disk size of the block, including the header, or -1 if - * unknown; i.e. when iterating over blocks reading in the file metadata info. - * @param pread whether to use a positional read - * @param updateMetrics whether to update the metrics - * @param intoHeap allocate ByteBuff of block from heap or off-heap. + * unknown; i.e. when iterating over blocks reading in the file + * metadata info. 
+ * @param pread whether to use a positional read + * @param updateMetrics whether to update the metrics + * @param intoHeap allocate ByteBuff of block from heap or off-heap. * @see FSReader#readBlockData(long, long, boolean, boolean, boolean) for more details about the * useHeap. */ @Override public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean pread, - boolean updateMetrics, boolean intoHeap) throws IOException { + boolean updateMetrics, boolean intoHeap) throws IOException { // Get a copy of the current state of whether to validate // hbase checksums or not for this read call. This is not // thread-safe but the one constaint is that if we decide @@ -1554,17 +1489,13 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean HFileBlock blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread, doVerificationThruHBaseChecksum, updateMetrics, intoHeap); if (blk == null) { - HFile.LOG.warn("HBase checksum verification failed for file " + - pathName + " at offset " + - offset + " filesize " + fileSize + - ". Retrying read with HDFS checksums turned on..."); + HFile.LOG.warn("HBase checksum verification failed for file " + pathName + " at offset " + + offset + " filesize " + fileSize + ". Retrying read with HDFS checksums turned on..."); if (!doVerificationThruHBaseChecksum) { - String msg = "HBase checksum verification failed for file " + - pathName + " at offset " + - offset + " filesize " + fileSize + - " but this cannot happen because doVerify is " + - doVerificationThruHBaseChecksum; + String msg = "HBase checksum verification failed for file " + pathName + " at offset " + + offset + " filesize " + fileSize + " but this cannot happen because doVerify is " + + doVerificationThruHBaseChecksum; HFile.LOG.warn(msg); throw new IOException(msg); // cannot happen case here } @@ -1581,15 +1512,14 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread, doVerificationThruHBaseChecksum, updateMetrics, intoHeap); if (blk != null) { - HFile.LOG.warn("HDFS checksum verification succeeded for file " + - pathName + " at offset " + - offset + " filesize " + fileSize); + HFile.LOG.warn("HDFS checksum verification succeeded for file " + pathName + " at offset " + + offset + " filesize " + fileSize); } } if (blk == null && !doVerificationThruHBaseChecksum) { - String msg = "readBlockData failed, possibly due to " + - "checksum verification failed for file " + pathName + - " at offset " + offset + " filesize " + fileSize; + String msg = + "readBlockData failed, possibly due to " + "checksum verification failed for file " + + pathName + " at offset " + offset + " filesize " + fileSize; HFile.LOG.warn(msg); throw new IOException(msg); } @@ -1609,37 +1539,38 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean * @return Check onDiskSizeWithHeaderL size is healthy and then return it as an int */ private static int checkAndGetSizeAsInt(final long onDiskSizeWithHeaderL, final int hdrSize) - throws IOException { - if ((onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1) - || onDiskSizeWithHeaderL >= Integer.MAX_VALUE) { - throw new IOException("Invalid onDisksize=" + onDiskSizeWithHeaderL - + ": expected to be at least " + hdrSize + throws IOException { + if ( + (onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1) + || onDiskSizeWithHeaderL >= Integer.MAX_VALUE + ) { + throw new IOException( + "Invalid 
onDisksize=" + onDiskSizeWithHeaderL + ": expected to be at least " + hdrSize + " and at most " + Integer.MAX_VALUE + ", or -1"); } - return (int)onDiskSizeWithHeaderL; + return (int) onDiskSizeWithHeaderL; } /** - * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something - * is not right. + * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something is + * not right. */ private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf, - final long offset, boolean verifyChecksum) - throws IOException { + final long offset, boolean verifyChecksum) throws IOException { // Assert size provided aligns with what is in the header int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum); if (passedIn != fromHeader) { - throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader + - ", offset=" + offset + ", fileContext=" + this.fileContext); + throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader + + ", offset=" + offset + ", fileContext=" + this.fileContext); } } /** - * Check atomic reference cache for this block's header. Cache only good if next - * read coming through is next in sequence in the block. We read next block's - * header on the tail of reading the previous block to save a seek. Otherwise, - * we have to do a seek to read the header before we can pull in the block OR - * we have to backup the stream because we over-read (the next block's header). + * Check atomic reference cache for this block's header. Cache only good if next read coming + * through is next in sequence in the block. We read next block's header on the tail of reading + * the previous block to save a seek. Otherwise, we have to do a seek to read the header before + * we can pull in the block OR we have to backup the stream because we over-read (the next + * block's header). * @see PrefetchedHeader * @return The cached block header or null if not found. * @see #cacheNextBlockHeader(long, ByteBuff, int, int) @@ -1654,8 +1585,8 @@ private ByteBuff getCachedHeader(final long offset) { * @see #getCachedHeader(long) * @see PrefetchedHeader */ - private void cacheNextBlockHeader(final long offset, - ByteBuff onDiskBlock, int onDiskSizeWithHeader, int headerLength) { + private void cacheNextBlockHeader(final long offset, ByteBuff onDiskBlock, + int onDiskSizeWithHeader, int headerLength) { PrefetchedHeader ph = new PrefetchedHeader(); ph.offset = offset; onDiskBlock.get(onDiskSizeWithHeader, ph.header, 0, headerLength); @@ -1663,12 +1594,11 @@ private void cacheNextBlockHeader(final long offset, } private int getNextBlockOnDiskSize(boolean readNextHeader, ByteBuff onDiskBlock, - int onDiskSizeWithHeader) { + int onDiskSizeWithHeader) { int nextBlockOnDiskSize = -1; if (readNextHeader) { nextBlockOnDiskSize = - onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) - + hdrSize; + onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) + hdrSize; } return nextBlockOnDiskSize; } @@ -1679,34 +1609,37 @@ private ByteBuff allocate(int size, boolean intoHeap) { /** * Reads a version 2 block. - * @param offset the offset in the stream to read at. + * @param offset the offset in the stream to read at. * @param onDiskSizeWithHeaderL the on-disk size of the block, including the header and - * checksums if present or -1 if unknown (as a long). 
Can be -1 if we are doing raw - * iteration of blocks as when loading up file metadata; i.e. the first read of a new - * file. Usually non-null gotten from the file index. - * @param pread whether to use a positional read - * @param verifyChecksum Whether to use HBase checksums. If HBase checksum is switched off, then - * use HDFS checksum. Can also flip on/off reading same file if we hit a troublesome - * patch in an hfile. - * @param updateMetrics whether need to update the metrics. - * @param intoHeap allocate the ByteBuff of block from heap or off-heap. + * checksums if present or -1 if unknown (as a long). Can be -1 if + * we are doing raw iteration of blocks as when loading up file + * metadata; i.e. the first read of a new file. Usually non-null + * gotten from the file index. + * @param pread whether to use a positional read + * @param verifyChecksum Whether to use HBase checksums. If HBase checksum is switched + * off, then use HDFS checksum. Can also flip on/off reading same + * file if we hit a troublesome patch in an hfile. + * @param updateMetrics whether need to update the metrics. + * @param intoHeap allocate the ByteBuff of block from heap or off-heap. * @return the HFileBlock or null if there is a HBase checksum mismatch */ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, - long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, - boolean intoHeap) throws IOException { + long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, + boolean intoHeap) throws IOException { if (offset < 0) { - throw new IOException("Invalid offset=" + offset + " trying to read " - + "block (onDiskSize=" + onDiskSizeWithHeaderL + ")"); + throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize=" + + onDiskSizeWithHeaderL + ")"); } int onDiskSizeWithHeader = checkAndGetSizeAsInt(onDiskSizeWithHeaderL, hdrSize); // Try and get cached header. Will serve us in rare case where onDiskSizeWithHeaderL is -1 // and will save us having to seek the stream backwards to reread the header we // read the last time through here. ByteBuff headerBuf = getCachedHeader(offset); - LOG.trace("Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + - "onDiskSizeWithHeader={}", this.fileContext.getHFileName(), offset, pread, - verifyChecksum, headerBuf, onDiskSizeWithHeader); + LOG.trace( + "Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + + "onDiskSizeWithHeader={}", + this.fileContext.getHFileName(), offset, pread, verifyChecksum, headerBuf, + onDiskSizeWithHeader); // This is NOT same as verifyChecksum. This latter is whether to do hbase // checksums. Can change with circumstances. The below flag is whether the // file has support for checksums (version 2+). @@ -1729,7 +1662,7 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, } onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); } - int preReadHeaderSize = headerBuf == null? 0 : hdrSize; + int preReadHeaderSize = headerBuf == null ? 0 : hdrSize; // Allocate enough space to fit the next block's header too; saves a seek next time through. // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header; // onDiskSizeWithHeader is header, body, and any checksums if present. 
preReadHeaderSize @@ -1746,7 +1679,7 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread); onDiskBlock.rewind(); // in case of moving position when copying a cached header int nextBlockOnDiskSize = - getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader); + getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader); if (headerBuf == null) { headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize); } @@ -1788,8 +1721,8 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, @Override public void setIncludesMemStoreTS(boolean includesMemstoreTS) { - this.fileContext = new HFileContextBuilder(this.fileContext) - .withIncludesMvcc(includesMemstoreTS).build(); + this.fileContext = + new HFileContextBuilder(this.fileContext).withIncludesMvcc(includesMemstoreTS).build(); } @Override @@ -1808,8 +1741,8 @@ public HFileBlockDecodingContext getDefaultBlockDecodingContext() { } /** - * Generates the checksum for the header as well as the data and then validates it. - * If the block doesn't uses checksum, returns false. + * Generates the checksum for the header as well as the data and then validates it. If the block + * doesn't uses checksum, returns false. * @return True if checksum matches, else false. */ private boolean validateChecksum(long offset, ByteBuff data, int hdrSize) { @@ -1850,12 +1783,10 @@ public String toString() { /** An additional sanity-check in case no compression or encryption is being used. */ void sanityCheckUncompressed() throws IOException { - if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + - totalChecksumBytes()) { - throw new IOException("Using no compression but " - + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", " - + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader - + ", numChecksumbytes=" + totalChecksumBytes()); + if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) { + throw new IOException("Using no compression but " + "onDiskSizeWithoutHeader=" + + onDiskSizeWithoutHeader + ", " + "uncompressedSizeWithoutHeader=" + + uncompressedSizeWithoutHeader + ", numChecksumbytes=" + totalChecksumBytes()); } } @@ -1955,8 +1886,10 @@ public boolean equals(Object comparison) { if (castedComparison.uncompressedSizeWithoutHeader != this.uncompressedSizeWithoutHeader) { return false; } - if (ByteBuff.compareTo(this.buf, 0, this.buf.limit(), castedComparison.buf, 0, - castedComparison.buf.limit()) != 0) { + if ( + ByteBuff.compareTo(this.buf, 0, this.buf.limit(), castedComparison.buf, 0, + castedComparison.buf.limit()) != 0 + ) { return false; } return true; @@ -1983,8 +1916,8 @@ int getOnDiskDataSizeWithHeader() { } /** - * Calculate the number of bytes required to store all the checksums - * for this block. Each checksum value is a 4 byte integer. + * Calculate the number of bytes required to store all the checksums for this block. Each checksum + * value is a 4 byte integer. */ int totalChecksumBytes() { // If the hfile block has minorVersion 0, then there are no checksum @@ -1995,7 +1928,7 @@ int totalChecksumBytes() { return 0; } return (int) ChecksumUtil.numBytes(onDiskDataSizeWithHeader, - this.fileContext.getBytesPerChecksum()); + this.fileContext.getBytesPerChecksum()); } /** @@ -2009,8 +1942,9 @@ public int headerSize() { * Maps a minor version to the size of the header. 
*/ public static int headerSize(boolean usesHBaseChecksum) { - return usesHBaseChecksum? - HConstants.HFILEBLOCK_HEADER_SIZE: HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; + return usesHBaseChecksum + ? HConstants.HFILEBLOCK_HEADER_SIZE + : HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; } /** @@ -2025,21 +1959,20 @@ byte[] getDummyHeaderForVersion() { * Return the appropriate DUMMY_HEADER for the minor version */ static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) { - return usesHBaseChecksum? HConstants.HFILEBLOCK_DUMMY_HEADER: DUMMY_HEADER_NO_CHECKSUM; + return usesHBaseChecksum ? HConstants.HFILEBLOCK_DUMMY_HEADER : DUMMY_HEADER_NO_CHECKSUM; } /** - * @return This HFileBlocks fileContext which will a derivative of the - * fileContext for the file from which this block's data was originally read. + * @return This HFileBlocks fileContext which will a derivative of the fileContext for the file + * from which this block's data was originally read. */ public HFileContext getHFileContext() { return this.fileContext; } /** - * Convert the contents of the block header into a human readable string. - * This is mostly helpful for debugging. This assumes that the block - * has minor version > 0. + * Convert the contents of the block header into a human readable string. This is mostly helpful + * for debugging. This assumes that the block has minor version > 0. */ static String toStringHeader(ByteBuff buf) throws IOException { byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)]; @@ -2051,31 +1984,23 @@ static String toStringHeader(ByteBuff buf) throws IOException { byte cksumtype = buf.get(); long bytesPerChecksum = buf.getInt(); long onDiskDataSizeWithHeader = buf.getInt(); - return " Header dump: magic: " + Bytes.toString(magicBuf) + - " blockType " + bt + - " compressedBlockSizeNoHeader " + - compressedBlockSizeNoHeader + - " uncompressedBlockSizeNoHeader " + - uncompressedBlockSizeNoHeader + - " prevBlockOffset " + prevBlockOffset + - " checksumType " + ChecksumType.codeToType(cksumtype) + - " bytesPerChecksum " + bytesPerChecksum + - " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader; + return " Header dump: magic: " + Bytes.toString(magicBuf) + " blockType " + bt + + " compressedBlockSizeNoHeader " + compressedBlockSizeNoHeader + + " uncompressedBlockSizeNoHeader " + uncompressedBlockSizeNoHeader + " prevBlockOffset " + + prevBlockOffset + " checksumType " + ChecksumType.codeToType(cksumtype) + + " bytesPerChecksum " + bytesPerChecksum + " onDiskDataSizeWithHeader " + + onDiskDataSizeWithHeader; } - private static HFileBlockBuilder createBuilder(HFileBlock blk){ - return new HFileBlockBuilder() - .withBlockType(blk.blockType) - .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) - .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) - .withPrevBlockOffset(blk.prevBlockOffset) - .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer. 
- .withOffset(blk.offset) - .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) - .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize) - .withHFileContext(blk.fileContext) - .withByteBuffAllocator(blk.allocator) - .withShared(blk.isSharedMem()); + private static HFileBlockBuilder createBuilder(HFileBlock blk) { + return new HFileBlockBuilder().withBlockType(blk.blockType) + .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) + .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) + .withPrevBlockOffset(blk.prevBlockOffset).withByteBuff(blk.buf.duplicate()) // Duplicate the + // buffer. + .withOffset(blk.offset).withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) + .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize).withHFileContext(blk.fileContext) + .withByteBuffAllocator(blk.allocator).withShared(blk.isSharedMem()); } static HFileBlock shallowClone(HFileBlock blk) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java index dc37a920f2ff..91e62b491c6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -103,12 +103,12 @@ public HFileBlockBuilder withShared(boolean isShared) { public HFileBlock build() { if (isShared) { return new SharedMemHFileBlock(blockType, onDiskSizeWithoutHeader, - uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, - nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); + uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, + nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); } else { return new ExclusiveMemHFileBlock(blockType, onDiskSizeWithoutHeader, - uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, - nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); + uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, + nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index b38964ebfd73..782383d697bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,20 +28,15 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -//import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader; @@ -53,17 +47,16 @@ import org.apache.hadoop.hbase.util.ObjectIntPair; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Provides functionality to write ({@link BlockIndexWriter}) and read - * BlockIndexReader - * single-level and multi-level block indexes. - * - * Examples of how to use the block index writer can be found in - * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and - * {@link HFileWriterImpl}. Examples of how to use the reader can be - * found in {@link HFileReaderImpl} and - * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. + * Provides functionality to write ({@link BlockIndexWriter}) and read BlockIndexReader single-level + * and multi-level block indexes. Examples of how to use the block index writer can be found in + * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and {@link HFileWriterImpl}. + * Examples of how to use the reader can be found in {@link HFileReaderImpl} and + * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. */ @InterfaceAudience.Private public class HFileBlockIndex { @@ -73,8 +66,8 @@ public class HFileBlockIndex { static final int DEFAULT_MAX_CHUNK_SIZE = 128 * 1024; /** - * The maximum size guideline for index blocks (both leaf, intermediate, and - * root). If not specified, DEFAULT_MAX_CHUNK_SIZE is used. + * The maximum size guideline for index blocks (both leaf, intermediate, and root). If not + * specified, DEFAULT_MAX_CHUNK_SIZE is used. */ public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size"; @@ -89,35 +82,32 @@ public class HFileBlockIndex { static final int DEFAULT_MIN_INDEX_NUM_ENTRIES = 16; /** - * The number of bytes stored in each "secondary index" entry in addition to - * key bytes in the non-root index block format. The first long is the file - * offset of the deeper-level block the entry points to, and the int that - * follows is that block's on-disk size without including header. + * The number of bytes stored in each "secondary index" entry in addition to key bytes in the + * non-root index block format. The first long is the file offset of the deeper-level block the + * entry points to, and the int that follows is that block's on-disk size without including + * header. 
*/ - static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT - + Bytes.SIZEOF_LONG; + static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG; /** * Error message when trying to use inline block API in single-level mode. */ private static final String INLINE_BLOCKS_NOT_ALLOWED = - "Inline blocks are not allowed in the single-level-only mode"; + "Inline blocks are not allowed in the single-level-only mode"; /** - * The size of a meta-data record used for finding the mid-key in a - * multi-level index. Consists of the middle leaf-level index block offset - * (long), its on-disk size without header included (int), and the mid-key - * entry's zero-based index in that leaf index block. + * The size of a meta-data record used for finding the mid-key in a multi-level index. Consists of + * the middle leaf-level index block offset (long), its on-disk size without header included + * (int), and the mid-key entry's zero-based index in that leaf index block. */ - private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + - 2 * Bytes.SIZEOF_INT; + private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + 2 * Bytes.SIZEOF_INT; /** - * An implementation of the BlockIndexReader that deals with block keys which are plain - * byte[] like MetaBlock or the Bloom Block for ROW bloom. - * Does not need a comparator. It can work on Bytes.BYTES_RAWCOMPARATOR + * An implementation of the BlockIndexReader that deals with block keys which are plain byte[] + * like MetaBlock or the Bloom Block for ROW bloom. Does not need a comparator. It can work on + * Bytes.BYTES_RAWCOMPARATOR */ - static class ByteArrayKeyBlockIndexReader extends BlockIndexReader { + static class ByteArrayKeyBlockIndexReader extends BlockIndexReader { private byte[][] blockKeys; @@ -148,8 +138,7 @@ public boolean isEmpty() { } /** - * @param i - * from 0 to {@link #getRootBlockCount() - 1} + * n * from 0 to {@link #getRootBlockCount() - 1} */ public byte[] getRootBlockKey(int i) { return blockKeys[i]; @@ -157,9 +146,9 @@ public byte[] getRootBlockKey(int i) { @Override public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { // this would not be needed return null; } @@ -209,8 +198,8 @@ public int rootBlockContainingKey(byte[] key, int offset, int length, CellCompar public int rootBlockContainingKey(Cell key) { // Should not be called on this because here it deals only with byte[] throw new UnsupportedOperationException( - "Cannot search for a key that is of Cell type. Only plain byte array keys " + - "can be searched for"); + "Cannot search for a key that is of Cell type. 
Only plain byte array keys " + + "can be searched for"); } @Override @@ -218,18 +207,17 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("size=" + rootCount).append("\n"); for (int i = 0; i < rootCount; i++) { - sb.append("key=").append(KeyValue.keyToString(blockKeys[i])) - .append("\n offset=").append(blockOffsets[i]) - .append(", dataSize=" + blockDataSizes[i]).append("\n"); + sb.append("key=").append(KeyValue.keyToString(blockKeys[i])).append("\n offset=") + .append(blockOffsets[i]).append(", dataSize=" + blockDataSizes[i]).append("\n"); } return sb.toString(); } } /** - * An implementation of the BlockIndexReader that deals with block keys which are the key - * part of a cell like the Data block index or the ROW_COL bloom blocks - * This needs a comparator to work with the Cells + * An implementation of the BlockIndexReader that deals with block keys which are the key part of + * a cell like the Data block index or the ROW_COL bloom blocks This needs a comparator to work + * with the Cells */ static class CellBasedKeyBlockIndexReader extends BlockIndexReader { @@ -268,8 +256,7 @@ public boolean isEmpty() { } /** - * @param i - * from 0 to {@link #getRootBlockCount() - 1} + * n * from 0 to {@link #getRootBlockCount() - 1} */ public Cell getRootBlockKey(int i) { return blockKeys[i]; @@ -277,9 +264,9 @@ public Cell getRootBlockKey(int i) { @Override public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { int rootLevelIndex = rootBlockContainingKey(key); if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) { return null; @@ -334,7 +321,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB if (block == null) { throw new IOException("Failed to read block at offset " + currentOffset - + ", onDiskSize=" + currentOnDiskSize); + + ", onDiskSize=" + currentOnDiskSize); } // Found a data block, break the loop and check our level in the tree. @@ -346,7 +333,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB // index block. We don't allow going deeper than searchTreeLevel. 
if (++lookupLevel > searchTreeLevel) { throw new IOException("Search Tree Level overflow: lookupLevel=" + lookupLevel - + ", searchTreeLevel=" + searchTreeLevel); + + ", searchTreeLevel=" + searchTreeLevel); } // Locate the entry corresponding to the given key in the non-root @@ -356,9 +343,8 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB if (index == -1) { // This has to be changed // For now change this to key value - throw new IOException("The key " - + CellUtil.getCellKeyAsString(key) - + " is before the" + " first key of the non-root index block " + block); + throw new IOException("The key " + CellUtil.getCellKeyAsString(key) + " is before the" + + " first key of the non-root index block " + block); } currentOffset = buffer.getLong(); @@ -387,7 +373,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB block.release(); } throw new IOException("Reached a data block at level " + lookupLevel - + " but the number of levels is " + searchTreeLevel); + + " but the number of levels is " + searchTreeLevel); } // set the next indexed key for the current block. @@ -396,8 +382,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB @Override public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) - throw new IOException("HFile empty"); + if (rootCount == 0) throw new IOException("HFile empty"); Cell targetMidKey = this.midKey.get(); if (targetMidKey != null) { @@ -406,23 +391,21 @@ public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { if (midLeafBlockOffset >= 0) { if (cachingBlockReader == null) { - throw new IOException("Have to read the middle leaf block but " + - "no block reader available"); + throw new IOException( + "Have to read the middle leaf block but " + "no block reader available"); } // Caching, using pread, assuming this is not a compaction. - HFileBlock midLeafBlock = cachingBlockReader.readBlock( - midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true, - BlockType.LEAF_INDEX, null); + HFileBlock midLeafBlock = cachingBlockReader.readBlock(midLeafBlockOffset, + midLeafBlockOnDiskSize, true, true, false, true, BlockType.LEAF_INDEX, null); try { ByteBuff b = midLeafBlock.getBufferWithoutHeader(); int numDataBlocks = b.getIntAfterPosition(0); int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1)); int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset - - SECONDARY_INDEX_ENTRY_OVERHEAD; + - SECONDARY_INDEX_ENTRY_OVERHEAD; int keyOffset = - Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset - + SECONDARY_INDEX_ENTRY_OVERHEAD; + Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset + SECONDARY_INDEX_ENTRY_OVERHEAD; byte[] bytes = b.toBytes(keyOffset, keyLen); targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); } finally { @@ -444,9 +427,8 @@ protected void initialize(int numEntries) { /** * Adds a new entry in the root block index. Only used when reading. 
- * - * @param key Last key in the block - * @param offset file offset where the block is stored + * @param key Last key in the block + * @param offset file offset where the block is stored * @param dataSize the uncompressed data size */ @Override @@ -460,10 +442,10 @@ protected void add(final byte[] key, final long offset, final int dataSize) { @Override public int rootBlockContainingKey(final byte[] key, int offset, int length, - CellComparator comp) { + CellComparator comp) { // This should always be called with Cell not with a byte[] key - throw new UnsupportedOperationException("Cannot find for a key containing plain byte " + - "array. Only cell based keys can be searched for"); + throw new UnsupportedOperationException("Cannot find for a key containing plain byte " + + "array. Only cell based keys can be searched for"); } @Override @@ -494,23 +476,20 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("size=" + rootCount).append("\n"); for (int i = 0; i < rootCount; i++) { - sb.append("key=").append((blockKeys[i])) - .append("\n offset=").append(blockOffsets[i]) - .append(", dataSize=" + blockDataSizes[i]).append("\n"); + sb.append("key=").append((blockKeys[i])).append("\n offset=").append(blockOffsets[i]) + .append(", dataSize=" + blockDataSizes[i]).append("\n"); } return sb.toString(); } } /** - * The reader will always hold the root level index in the memory. Index - * blocks at all other levels will be cached in the LRU cache in practice, - * although this API does not enforce that. - * - *
<p>
        All non-root (leaf and intermediate) index blocks contain what we call a - * "secondary index": an array of offsets to the entries within the block. - * This allows us to do binary search for the entry corresponding to the - * given key without having to deserialize the block. + * The reader will always hold the root level index in the memory. Index blocks at all other + * levels will be cached in the LRU cache in practice, although this API does not enforce that. + *
<p>
        + * All non-root (leaf and intermediate) index blocks contain what we call a "secondary index": an + * array of offsets to the entries within the block. This allows us to do binary search for the + * entry corresponding to the given key without having to deserialize the block. */ static abstract class BlockIndexReader implements HeapSize { @@ -524,8 +503,8 @@ static abstract class BlockIndexReader implements HeapSize { protected int midKeyEntry = -1; /** - * The number of levels in the block index tree. One if there is only root - * level, two for root and leaf levels, etc. + * The number of levels in the block index tree. One if there is only root level, two for root + * and leaf levels, etc. */ protected int searchTreeLevel; @@ -535,8 +514,8 @@ static abstract class BlockIndexReader implements HeapSize { public abstract boolean isEmpty(); /** - * Verifies that the block index is non-empty and throws an - * {@link IllegalStateException} otherwise. + * Verifies that the block index is non-empty and throws an {@link IllegalStateException} + * otherwise. */ public void ensureNonEmpty() { if (isEmpty()) { @@ -545,23 +524,18 @@ public void ensureNonEmpty() { } /** - * Return the data block which contains this key. This function will only - * be called when the HFile version is larger than 1. - * - * @param key the key we are looking for - * @param currentBlock the current block, to avoid re-reading the same block - * @param cacheBlocks - * @param pread - * @param isCompaction - * @param expectedDataBlockEncoding the data block encoding the caller is - * expecting the data block to be in, or null to not perform this - * check and return the block irrespective of the encoding - * @return reader a basic way to load blocks - * @throws IOException + * Return the data block which contains this key. This function will only be called when the + * HFile version is larger than 1. + * @param key the key we are looking for + * @param currentBlock the current block, to avoid re-reading the same block nnn * @param + * expectedDataBlockEncoding the data block encoding the caller is expecting + * the data block to be in, or null to not perform this check and return the + * block irrespective of the encoding + * @return reader a basic way to load blocks n */ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks, - boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, + CachingBlockReader cachingBlockReader) throws IOException { BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction, expectedDataBlockEncoding, cachingBlockReader); if (blockWithScanInfo == null) { @@ -572,29 +546,25 @@ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boole } /** - * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with - * other scan info such as the key that starts the next HFileBlock. This function will only - * be called when the HFile version is larger than 1. - * - * @param key the key we are looking for - * @param currentBlock the current block, to avoid re-reading the same block - * @param expectedDataBlockEncoding the data block encoding the caller is - * expecting the data block to be in, or null to not perform this - * check and return the block irrespective of the encoding. 
- * @return the BlockWithScanInfo which contains the DataBlock with other - * scan info such as nextIndexedKey. - * @throws IOException + * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with other + * scan info such as the key that starts the next HFileBlock. This function will only be called + * when the HFile version is larger than 1. + * @param key the key we are looking for + * @param currentBlock the current block, to avoid re-reading the same block + * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data + * block to be in, or null to not perform this check and return + * the block irrespective of the encoding. + * @return the BlockWithScanInfo which contains the DataBlock with other scan info such as + * nextIndexedKey. n */ public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException; + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException; /** - * An approximation to the {@link HFile}'s mid-key. Operates on block - * boundaries, and does not go inside blocks. In other words, returns the - * first key of the middle block of the file. - * + * An approximation to the {@link HFile}'s mid-key. Operates on block boundaries, and does not + * go inside blocks. In other words, returns the first key of the middle block of the file. * @return the first key of the middle block */ public abstract Cell midkey(CachingBlockReader cachingBlockReader) throws IOException; @@ -608,8 +578,8 @@ public long getRootBlockOffset(int i) { /** * @param i zero-based index of a root-level block - * @return the on-disk size of the root-level block for version 2, or the - * uncompressed size for version 1 + * @return the on-disk size of the root-level block for version 2, or the uncompressed size for + * version 1 */ public int getRootBlockDataSize(int i) { return blockDataSizes[i]; @@ -623,30 +593,21 @@ public int getRootBlockCount() { } /** - * Finds the root-level index block containing the given key. - * - * @param key - * Key to find - * @param comp - * the comparator to be used - * @return Offset of block containing key (between 0 and the - * number of blocks - 1) or -1 if this file does not contain the - * request. + * Finds the root-level index block containing the given key. n * Key to find n * the comparator + * to be used + * @return Offset of block containing key (between 0 and the number of blocks - 1) + * or -1 if this file does not contain the request. */ // When we want to find the meta index block or bloom block for ROW bloom // type Bytes.BYTES_RAWCOMPARATOR would be enough. For the ROW_COL bloom case we need the // CellComparator. public abstract int rootBlockContainingKey(final byte[] key, int offset, int length, - CellComparator comp); + CellComparator comp); /** - * Finds the root-level index block containing the given key. - * - * @param key - * Key to find - * @return Offset of block containing key (between 0 and the - * number of blocks - 1) or -1 if this file does not contain the - * request. + * Finds the root-level index block containing the given key. n * Key to find + * @return Offset of block containing key (between 0 and the number of blocks - 1) + * or -1 if this file does not contain the request. 
*/ // When we want to find the meta index block or bloom block for ROW bloom // type @@ -657,17 +618,13 @@ public int rootBlockContainingKey(final byte[] key, int offset, int length) { } /** - * Finds the root-level index block containing the given key. - * - * @param key - * Key to find + * Finds the root-level index block containing the given key. n * Key to find */ public abstract int rootBlockContainingKey(final Cell key); /** - * The indexed key at the ith position in the nonRootIndex. The position starts at 0. - * @param nonRootIndex - * @param i the ith position + * The indexed key at the ith position in the nonRootIndex. The position starts at 0. n * @param + * i the ith position * @return The indexed key at the ith position in the nonRootIndex. */ protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { @@ -680,42 +637,33 @@ protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { // The secondary index takes numEntries + 1 ints. int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2); // Targetkey's offset relative to the end of secondary index - int targetKeyRelOffset = nonRootIndex.getInt( - Bytes.SIZEOF_INT * (i + 1)); + int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1)); // The offset of the target key in the blockIndex buffer - int targetKeyOffset = entriesOffset // Skip secondary index - + targetKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size + int targetKeyOffset = entriesOffset // Skip secondary index + + targetKeyRelOffset // Skip all entries until mid + + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size // We subtract the two consecutive secondary index elements, which // gives us the size of the whole (offset, onDiskSize, key) tuple. We // then need to subtract the overhead of offset and onDiskSize. - int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - - targetKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; + int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset + - SECONDARY_INDEX_ENTRY_OVERHEAD; // TODO check whether we can make BB backed Cell here? So can avoid bytes copy. return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength); } /** - * Performs a binary search over a non-root level index block. Utilizes the - * secondary index, which records the offsets of (offset, onDiskSize, - * firstKey) tuples of all entries. - * - * @param key - * the key we are searching for offsets to individual entries in - * the blockIndex buffer - * @param nonRootIndex - * the non-root index block buffer, starting with the secondary - * index. The position is ignored. - * @return the index i in [0, numEntries - 1] such that keys[i] <= key < - * keys[i + 1], if keys is the array of all keys being searched, or - * -1 otherwise - * @throws IOException + * Performs a binary search over a non-root level index block. Utilizes the secondary index, + * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries. n * the + * key we are searching for offsets to individual entries in the blockIndex buffer n * the + * non-root index block buffer, starting with the secondary index. The position is ignored. 
+ * @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is + * the array of all keys being searched, or -1 otherwise n */ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, - CellComparator comparator) { + CellComparator comparator) { int numEntries = nonRootIndex.getIntAfterPosition(0); int low = 0; @@ -738,15 +686,15 @@ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, int midKeyRelOffset = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 1)); // The offset of the middle key in the blockIndex buffer - int midKeyOffset = entriesOffset // Skip secondary index - + midKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size + int midKeyOffset = entriesOffset // Skip secondary index + + midKeyRelOffset // Skip all entries until mid + + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size // We subtract the two consecutive secondary index elements, which // gives us the size of the whole (offset, onDiskSize, key) tuple. We // then need to subtract the overhead of offset and onDiskSize. - int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2)) - - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; + int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2)) + - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; // we have to compare in this order, because the comparator order // has special logic when the 'left side' is a special key. @@ -758,13 +706,10 @@ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, int cmp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, nonRootIndexkeyOnlyKV); // key lives above the midpoint - if (cmp > 0) - low = mid + 1; // Maintain the invariant that keys[low - 1] < key + if (cmp > 0) low = mid + 1; // Maintain the invariant that keys[low - 1] < key // key lives below the midpoint - else if (cmp < 0) - high = mid - 1; // Maintain the invariant that key < keys[high + 1] - else - return mid; // exact match + else if (cmp < 0) high = mid - 1; // Maintain the invariant that key < keys[high + 1] + else return mid; // exact match } // As per our invariant, keys[low - 1] < key < keys[high + 1], meaning @@ -772,8 +717,8 @@ else if (cmp < 0) // condition, low >= high + 1. Therefore, low = high + 1. if (low != high + 1) { - throw new IllegalStateException("Binary search broken: low=" + low - + " " + "instead of " + (high + 1)); + throw new IllegalStateException( + "Binary search broken: low=" + low + " " + "instead of " + (high + 1)); } // OK, our invariant says that keys[low - 1] < key < keys[low]. We need to @@ -782,30 +727,22 @@ else if (cmp < 0) // Some extra validation on the result. if (i < -1 || i >= numEntries) { - throw new IllegalStateException("Binary search broken: result is " + - i + " but expected to be between -1 and (numEntries - 1) = " + - (numEntries - 1)); + throw new IllegalStateException("Binary search broken: result is " + i + + " but expected to be between -1 and (numEntries - 1) = " + (numEntries - 1)); } return i; } /** - * Search for one key using the secondary index in a non-root block. In case - * of success, positions the provided buffer at the entry of interest, where - * the file offset and the on-disk-size can be read. - * - * @param nonRootBlock - * a non-root block without header. Initial position does not - * matter. 
- * @param key - * the byte array containing the key - * @return the index position where the given key was found, otherwise - * return -1 in the case the given key is before the first key. - * + * Search for one key using the secondary index in a non-root block. In case of success, + * positions the provided buffer at the entry of interest, where the file offset and the + * on-disk-size can be read. n * a non-root block without header. Initial position does not + * matter. n * the byte array containing the key + * @return the index position where the given key was found, otherwise return -1 in the case the + * given key is before the first key. */ - static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, - CellComparator comparator) { + static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, CellComparator comparator) { int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator); if (entryIndex != -1) { @@ -816,8 +753,7 @@ static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, // The offset of the entry we are interested in relative to the end of // the secondary index. - int entryRelOffset = nonRootBlock - .getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex)); + int entryRelOffset = nonRootBlock.getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex)); nonRootBlock.position(entriesOffset + entryRelOffset); } @@ -826,14 +762,11 @@ static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, } /** - * Read in the root-level index from the given input stream. Must match - * what was written into the root level by - * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the - * offset that function returned. - * - * @param in the buffered input stream or wrapped byte input stream - * @param numEntries the number of root-level index entries - * @throws IOException + * Read in the root-level index from the given input stream. Must match what was written into + * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset + * that function returned. + * @param in the buffered input stream or wrapped byte input stream + * @param numEntries the number of root-level index entries n */ public void readRootIndex(DataInput in, final int numEntries) throws IOException { blockOffsets = new long[numEntries]; @@ -856,15 +789,12 @@ public void readRootIndex(DataInput in, final int numEntries) throws IOException protected abstract void add(final byte[] key, final long offset, final int dataSize); /** - * Read in the root-level index from the given input stream. Must match - * what was written into the root level by - * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the - * offset that function returned. - * - * @param blk the HFile block + * Read in the root-level index from the given input stream. Must match what was written into + * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset + * that function returned. 
+ * @param blk the HFile block * @param numEntries the number of root-level index entries - * @return the buffered input stream or wrapped byte input stream - * @throws IOException + * @return the buffered input stream or wrapped byte input stream n */ public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = blk.getByteStream(); @@ -874,15 +804,12 @@ public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throw /** * Read the root-level metadata of a multi-level block index. Based on - * {@link #readRootIndex(DataInput, int)}, but also reads metadata - * necessary to compute the mid-key in a multi-level index. - * - * @param blk the HFile block - * @param numEntries the number of root-level index entries - * @throws IOException + * {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the + * mid-key in a multi-level index. + * @param blk the HFile block + * @param numEntries the number of root-level index entries n */ - public void readMultiLevelIndexRoot(HFileBlock blk, - final int numEntries) throws IOException { + public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = readRootIndex(blk, numEntries); // after reading the root index the checksum bytes have to // be subtracted to know if the mid key exists. @@ -899,8 +826,8 @@ public void readMultiLevelIndexRoot(HFileBlock blk, @Override public long heapSize() { // The BlockIndexReader does not have the blockKey, comparator and the midkey atomic reference - long heapSize = ClassSize.align(3 * ClassSize.REFERENCE + - 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT); + long heapSize = + ClassSize.align(3 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT); // Mid-key metadata. heapSize += MID_KEY_METADATA_SIZE; @@ -908,13 +835,11 @@ public long heapSize() { heapSize = calculateHeapSizeForBlockKeys(heapSize); if (blockOffsets != null) { - heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length - * Bytes.SIZEOF_LONG); + heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length * Bytes.SIZEOF_LONG); } if (blockDataSizes != null) { - heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length - * Bytes.SIZEOF_INT); + heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length * Bytes.SIZEOF_INT); } return ClassSize.align(heapSize); @@ -924,46 +849,38 @@ public long heapSize() { } /** - * Writes the block index into the output stream. Generate the tree from - * bottom up. The leaf level is written to disk as a sequence of inline - * blocks, if it is larger than a certain number of bytes. If the leaf level - * is not large enough, we write all entries to the root level instead. - * - * After all leaf blocks have been written, we end up with an index - * referencing the resulting leaf index blocks. If that index is larger than - * the allowed root index size, the writer will break it up into - * reasonable-size intermediate-level index block chunks write those chunks - * out, and create another index referencing those chunks. This will be - * repeated until the remaining index is small enough to become the root - * index. However, in most practical cases we will only have leaf-level - * blocks and the root index, or just the root index. + * Writes the block index into the output stream. Generate the tree from bottom up. The leaf level + * is written to disk as a sequence of inline blocks, if it is larger than a certain number of + * bytes. 
If the leaf level is not large enough, we write all entries to the root level instead. + * After all leaf blocks have been written, we end up with an index referencing the resulting leaf + * index blocks. If that index is larger than the allowed root index size, the writer will break + * it up into reasonable-size intermediate-level index block chunks write those chunks out, and + * create another index referencing those chunks. This will be repeated until the remaining index + * is small enough to become the root index. However, in most practical cases we will only have + * leaf-level blocks and the root index, or just the root index. */ public static class BlockIndexWriter implements InlineBlockWriter { /** - * While the index is being written, this represents the current block - * index referencing all leaf blocks, with one exception. If the file is - * being closed and there are not enough blocks to complete even a single - * leaf block, no leaf blocks get written and this contains the entire + * While the index is being written, this represents the current block index referencing all + * leaf blocks, with one exception. If the file is being closed and there are not enough blocks + * to complete even a single leaf block, no leaf blocks get written and this contains the entire * block index. After all levels of the index were written by - * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final - * root-level index. + * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final root-level index. */ private BlockIndexChunk rootChunk = new BlockIndexChunk(); /** - * Current leaf-level chunk. New entries referencing data blocks get added - * to this chunk until it grows large enough to be written to disk. + * Current leaf-level chunk. New entries referencing data blocks get added to this chunk until + * it grows large enough to be written to disk. */ private BlockIndexChunk curInlineChunk = new BlockIndexChunk(); /** - * The number of block index levels. This is one if there is only root - * level (even empty), two if there a leaf level and root level, and is - * higher if there are intermediate levels. This is only final after - * {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The - * initial value accounts for the root level, and will be increased to two - * as soon as we find out there is a leaf-level in - * {@link #blockWritten(long, int, int)}. + * The number of block index levels. This is one if there is only root level (even empty), two + * if there a leaf level and root level, and is higher if there are intermediate levels. This is + * only final after {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The initial + * value accounts for the root level, and will be increased to two as soon as we find out there + * is a leaf-level in {@link #blockWritten(long, int, int)}. */ private int numLevels = 1; @@ -971,9 +888,8 @@ public static class BlockIndexWriter implements InlineBlockWriter { private byte[] firstKey = null; /** - * The total number of leaf-level entries, i.e. entries referenced by - * leaf-level blocks. For the data block index this is equal to the number - * of data blocks. + * The total number of leaf-level entries, i.e. entries referenced by leaf-level blocks. For the + * data block index this is equal to the number of data blocks. */ private long totalNumEntries; @@ -1006,15 +922,14 @@ public BlockIndexWriter() { /** * Creates a multi-level block index writer. 
- * * @param blockWriter the block writer to use to write index blocks - * @param cacheConf used to determine when and how a block should be cached-on-write. + * @param cacheConf used to determine when and how a block should be cached-on-write. */ - public BlockIndexWriter(HFileBlock.Writer blockWriter, - CacheConfig cacheConf, String nameForCaching) { + public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf, + String nameForCaching) { if ((cacheConf == null) != (nameForCaching == null)) { - throw new IllegalArgumentException("Block cache and file name for " + - "caching must be both specified or both null"); + throw new IllegalArgumentException( + "Block cache and file name for " + "caching must be both specified or both null"); } this.blockWriter = blockWriter; @@ -1039,41 +954,35 @@ public void setMinIndexNumEntries(int minIndexNumEntries) { } /** - * Writes the root level and intermediate levels of the block index into - * the output stream, generating the tree from bottom up. Assumes that the - * leaf level has been inline-written to the disk if there is enough data - * for more than one leaf block. We iterate by breaking the current level - * of the block index, starting with the index of all leaf-level blocks, - * into chunks small enough to be written to disk, and generate its parent - * level, until we end up with a level small enough to become the root - * level. - * - * If the leaf level is not large enough, there is no inline block index - * anymore, so we only write that level of block index to disk as the root - * level. - * + * Writes the root level and intermediate levels of the block index into the output stream, + * generating the tree from bottom up. Assumes that the leaf level has been inline-written to + * the disk if there is enough data for more than one leaf block. We iterate by breaking the + * current level of the block index, starting with the index of all leaf-level blocks, into + * chunks small enough to be written to disk, and generate its parent level, until we end up + * with a level small enough to become the root level. If the leaf level is not large enough, + * there is no inline block index anymore, so we only write that level of block index to disk as + * the root level. * @param out FSDataOutputStream - * @return position at which we entered the root-level index. - * @throws IOException + * @return position at which we entered the root-level index. n */ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) { - throw new IOException("Trying to write a multi-level block index, " + - "but are " + curInlineChunk.getNumEntries() + " entries in the " + - "last inline chunk."); + throw new IOException("Trying to write a multi-level block index, " + "but are " + + curInlineChunk.getNumEntries() + " entries in the " + "last inline chunk."); } // We need to get mid-key metadata before we create intermediate // indexes and overwrite the root chunk. - byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() - : null; + byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() : null; if (curInlineChunk != null) { - while (rootChunk.getRootSize() > maxChunkSize + while ( + rootChunk.getRootSize() > maxChunkSize // HBASE-16288: if firstKey is larger than maxChunkSize we will loop indefinitely && rootChunk.getNumEntries() > minIndexNumEntries // Sanity check. 
We will not hit this (minIndexNumEntries ^ 16) blocks can be addressed - && numLevels < 16) { + && numLevels < 16 + ) { rootChunk = writeIntermediateLevel(out, rootChunk); numLevels += 1; } @@ -1083,84 +992,67 @@ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { long rootLevelIndexPos = out.getPos(); { - DataOutput blockStream = - blockWriter.startWriting(BlockType.ROOT_INDEX); + DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX); rootChunk.writeRoot(blockStream); - if (midKeyMetadata != null) - blockStream.write(midKeyMetadata); + if (midKeyMetadata != null) blockStream.write(midKeyMetadata); blockWriter.writeHeaderAndData(out); if (cacheConf != null) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true, - blockForCaching.getBlockType()), blockForCaching); + blockForCaching.getBlockType()), blockForCaching); }); } } // Add root index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); - totalBlockUncompressedSize += - blockWriter.getUncompressedSizeWithoutHeader(); + totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); if (LOG.isTraceEnabled()) { LOG.trace("Wrote a " + numLevels + "-level index with root level at pos " - + rootLevelIndexPos + ", " + rootChunk.getNumEntries() - + " root-level entries, " + totalNumEntries + " total entries, " - + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + - " on-disk size, " - + StringUtils.humanReadableInt(totalBlockUncompressedSize) + - " total uncompressed size."); + + rootLevelIndexPos + ", " + rootChunk.getNumEntries() + " root-level entries, " + + totalNumEntries + " total entries, " + + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + " on-disk size, " + + StringUtils.humanReadableInt(totalBlockUncompressedSize) + " total uncompressed size."); } return rootLevelIndexPos; } /** - * Writes the block index data as a single level only. Does not do any - * block framing. - * - * @param out the buffered output stream to write the index to. Typically a - * stream writing into an {@link HFile} block. - * @param description a short description of the index being written. Used - * in a log message. - * @throws IOException + * Writes the block index data as a single level only. Does not do any block framing. + * @param out the buffered output stream to write the index to. Typically a stream + * writing into an {@link HFile} block. + * @param description a short description of the index being written. Used in a log message. 
n */ - public void writeSingleLevelIndex(DataOutput out, String description) - throws IOException { + public void writeSingleLevelIndex(DataOutput out, String description) throws IOException { expectNumLevels(1); - if (!singleLevelOnly) - throw new IOException("Single-level mode is turned off"); + if (!singleLevelOnly) throw new IOException("Single-level mode is turned off"); if (rootChunk.getNumEntries() > 0) - throw new IOException("Root-level entries already added in " + - "single-level mode"); + throw new IOException("Root-level entries already added in " + "single-level mode"); rootChunk = curInlineChunk; curInlineChunk = new BlockIndexChunk(); if (LOG.isTraceEnabled()) { - LOG.trace("Wrote a single-level " + description + " index with " - + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize() - + " bytes"); + LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries() + + " entries, " + rootChunk.getRootSize() + " bytes"); } rootChunk.writeRoot(out); } /** - * Split the current level of the block index into intermediate index - * blocks of permitted size and write those blocks to disk. Return the next - * level of the block index referencing those intermediate-level blocks. - * - * @param out - * @param currentLevel the current level of the block index, such as the a - * chunk referencing all leaf-level index blocks - * @return the parent level block index, which becomes the root index after - * a few (usually zero) iterations - * @throws IOException + * Split the current level of the block index into intermediate index blocks of permitted size + * and write those blocks to disk. Return the next level of the block index referencing those + * intermediate-level blocks. n * @param currentLevel the current level of the block index, such + * as the a chunk referencing all leaf-level index blocks + * @return the parent level block index, which becomes the root index after a few (usually zero) + * iterations n */ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, - BlockIndexChunk currentLevel) throws IOException { + BlockIndexChunk currentLevel) throws IOException { // Entries referencing intermediate-level blocks we are about to create. BlockIndexChunk parent = new BlockIndexChunk(); @@ -1168,8 +1060,8 @@ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, BlockIndexChunk curChunk = new BlockIndexChunk(); for (int i = 0; i < currentLevel.getNumEntries(); ++i) { - curChunk.add(currentLevel.getBlockKey(i), - currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i)); + curChunk.add(currentLevel.getBlockKey(i), currentLevel.getBlockOffset(i), + currentLevel.getOnDiskDataSize(i)); // HBASE-16288: We have to have at least minIndexNumEntries(16) items in the index so that // we won't end up with too-many levels for a index with very large rowKeys. 
Also, if the @@ -1186,11 +1078,10 @@ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, return parent; } - private void writeIntermediateBlock(FSDataOutputStream out, - BlockIndexChunk parent, BlockIndexChunk curChunk) throws IOException { + private void writeIntermediateBlock(FSDataOutputStream out, BlockIndexChunk parent, + BlockIndexChunk curChunk) throws IOException { long beginOffset = out.getPos(); - DataOutputStream dos = blockWriter.startWriting( - BlockType.INTERMEDIATE_INDEX); + DataOutputStream dos = blockWriter.startWriting(BlockType.INTERMEDIATE_INDEX); curChunk.writeNonRoot(dos); byte[] curFirstKey = curChunk.getBlockKey(0); blockWriter.writeHeaderAndData(out); @@ -1199,23 +1090,21 @@ private void writeIntermediateBlock(FSDataOutputStream out, cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock( - new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), - blockForCaching); + new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), + blockForCaching); }); } // Add intermediate index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); - totalBlockUncompressedSize += - blockWriter.getUncompressedSizeWithoutHeader(); + totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); // OFFSET is the beginning offset the chunk of block index entries. // SIZE is the total byte size of the chunk of block index entries // + the secondary index size // FIRST_KEY is the first key in the chunk of block index // entries. - parent.add(curFirstKey, beginOffset, - blockWriter.getOnDiskSizeWithHeader()); + parent.add(curFirstKey, beginOffset, blockWriter.getOnDiskSizeWithHeader()); // clear current block index chunk curChunk.clear(); @@ -1238,15 +1127,15 @@ public int getNumLevels() { private void expectNumLevels(int expectedNumLevels) { if (numLevels != expectedNumLevels) { - throw new IllegalStateException("Number of block index levels is " - + numLevels + "but is expected to be " + expectedNumLevels); + throw new IllegalStateException("Number of block index levels is " + numLevels + + "but is expected to be " + expectedNumLevels); } } /** - * Whether there is an inline block ready to be written. In general, we - * write an leaf-level index block as an inline block as soon as its size - * as serialized in the non-root format reaches a certain threshold. + * Whether there is an inline block ready to be written. In general, we write an leaf-level + * index block as an inline block as soon as its size as serialized in the non-root format + * reaches a certain threshold. */ @Override public boolean shouldWriteBlock(boolean closing) { @@ -1255,8 +1144,8 @@ public boolean shouldWriteBlock(boolean closing) { } if (curInlineChunk == null) { - throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + - "called with closing=true and then called again?"); + throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + + "called with closing=true and then called again?"); } if (curInlineChunk.getNumEntries() == 0) { @@ -1271,7 +1160,7 @@ public boolean shouldWriteBlock(boolean closing) { expectNumLevels(1); rootChunk = curInlineChunk; - curInlineChunk = null; // Disallow adding any more index entries. + curInlineChunk = null; // Disallow adding any more index entries. 
return false; } @@ -1282,15 +1171,12 @@ public boolean shouldWriteBlock(boolean closing) { } /** - * Write out the current inline index block. Inline blocks are non-root - * blocks, so the non-root index format is used. - * - * @param out + * Write out the current inline index block. Inline blocks are non-root blocks, so the non-root + * index format is used. n */ @Override public void writeInlineBlock(DataOutput out) throws IOException { - if (singleLevelOnly) - throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); + if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); // Write the inline block index to the output stream in the non-root // index block format. @@ -1305,8 +1191,8 @@ public void writeInlineBlock(DataOutput out) throws IOException { } /** - * Called after an inline block has been written so that we can add an - * entry referring to that block to the parent-level index. + * Called after an inline block has been written so that we can add an entry referring to that + * block to the parent-level index. */ @Override public void blockWritten(long offset, int onDiskSize, int uncompressedSize) { @@ -1314,13 +1200,12 @@ public void blockWritten(long offset, int onDiskSize, int uncompressedSize) { totalBlockOnDiskSize += onDiskSize; totalBlockUncompressedSize += uncompressedSize; - if (singleLevelOnly) - throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); + if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); if (firstKey == null) { - throw new IllegalStateException("Trying to add second-level index " + - "entry with offset=" + offset + " and onDiskSize=" + onDiskSize + - "but the first key was not set in writeInlineBlock"); + throw new IllegalStateException( + "Trying to add second-level index " + "entry with offset=" + offset + " and onDiskSize=" + + onDiskSize + "but the first key was not set in writeInlineBlock"); } if (rootChunk.getNumEntries() == 0) { @@ -1341,14 +1226,13 @@ public BlockType getInlineBlockType() { } /** - * Add one index entry to the current leaf-level block. When the leaf-level - * block gets large enough, it will be flushed to disk as an inline block. - * - * @param firstKey the first key of the data block - * @param blockOffset the offset of the data block - * @param blockDataSize the on-disk size of the data block ({@link HFile} - * format version 2), or the uncompressed size of the data block ( - * {@link HFile} format version 1). + * Add one index entry to the current leaf-level block. When the leaf-level block gets large + * enough, it will be flushed to disk as an inline block. + * @param firstKey the first key of the data block + * @param blockOffset the offset of the data block + * @param blockDataSize the on-disk size of the data block ({@link HFile} format version 2), or + * the uncompressed size of the data block ( {@link HFile} format version + * 1). 
*/ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { curInlineChunk.add(firstKey, blockOffset, blockDataSize); @@ -1360,16 +1244,15 @@ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { */ public void ensureSingleLevel() throws IOException { if (numLevels > 1) { - throw new IOException ("Wrote a " + numLevels + "-level index with " + - rootChunk.getNumEntries() + " root-level entries, but " + - "this is expected to be a single-level block index."); + throw new IOException( + "Wrote a " + numLevels + "-level index with " + rootChunk.getNumEntries() + + " root-level entries, but " + "this is expected to be a single-level block index."); } } /** - * @return true if we are using cache-on-write. This is configured by the - * caller of the constructor by either passing a valid block cache - * or null. + * @return true if we are using cache-on-write. This is configured by the caller of the + * constructor by either passing a valid block cache or null. */ @Override public boolean getCacheOnWrite() { @@ -1377,9 +1260,8 @@ public boolean getCacheOnWrite() { } /** - * The total uncompressed size of the root index block, intermediate-level - * index blocks, and leaf-level index blocks. - * + * The total uncompressed size of the root index block, intermediate-level index blocks, and + * leaf-level index blocks. * @return the total uncompressed size of all index blocks */ public long getTotalUncompressedSize() { @@ -1389,9 +1271,8 @@ public long getTotalUncompressedSize() { } /** - * A single chunk of the block index in the process of writing. The data in - * this chunk can become a leaf-level, intermediate-level, or root index - * block. + * A single chunk of the block index in the process of writing. The data in this chunk can become + * a leaf-level, intermediate-level, or root index block. */ static class BlockIndexChunk { @@ -1405,16 +1286,16 @@ static class BlockIndexChunk { private final List onDiskDataSizes = new ArrayList<>(); /** - * The cumulative number of sub-entries, i.e. entries on deeper-level block - * index entries. numSubEntriesAt[i] is the number of sub-entries in the - * blocks corresponding to this chunk's entries #0 through #i inclusively. + * The cumulative number of sub-entries, i.e. entries on deeper-level block index entries. + * numSubEntriesAt[i] is the number of sub-entries in the blocks corresponding to this chunk's + * entries #0 through #i inclusively. */ private final List numSubEntriesAt = new ArrayList<>(); /** - * The offset of the next entry to be added, relative to the end of the - * "secondary index" in the "non-root" format representation of this index - * chunk. This is the next value to be added to the secondary index. + * The offset of the next entry to be added, relative to the end of the "secondary index" in the + * "non-root" format representation of this index chunk. This is the next value to be added to + * the secondary index. */ private int curTotalNonRootEntrySize = 0; @@ -1424,34 +1305,29 @@ static class BlockIndexChunk { private int curTotalRootSize = 0; /** - * The "secondary index" used for binary search over variable-length - * records in a "non-root" format block. These offsets are relative to the - * end of this secondary index. + * The "secondary index" used for binary search over variable-length records in a "non-root" + * format block. These offsets are relative to the end of this secondary index. 
*/ private final List secondaryIndexOffsetMarks = new ArrayList<>(); /** * Adds a new entry to this block index chunk. - * - * @param firstKey the first key in the block pointed to by this entry - * @param blockOffset the offset of the next-level block pointed to by this - * entry - * @param onDiskDataSize the on-disk data of the block pointed to by this - * entry, including header size - * @param curTotalNumSubEntries if this chunk is the root index chunk under - * construction, this specifies the current total number of - * sub-entries in all leaf-level chunks, including the one - * corresponding to the second-level entry being added. + * @param firstKey the first key in the block pointed to by this entry + * @param blockOffset the offset of the next-level block pointed to by this entry + * @param onDiskDataSize the on-disk data of the block pointed to by this entry, + * including header size + * @param curTotalNumSubEntries if this chunk is the root index chunk under construction, this + * specifies the current total number of sub-entries in all + * leaf-level chunks, including the one corresponding to the + * second-level entry being added. */ - void add(byte[] firstKey, long blockOffset, int onDiskDataSize, - long curTotalNumSubEntries) { + void add(byte[] firstKey, long blockOffset, int onDiskDataSize, long curTotalNumSubEntries) { // Record the offset for the secondary index secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize); - curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD - + firstKey.length; + curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD + firstKey.length; curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT - + WritableUtils.getVIntSize(firstKey.length) + firstKey.length; + + WritableUtils.getVIntSize(firstKey.length) + firstKey.length; blockKeys.add(firstKey); blockOffsets.add(blockOffset); @@ -1462,17 +1338,15 @@ void add(byte[] firstKey, long blockOffset, int onDiskDataSize, // Make sure the parallel arrays are in sync. if (numSubEntriesAt.size() != blockKeys.size()) { - throw new IllegalStateException("Only have key/value count " + - "stats for " + numSubEntriesAt.size() + " block index " + - "entries out of " + blockKeys.size()); + throw new IllegalStateException("Only have key/value count " + "stats for " + + numSubEntriesAt.size() + " block index " + "entries out of " + blockKeys.size()); } } } /** - * The same as {@link #add(byte[], long, int, long)} but does not take the - * key/value into account. Used for single-level indexes. - * + * The same as {@link #add(byte[], long, int, long)} but does not take the key/value into + * account. Used for single-level indexes. * @see #add(byte[], long, int, long) */ public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) { @@ -1490,21 +1364,15 @@ public void clear() { } /** - * Finds the entry corresponding to the deeper-level index block containing - * the given deeper-level entry (a "sub-entry"), assuming a global 0-based - * ordering of sub-entries. - * + * Finds the entry corresponding to the deeper-level index block containing the given + * deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries. *
<p>
        - * Implementation note. We are looking for i such that - * numSubEntriesAt[i - 1] <= k < numSubEntriesAt[i], because a deeper-level - * block #i (0-based) contains sub-entries # numSubEntriesAt[i - 1]'th - * through numSubEntriesAt[i] - 1, assuming a global 0-based ordering of - * sub-entries. i is by definition the insertion point of k in - * numSubEntriesAt. - * + * Implementation note. We are looking for i such that numSubEntriesAt[i - 1] <= k < + * numSubEntriesAt[i], because a deeper-level block #i (0-based) contains sub-entries # + * numSubEntriesAt[i - 1]'th through numSubEntriesAt[i] - 1, assuming a global 0-based ordering + * of sub-entries. i is by definition the insertion point of k in numSubEntriesAt. * @param k sub-entry index, from 0 to the total number sub-entries - 1 - * @return the 0-based index of the entry corresponding to the given - * sub-entry + * @return the 0-based index of the entry corresponding to the given sub-entry */ public int getEntryBySubEntry(long k) { // We define mid-key as the key corresponding to k'th sub-entry @@ -1515,24 +1383,20 @@ public int getEntryBySubEntry(long k) { // Exact match: cumulativeWeight[i] = k. This means chunks #0 through // #i contain exactly k sub-entries, and the sub-entry #k (0-based) // is in the (i + 1)'th chunk. - if (i >= 0) - return i + 1; + if (i >= 0) return i + 1; // Inexact match. Return the insertion point. return -i - 1; } /** - * Used when writing the root block index of a multi-level block index. - * Serializes additional information allowing to efficiently identify the - * mid-key. - * + * Used when writing the root block index of a multi-level block index. Serializes additional + * information allowing to efficiently identify the mid-key. * @return a few serialized fields for finding the mid-key * @throws IOException if could not create metadata for computing mid-key */ public byte[] getMidKeyMetadata() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream( - MID_KEY_METADATA_SIZE); + ByteArrayOutputStream baos = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE); DataOutputStream baosDos = new DataOutputStream(baos); long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1); if (totalNumSubEntries == 0) { @@ -1544,23 +1408,20 @@ public byte[] getMidKeyMetadata() throws IOException { baosDos.writeLong(blockOffsets.get(midKeyEntry)); baosDos.writeInt(onDiskDataSizes.get(midKeyEntry)); - long numSubEntriesBefore = midKeyEntry > 0 - ? numSubEntriesAt.get(midKeyEntry - 1) : 0; + long numSubEntriesBefore = midKeyEntry > 0 ? 
numSubEntriesAt.get(midKeyEntry - 1) : 0; long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore; - if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) - { + if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) { throw new IOException("Could not identify mid-key index within the " - + "leaf-level block containing mid-key: out of range (" - + subEntryWithinEntry + ", numSubEntriesBefore=" - + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry - + ")"); + + "leaf-level block containing mid-key: out of range (" + subEntryWithinEntry + + ", numSubEntriesBefore=" + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry + + ")"); } baosDos.writeInt((int) subEntryWithinEntry); if (baosDos.size() != MID_KEY_METADATA_SIZE) { - throw new IOException("Could not write mid-key metadata: size=" + - baosDos.size() + ", correct size: " + MID_KEY_METADATA_SIZE); + throw new IOException("Could not write mid-key metadata: size=" + baosDos.size() + + ", correct size: " + MID_KEY_METADATA_SIZE); } // Close just to be good citizens, although this has no effect. @@ -1570,22 +1431,17 @@ public byte[] getMidKeyMetadata() throws IOException { } /** - * Writes the block index chunk in the non-root index block format. This - * format contains the number of entries, an index of integer offsets - * for quick binary search on variable-length records, and tuples of - * block offset, on-disk block size, and the first key for each entry. - * - * @param out - * @throws IOException + * Writes the block index chunk in the non-root index block format. This format contains the + * number of entries, an index of integer offsets for quick binary search on variable-length + * records, and tuples of block offset, on-disk block size, and the first key for each entry. nn */ void writeNonRoot(DataOutput out) throws IOException { // The number of entries in the block. out.writeInt(blockKeys.size()); if (secondaryIndexOffsetMarks.size() != blockKeys.size()) { - throw new IOException("Corrupted block index chunk writer: " + - blockKeys.size() + " entries but " + - secondaryIndexOffsetMarks.size() + " secondary index items"); + throw new IOException("Corrupted block index chunk writer: " + blockKeys.size() + + " entries but " + secondaryIndexOffsetMarks.size() + " secondary index items"); } // For each entry, write a "secondary index" of relative offsets to the @@ -1607,24 +1463,20 @@ void writeNonRoot(DataOutput out) throws IOException { } /** - * @return the size of this chunk if stored in the non-root index block - * format + * @return the size of this chunk if stored in the non-root index block format */ int getNonRootSize() { - return Bytes.SIZEOF_INT // Number of entries - + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index - + curTotalNonRootEntrySize; // All entries + return Bytes.SIZEOF_INT // Number of entries + + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index + + curTotalNonRootEntrySize; // All entries } /** - * Writes this chunk into the given output stream in the root block index - * format. This format is similar to the {@link HFile} version 1 block - * index format, except that we store on-disk size of the block instead of - * its uncompressed size. - * - * @param out the data output stream to write the block index to. Typically - * a stream writing into an {@link HFile} block. - * @throws IOException + * Writes this chunk into the given output stream in the root block index format. 
This format is + * similar to the {@link HFile} version 1 block index format, except that we store on-disk size + * of the block instead of its uncompressed size. + * @param out the data output stream to write the block index to. Typically a stream writing + * into an {@link HFile} block. n */ void writeRoot(DataOutput out) throws IOException { for (int i = 0; i < blockKeys.size(); ++i) { @@ -1661,8 +1513,7 @@ public int getOnDiskDataSize(int i) { } public long getCumulativeNumKV(int i) { - if (i < 0) - return 0; + if (i < 0) return 0; return numSubEntriesAt.get(i); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 6a1611de8dc3..cf253e20bd85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; @@ -27,9 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Controls what kind of data block encoding is used. If data block encoding is - * not set or the given block is not a data block (encoded or not), methods - * should just return the unmodified block. + * Controls what kind of data block encoding is used. If data block encoding is not set or the given + * block is not a data block (encoded or not), methods should just return the unmodified block. */ @InterfaceAudience.Private public interface HFileDataBlockEncoder { @@ -38,36 +38,24 @@ public interface HFileDataBlockEncoder { /** * Starts encoding for a block of KeyValues. Call - * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} - * to finish encoding of a block. 
- * @param encodingCtx - * @param out - * @throws IOException + * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} to + * finish encoding of a block. nnn */ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException; + throws IOException; /** - * Encodes a KeyValue. - * @param cell - * @param encodingCtx - * @param out - * @throws IOException + * Encodes a KeyValue. nnnn */ void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException; + throws IOException; /** * Ends encoding for a block of KeyValues. Gives a chance for the encoder to do the finishing - * stuff for the encoded block. It must be called at the end of block encoding. - * @param encodingCtx - * @param out - * @param uncompressedBytesWithHeader - * @param blockType - * @throws IOException + * stuff for the encoded block. It must be called at the end of block encoding. nnnnn */ void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, - byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException; + byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException; /** * Decides whether we should use a scanner over encoded blocks. @@ -80,40 +68,35 @@ void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream ou * @param writer writer for a given HFile * @exception IOException on disk problems */ - void saveMetadata(HFile.Writer writer) - throws IOException; + void saveMetadata(HFile.Writer writer) throws IOException; /** @return the data block encoding */ DataBlockEncoding getDataBlockEncoding(); /** - * @return the effective in-cache data block encoding, taking into account - * whether we are doing a compaction. + * @return the effective in-cache data block encoding, taking into account whether we are doing a + * compaction. */ public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); /** - * Create an encoder specific encoding context object for writing. And the - * encoding context should also perform compression if compressionAlgorithm is - * valid. - * - * @param conf store configuration + * Create an encoder specific encoding context object for writing. And the encoding context should + * also perform compression if compressionAlgorithm is valid. + * @param conf store configuration * @param headerBytes header bytes * @param fileContext HFile meta data * @return a new {@link HFileBlockEncodingContext} object */ HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] headerBytes, - HFileContext fileContext); + HFileContext fileContext); /** - * create a encoder specific decoding context for reading. And the - * decoding context should also do decompression if compressionAlgorithm - * is valid. - * - * @param conf store configuration + * create a encoder specific decoding context for reading. And the decoding context should also do + * decompression if compressionAlgorithm is valid. 
+ * @param conf store configuration * @param fileContext - HFile meta data * @return a new {@link HFileBlockDecodingContext} object */ HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, - HFileContext fileContext); + HFileContext fileContext); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index d2ce77245c9c..6505e3d33fe8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -31,8 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Do different kinds of data block encoding according to column family - * options. + * Do different kinds of data block encoding according to column family options. */ @InterfaceAudience.Private public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @@ -46,8 +45,7 @@ public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) { this.encoding = encoding != null ? 
encoding : DataBlockEncoding.NONE; } - public static HFileDataBlockEncoder createFromFileInfo( - HFileInfo fileInfo) throws IOException { + public static HFileDataBlockEncoder createFromFileInfo(HFileInfo fileInfo) throws IOException { DataBlockEncoding encoding = DataBlockEncoding.NONE; byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING); if (dataBlockEncodingType != null) { @@ -55,8 +53,8 @@ public static HFileDataBlockEncoder createFromFileInfo( try { encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr); } catch (IllegalArgumentException ex) { - throw new IOException("Invalid data block encoding type in file info: " - + dataBlockEncodingStr, ex); + throw new IOException( + "Invalid data block encoding type in file info: " + dataBlockEncodingStr, ex); } } @@ -93,7 +91,7 @@ public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) { @Override public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException { + throws IOException { this.encoding.getEncoder().encode(cell, encodingCtx, out); } @@ -102,7 +100,6 @@ public boolean useEncodedScanner() { return encoding != DataBlockEncoding.NONE; } - @Override public String toString() { return getClass().getSimpleName() + "(encoding=" + encoding + ")"; @@ -110,7 +107,7 @@ public String toString() { @Override public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, - byte[] dummyHeader, HFileContext fileContext) { + byte[] dummyHeader, HFileContext fileContext) { DataBlockEncoder encoder = encoding.getEncoder(); if (encoder != null) { return encoder.newDataBlockEncodingContext(conf, encoding, dummyHeader, fileContext); @@ -120,7 +117,7 @@ public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, @Override public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, - HFileContext fileContext) { + HFileContext fileContext) { DataBlockEncoder encoder = encoding.getEncoder(); if (encoder != null) { return encoder.newDataBlockDecodingContext(conf, fileContext); @@ -130,7 +127,7 @@ public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, @Override public void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out) - throws IOException { + throws IOException { if (this.encoding != null && this.encoding != DataBlockEncoding.NONE) { this.encoding.getEncoder().startBlockEncoding(encodingCtx, out); } @@ -138,7 +135,7 @@ public void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutput @Override public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, - byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { + byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { this.encoding.getEncoder().endBlockEncoding(encodingCtx, out, uncompressedBytesWithHeader); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 072e5b10628a..104d9a679d80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,8 +59,8 @@ * key seen, comparator used writing the file, etc. Clients can add their own attributes via * {@link #append(byte[], byte[], boolean)} and they'll be persisted and available at read time. * Reader creates the HFileInfo on open by reading the tail of the HFile. The parse of the HFile - * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of - * the HFileInfo and extras that is safe to pass around when working on HFiles. + * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of the + * HFileInfo and extras that is safe to pass around when working on HFiles. * @see HFileContext */ @InterfaceAudience.Private @@ -71,13 +70,13 @@ public class HFileInfo implements SortedMap { static final String RESERVED_PREFIX = "hfile."; static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX); - static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); - static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); - static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); - static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); - static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); - public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); - private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + static final byte[] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); + static final byte[] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); + static final byte[] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); + static final byte[] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); + static final byte[] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); + public static final byte[] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); + private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * We can read files whose major version is v2 IFF their minor version is at least 3. @@ -99,15 +98,15 @@ public class HFileInfo implements SortedMap { private boolean decodeMemstoreTS = false; /** - * Blocks read from the load-on-open section, excluding data root index, meta - * index, and file info. + * Blocks read from the load-on-open section, excluding data root index, meta index, and file + * info. */ private List loadOnOpenBlocks = new ArrayList<>(); /** * The iterator will track all blocks in load-on-open section, since we use the - * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers in block now, - * so we must ensure that deallocate all ByteBuffers in the end. + * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers in block now, so + * we must ensure that deallocate all ByteBuffers in the end. */ private HFileBlock.BlockIterator blockIter; @@ -126,25 +125,22 @@ public HFileInfo(ReaderContext context, Configuration conf) throws IOException { } /** - * Append the given key/value pair to the file info, optionally checking the - * key prefix. - * - * @param k key to add - * @param v value to add - * @param checkPrefix whether to check that the provided key does not start - * with the reserved prefix + * Append the given key/value pair to the file info, optionally checking the key prefix. 
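As a hedged usage sketch of the append contract documented here (not part of the patch), assuming a writer-side HFileInfo instance named fileInfo and an application-chosen key, both placeholders:

    // Custom attributes are persisted in the file info block and are available again at read time.
    fileInfo.append(Bytes.toBytes("MY_APP_ATTRIBUTE"), Bytes.toBytes("some-value"), true);
    // With checkPrefix=true, keys starting with the reserved "hfile." prefix are rejected with an
    // IOException, so application keys cannot collide with the internal hfile.* entries.
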
+ * @param k key to add + * @param v value to add + * @param checkPrefix whether to check that the provided key does not start with the reserved + * prefix * @return this file info object - * @throws IOException if the key or value is invalid + * @throws IOException if the key or value is invalid * @throws NullPointerException if {@code key} or {@code value} is {@code null} */ - public HFileInfo append(final byte[] k, final byte[] v, - final boolean checkPrefix) throws IOException { + public HFileInfo append(final byte[] k, final byte[] v, final boolean checkPrefix) + throws IOException { Objects.requireNonNull(k, "key cannot be null"); Objects.requireNonNull(v, "value cannot be null"); if (checkPrefix && isReservedFileInfoKey(k)) { - throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX - + " are reserved"); + throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX + " are reserved"); } put(k, v); return this; @@ -256,13 +252,12 @@ public Collection values() { } /** - * Write out this instance on the passed in out stream. - * We write it as a protobuf. + * Write out this instance on the passed in out stream. We write it as a protobuf. * @see #read(DataInputStream) */ void write(final DataOutputStream out) throws IOException { HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder(); - for (Map.Entry e: this.map.entrySet()) { + for (Map.Entry e : this.map.entrySet()) { HBaseProtos.BytesBytesPair.Builder bbpBuilder = HBaseProtos.BytesBytesPair.newBuilder(); bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey())); bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue())); @@ -273,14 +268,14 @@ void write(final DataOutputStream out) throws IOException { } /** - * Populate this instance with what we find on the passed in in stream. - * Can deserialize protobuf of old Writables format. + * Populate this instance with what we find on the passed in in stream. Can + * deserialize protobuf of old Writables format. * @see #write(DataOutputStream) */ void read(final DataInputStream in) throws IOException { // This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; + byte[] pbuf = new byte[pblen]; if (in.markSupported()) { in.mark(pblen); } @@ -298,7 +293,7 @@ void read(final DataInputStream in) throws IOException { // We cannot use BufferedInputStream, it consumes more than we read from the underlying IS ByteArrayInputStream bais = new ByteArrayInputStream(pbuf); SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams - // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling + // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling // close on the wrapped streams but they should be let go after we leave this context? // I see that we keep a reference to the passed in inputstream but since we no longer // have a reference to this after we leave, we should be ok. @@ -308,10 +303,9 @@ void read(final DataInputStream in) throws IOException { } /** - * Now parse the old Writable format. It was a list of Map entries. Each map entry was a - * key and a value of a byte []. The old map format had a byte before each entry that held - * a code which was short for the key or value type. We know it was a byte [] so in below - * we just read and dump it. + * Now parse the old Writable format. It was a list of Map entries. 
Each map entry was a key and a + * value of a byte []. The old map format had a byte before each entry that held a code which was + * short for the key or value type. We know it was a byte [] so in below we just read and dump it. */ void parseWritable(final DataInputStream in) throws IOException { // First clear the map. @@ -321,11 +315,11 @@ void parseWritable(final DataInputStream in) throws IOException { int entries = in.readInt(); // Then read each key/value pair for (int i = 0; i < entries; i++) { - byte [] key = Bytes.readByteArray(in); + byte[] key = Bytes.readByteArray(in); // We used to read a byte that encoded the class type. // Read and ignore it because it is always byte [] in hfile in.readByte(); - byte [] value = Bytes.readByteArray(in); + byte[] value = Bytes.readByteArray(in); this.map.put(key, value); } } @@ -336,7 +330,7 @@ void parseWritable(final DataInputStream in) throws IOException { */ void parsePB(final HFileProtos.FileInfoProto fip) { this.map.clear(); - for (BytesBytesPair pair: fip.getMapEntryList()) { + for (BytesBytesPair pair : fip.getMapEntryList()) { this.map.put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray()); } } @@ -344,8 +338,8 @@ void parsePB(final HFileProtos.FileInfoProto fip) { public void initTrailerAndContext(ReaderContext context, Configuration conf) throws IOException { try { boolean isHBaseChecksum = context.getInputStreamWrapper().shouldUseHBaseChecksum(); - trailer = FixedFileTrailer.readFromStream(context.getInputStreamWrapper() - .getStream(isHBaseChecksum), context.getFileSize()); + trailer = FixedFileTrailer.readFromStream( + context.getInputStreamWrapper().getStream(isHBaseChecksum), context.getFileSize()); Path path = context.getFilePath(); checkFileVersion(path); this.hfileContext = createHFileContext(path, trailer, conf); @@ -353,8 +347,8 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); - throw new CorruptHFileException("Problem reading HFile Trailer from file " - + context.getFilePath(), t); + throw new CorruptHFileException( + "Problem reading HFile Trailer from file " + context.getFilePath(), t); } } @@ -367,13 +361,13 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); // Initialize an block iterator, and parse load-on-open blocks in the following. blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), - context.getFileSize() - trailer.getTrailerSize()); + context.getFileSize() - trailer.getTrailerSize()); // Data index. We also read statistics about the block index written after // the root level. - this.dataIndexReader = - new HFileBlockIndex.CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); - dataIndexReader - .readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + this.dataIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReader( + trailer.createComparator(), trailer.getNumDataIndexLevels()); + dataIndexReader.readMultiLevelIndexRoot( + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); reader.setDataBlockIndexReader(dataIndexReader); // Meta index. 
this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); @@ -397,12 +391,10 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { } } - private HFileContext createHFileContext(Path path, - FixedFileTrailer trailer, Configuration conf) throws IOException { - HFileContextBuilder builder = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withHFileName(path.getName()) - .withCompression(trailer.getCompressionCodec()) + private HFileContext createHFileContext(Path path, FixedFileTrailer trailer, Configuration conf) + throws IOException { + HFileContextBuilder builder = new HFileContextBuilder().withHBaseCheckSum(true) + .withHFileName(path.getName()).withCompression(trailer.getCompressionCodec()) .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName())); // Check for any key material available byte[] keyBytes = trailer.getEncryptionKey(); @@ -412,8 +404,8 @@ private HFileContext createHFileContext(Path path, // Use the algorithm the key wants Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm()); if (cipher == null) { - throw new IOException("Cipher '" + key.getAlgorithm() + "' is not available" - + ", path=" + path); + throw new IOException( + "Cipher '" + key.getAlgorithm() + "' is not available" + ", path=" + path); } cryptoContext.setCipher(cipher); cryptoContext.setKey(key); @@ -424,11 +416,10 @@ private HFileContext createHFileContext(Path path, } private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfileContext) - throws IOException { + throws IOException { read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); byte[] creationTimeBytes = get(HFileInfo.CREATE_TIME_TS); - hfileContext.setFileCreateTime(creationTimeBytes == null ? - 0 : Bytes.toLong(creationTimeBytes)); + hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes)); byte[] tmp = get(HFileInfo.MAX_TAGS_LEN); // max tag length is not present in the HFile means tags were not at all written to file. if (tmp != null) { @@ -444,9 +435,9 @@ private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfile } avgKeyLen = Bytes.toInt(get(HFileInfo.AVG_KEY_LEN)); avgValueLen = Bytes.toInt(get(HFileInfo.AVG_VALUE_LEN)); - byte [] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION); - includesMemstoreTS = keyValueFormatVersion != null && - Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; + byte[] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION); + includesMemstoreTS = keyValueFormatVersion != null + && Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; hfileContext.setIncludesMvcc(includesMemstoreTS); if (includesMemstoreTS) { decodeMemstoreTS = Bytes.toLong(get(HFileWriterImpl.MAX_MEMSTORE_TS_KEY)) > 0; @@ -467,9 +458,9 @@ private void checkFileVersion(Path path) { return; } // We can read v3 or v2 versions of hfile. 
- throw new IllegalArgumentException("Invalid HFile version: major=" + - trailer.getMajorVersion() + ", minor=" + trailer.getMinorVersion() + ": expected at least " + - "major=2 and minor=" + MAX_MINOR_VERSION + ", path=" + path); + throw new IllegalArgumentException("Invalid HFile version: major=" + trailer.getMajorVersion() + + ", minor=" + trailer.getMinorVersion() + ": expected at least " + "major=2 and minor=" + + MAX_MINOR_VERSION + ", path=" + path); } public void close() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 98fe885de516..25627c34f510 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ public class HFilePreadReader extends HFileReaderImpl { private static final Logger LOG = LoggerFactory.getLogger(HFileReaderImpl.class); - public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, - CacheConfig cacheConf, Configuration conf) throws IOException { + public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, + Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); // Prefetch file blocks upon open if requested if (cacheConf.shouldPrefetchOnOpen()) { @@ -74,8 +74,9 @@ public void run() { LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e); } } catch (NullPointerException e) { - LOG.warn("Stream moved/closed or prefetch cancelled?" + - getPathOffsetEndStr(path, offset, end), e); + LOG.warn( + "Stream moved/closed or prefetch cancelled?" + getPathOffsetEndStr(path, offset, end), + e); } catch (Exception e) { // Other exceptions are interesting LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index c24d8be7c035..9064f6b793b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -139,15 +138,14 @@ public HFilePrettyPrinter(Configuration conf) { } private void init() { - options.addOption("v", "verbose", false, - "Verbose output; emits file and meta data delimiters"); + options.addOption("v", "verbose", false, "Verbose output; emits file and meta data delimiters"); options.addOption("p", "printkv", false, "Print key/value pairs"); options.addOption("e", "printkey", false, "Print keys"); options.addOption("m", "printmeta", false, "Print meta data of file"); options.addOption("b", "printblocks", false, "Print block index meta data"); options.addOption("h", "printblockheaders", false, "Print block headers for each block."); options.addOption("k", "checkrow", false, - "Enable row order check; looks for out-of-order keys"); + "Enable row order check; looks for out-of-order keys"); options.addOption("a", "checkfamily", false, "Enable family check"); options.addOption("w", "seekToRow", true, "Seek to this row and print all the kvs for this row only"); @@ -158,8 +156,8 @@ private void init() { OptionGroup files = new OptionGroup(); files.addOption(new Option("f", "file", true, "File to scan. Pass full-path; e.g. hdfs://a:9000/hbase/hbase:meta/12/34")); - files.addOption(new Option("r", "region", true, - "Region to scan. Pass region name; e.g. 'hbase:meta,,1'")); + files.addOption( + new Option("r", "region", true, "Region to scan. Pass region name; e.g. 'hbase:meta,,1'")); options.addOptionGroup(files); } @@ -168,8 +166,7 @@ public void setPrintStreams(PrintStream out, PrintStream err) { this.err = err; } - public boolean parseOptions(String args[]) throws ParseException, - IOException { + public boolean parseOptions(String args[]) throws ParseException, IOException { if (args.length == 0) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hfile", options, true); @@ -212,24 +209,19 @@ public boolean parseOptions(String args[]) throws ParseException, Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf(hri[0])); String enc = RegionInfo.encodeRegionName(rn); Path regionDir = new Path(tableDir, enc); - if (verbose) - out.println("region dir -> " + regionDir); - List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()), - regionDir); - if (verbose) - out.println("Number of region files found -> " - + regionFiles.size()); + if (verbose) out.println("region dir -> " + regionDir); + List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()), regionDir); + if (verbose) out.println("Number of region files found -> " + regionFiles.size()); if (verbose) { int i = 1; for (Path p : regionFiles) { - if (verbose) - out.println("Found file[" + i++ + "] -> " + p); + if (verbose) out.println("Found file[" + i++ + "] -> " + p); } } files.addAll(regionFiles); } - if(checkMobIntegrity) { + if (checkMobIntegrity) { if (verbose) { System.out.println("checkMobIntegrity is enabled"); } @@ -242,8 +234,8 @@ public boolean parseOptions(String args[]) throws ParseException, } /** - * Runs the command-line pretty-printer, and returns the desired command - * exit code (zero for success, non-zero for failure). + * Runs the command-line pretty-printer, and returns the desired command exit code (zero for + * success, non-zero for failure). 
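For reference, a small sketch of driving the pretty-printer programmatically with the options registered in init() above, roughly equivalent to running the "hfile" tool from the command line; the Configuration instance and the HDFS path are placeholders:

    HFilePrettyPrinter printer = new HFilePrettyPrinter(conf);
    int exitCode = printer.run(new String[] {
      "-v", "-p", "-m",   // verbose output, print key/values, print file meta data
      "-f", "hdfs://namenode:9000/hbase/data/default/t1/1234567890abcdef/cf/somefile"
    });
    // Zero means success, non-zero means failure, matching the contract documented above.
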
*/ @Override public int run(String[] args) { @@ -352,10 +344,8 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { */ FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file); long fileSize = fs.getFileStatus(file).getLen(); - FixedFileTrailer trailer = - FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); - long offset = trailer.getFirstDataBlockOffset(), - max = trailer.getLastDataBlockOffset(); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); + long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset(); HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false, @@ -374,8 +364,8 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { return 0; } - private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, - HFileScanner scanner, byte[] row) throws IOException { + private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, HFileScanner scanner, + byte[] row) throws IOException { Cell pCell = null; FileSystem fs = FileSystem.get(getConf()); Set foundMobFiles = new LinkedHashSet<>(FOUND_MOB_FILES_CACHE_CAPACITY); @@ -398,9 +388,8 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, if (printKey) { out.print("K: " + cell); if (printValue) { - out.print(" V: " - + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + out.print(" V: " + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength())); int i = 0; List tags = PrivateCellUtil.getTags(cell); for (Tag tag : tags) { @@ -412,37 +401,35 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, // check if rows are in order if (checkRow && pCell != null) { if (CellComparator.getInstance().compareRows(pCell, cell) > 0) { - err.println("WARNING, previous row is greater then" - + " current row\n\tfilename -> " + file + "\n\tprevious -> " - + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " - + CellUtil.getCellKeyAsString(cell)); + err.println("WARNING, previous row is greater then" + " current row\n\tfilename -> " + + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " + + CellUtil.getCellKeyAsString(cell)); } } // check if families are consistent if (checkFamily) { - String fam = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()); + String fam = + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); if (!file.toString().contains(fam)) { - err.println("WARNING, filename does not match kv family," - + "\n\tfilename -> " + file + "\n\tkeyvalue -> " - + CellUtil.getCellKeyAsString(cell)); + err.println("WARNING, filename does not match kv family," + "\n\tfilename -> " + file + + "\n\tkeyvalue -> " + CellUtil.getCellKeyAsString(cell)); } if (pCell != null && CellComparator.getInstance().compareFamilies(pCell, cell) != 0) { - err.println("WARNING, previous kv has different family" - + " compared to current key\n\tfilename -> " + file - + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) - + "\n\tcurrent -> " + CellUtil.getCellKeyAsString(cell)); + err.println( + "WARNING, previous kv has different family" + " compared to current key\n\tfilename -> " + + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " + + CellUtil.getCellKeyAsString(cell)); } } // 
check if mob files are missing. if (checkMobIntegrity && MobUtils.isMobReferenceCell(cell)) { Optional tn = MobUtils.getTableName(cell); - if (! tn.isPresent()) { - System.err.println("ERROR, wrong tag format in mob reference cell " - + CellUtil.getCellKeyAsString(cell)); + if (!tn.isPresent()) { + System.err.println( + "ERROR, wrong tag format in mob reference cell " + CellUtil.getCellKeyAsString(cell)); } else if (!MobUtils.hasValidMobRefCellValue(cell)) { - System.err.println("ERROR, wrong value format in mob reference cell " - + CellUtil.getCellKeyAsString(cell)); + System.err.println( + "ERROR, wrong value format in mob reference cell " + CellUtil.getCellKeyAsString(cell)); } else { String mobFileName = MobUtils.getMobFileName(cell); boolean exist = mobFileExists(fs, tn.get(), mobFileName, @@ -515,42 +502,44 @@ private void evictMobFilesIfNecessary(Set mobFileNames, int limit) { } /** - * Format a string of the form "k1=v1, k2=v2, ..." into separate lines - * with a four-space indentation. + * Format a string of the form "k1=v1, k2=v2, ..." into separate lines with a four-space + * indentation. */ private static String asSeparateLines(String keyValueStr) { - return keyValueStr.replaceAll(", ([a-zA-Z]+=)", - ",\n" + FOUR_SPACES + "$1"); + return keyValueStr.replaceAll(", ([a-zA-Z]+=)", ",\n" + FOUR_SPACES + "$1"); } - private void printMeta(HFile.Reader reader, Map fileInfo) - throws IOException { - out.println("Block index size as per heapsize: " - + reader.indexSize()); + private void printMeta(HFile.Reader reader, Map fileInfo) throws IOException { + out.println("Block index size as per heapsize: " + reader.indexSize()); out.println(asSeparateLines(reader.toString())); - out.println("Trailer:\n " - + asSeparateLines(reader.getTrailer().toString())); + out.println("Trailer:\n " + asSeparateLines(reader.getTrailer().toString())); out.println("Fileinfo:"); for (Map.Entry e : fileInfo.entrySet()) { out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = "); - if (Bytes.equals(e.getKey(), HStoreFile.MAX_SEQ_ID_KEY) + if ( + Bytes.equals(e.getKey(), HStoreFile.MAX_SEQ_ID_KEY) || Bytes.equals(e.getKey(), HStoreFile.DELETE_FAMILY_COUNT) || Bytes.equals(e.getKey(), HStoreFile.EARLIEST_PUT_TS) || Bytes.equals(e.getKey(), HFileWriterImpl.MAX_MEMSTORE_TS_KEY) || Bytes.equals(e.getKey(), HFileInfo.CREATE_TIME_TS) - || Bytes.equals(e.getKey(), HStoreFile.BULKLOAD_TIME_KEY)) { + || Bytes.equals(e.getKey(), HStoreFile.BULKLOAD_TIME_KEY) + ) { out.println(Bytes.toLong(e.getValue())); } else if (Bytes.equals(e.getKey(), HStoreFile.TIMERANGE_KEY)) { TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(e.getValue()); out.println(timeRangeTracker.getMin() + "...." 
+ timeRangeTracker.getMax()); - } else if (Bytes.equals(e.getKey(), HFileInfo.AVG_KEY_LEN) + } else if ( + Bytes.equals(e.getKey(), HFileInfo.AVG_KEY_LEN) || Bytes.equals(e.getKey(), HFileInfo.AVG_VALUE_LEN) || Bytes.equals(e.getKey(), HFileWriterImpl.KEY_VALUE_VERSION) - || Bytes.equals(e.getKey(), HFileInfo.MAX_TAGS_LEN)) { + || Bytes.equals(e.getKey(), HFileInfo.MAX_TAGS_LEN) + ) { out.println(Bytes.toInt(e.getValue())); - } else if (Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) + } else if ( + Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) || Bytes.equals(e.getKey(), HFileInfo.TAGS_COMPRESSED) - || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY)) { + || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY) + ) { out.println(Bytes.toBoolean(e.getValue())); } else if (Bytes.equals(e.getKey(), HFileInfo.LASTKEY)) { out.println(new KeyValue.KeyOnlyKeyValue(e.getValue()).toString()); @@ -562,19 +551,18 @@ private void printMeta(HFile.Reader reader, Map fileInfo) try { out.println("Mid-key: " + reader.midKey().map(CellUtil::getCellKeyAsString)); } catch (Exception e) { - out.println ("Unable to retrieve the midkey"); + out.println("Unable to retrieve the midkey"); } // Printing general bloom information DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); BloomFilter bloomFilter = null; - if (bloomMeta != null) - bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + if (bloomMeta != null) bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); out.println("Bloom filter:"); if (bloomFilter != null) { - out.println(FOUR_SPACES + bloomFilter.toString().replaceAll( - BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); + out.println(FOUR_SPACES + + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); } else { out.println(FOUR_SPACES + "Not present"); } @@ -582,14 +570,12 @@ private void printMeta(HFile.Reader reader, Map fileInfo) // Printing delete bloom information bloomMeta = reader.getDeleteBloomFilterMetadata(); bloomFilter = null; - if (bloomMeta != null) - bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + if (bloomMeta != null) bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); out.println("Delete Family Bloom filter:"); if (bloomFilter != null) { out.println(FOUR_SPACES - + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, - "\n" + FOUR_SPACES)); + + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); } else { out.println(FOUR_SPACES + "Not present"); } @@ -598,15 +584,15 @@ private void printMeta(HFile.Reader reader, Map fileInfo) private static class KeyValueStatsCollector { private final MetricRegistry metricsRegistry = new MetricRegistry(); private final ByteArrayOutputStream metricsOutput = new ByteArrayOutputStream(); - private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry). 
- outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build(); + private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry) + .outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build(); Histogram keyLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Key length")); Histogram valLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Val length")); - Histogram rowSizeBytes = metricsRegistry.histogram( - name(HFilePrettyPrinter.class, "Row size (bytes)")); - Histogram rowSizeCols = metricsRegistry.histogram( - name(HFilePrettyPrinter.class, "Row size (columns)")); + Histogram rowSizeBytes = + metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Row size (bytes)")); + Histogram rowSizeCols = + metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Row size (columns)")); long curRowBytes = 0; long curRowCols = 0; @@ -619,8 +605,7 @@ private static class KeyValueStatsCollector { public void collect(Cell cell) { valLen.update(cell.getValueLength()); - if (prevCell != null && - CellComparator.getInstance().compareRows(prevCell, cell) != 0) { + if (prevCell != null && CellComparator.getInstance().compareRows(prevCell, cell) != 0) { // new row collectRow(); } @@ -652,27 +637,23 @@ public void finish() { @Override public String toString() { - if (prevCell == null) - return "no data available for statistics"; + if (prevCell == null) return "no data available for statistics"; // Dump the metrics to the output stream simpleReporter.stop(); simpleReporter.report(); - return - metricsOutput.toString() + - "Key of biggest row: " + Bytes.toStringBinary(biggestRow); + return metricsOutput.toString() + "Key of biggest row: " + Bytes.toStringBinary(biggestRow); } } /** - * Almost identical to ConsoleReporter, but extending ScheduledReporter, - * as extending ConsoleReporter in this version of dropwizard is now too much trouble. + * Almost identical to ConsoleReporter, but extending ScheduledReporter, as extending + * ConsoleReporter in this version of dropwizard is now too much trouble. */ private static class SimpleReporter extends ScheduledReporter { /** * Returns a new {@link Builder} for {@link ConsoleReporter}. - * * @param registry the registry to report * @return a {@link Builder} instance for a {@link ConsoleReporter} */ @@ -681,9 +662,9 @@ public static Builder forRegistry(MetricRegistry registry) { } /** - * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and - * time zone, writing to {@code System.out}, converting rates to events/second, converting - * durations to milliseconds, and not filtering metrics. + * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and time + * zone, writing to {@code System.out}, converting rates to events/second, converting durations + * to milliseconds, and not filtering metrics. */ public static class Builder { private final MetricRegistry registry; @@ -706,7 +687,6 @@ private Builder(MetricRegistry registry) { /** * Write to the given {@link PrintStream}. - * * @param output a {@link PrintStream} instance. * @return {@code this} */ @@ -717,7 +697,6 @@ public Builder outputTo(PrintStream output) { /** * Only report metrics which match the given filter. - * * @param filter a {@link MetricFilter} * @return {@code this} */ @@ -728,17 +707,11 @@ public Builder filter(MetricFilter filter) { /** * Builds a {@link ConsoleReporter} with the given properties. 
- * * @return a {@link ConsoleReporter} */ public SimpleReporter build() { - return new SimpleReporter(registry, - output, - locale, - timeZone, - rateUnit, - durationUnit, - filter); + return new SimpleReporter(registry, output, locale, timeZone, rateUnit, durationUnit, + filter); } } @@ -746,29 +719,20 @@ public SimpleReporter build() { private final Locale locale; private final DateFormat dateFormat; - private SimpleReporter(MetricRegistry registry, - PrintStream output, - Locale locale, - TimeZone timeZone, - TimeUnit rateUnit, - TimeUnit durationUnit, - MetricFilter filter) { + private SimpleReporter(MetricRegistry registry, PrintStream output, Locale locale, + TimeZone timeZone, TimeUnit rateUnit, TimeUnit durationUnit, MetricFilter filter) { super(registry, "simple-reporter", filter, rateUnit, durationUnit); this.output = output; this.locale = locale; - this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT, - DateFormat.MEDIUM, - locale); + this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.MEDIUM, locale); dateFormat.setTimeZone(timeZone); } @Override - public void report(SortedMap gauges, - SortedMap counters, - SortedMap histograms, - SortedMap meters, - SortedMap timers) { + public void report(SortedMap gauges, SortedMap counters, + SortedMap histograms, SortedMap meters, + SortedMap timers) { // we know we only have histograms if (!histograms.isEmpty()) { for (Map.Entry entry : histograms.entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index c7a71584327c..e7bc6901b2f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -58,10 +58,10 @@ * Implementation that can handle all hfile versions of {@link HFile.Reader}. */ @InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // This class is HFileReaderV3 + HFileReaderV2 + AbstractHFileReader all squashed together into - // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against + // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against // the MaxInlineLevel limit because too many tiers involved reading from an hfile. Was also hard // to navigate the source code when so many classes participating in read. private static final Logger LOG = LoggerFactory.getLogger(HFileReaderImpl.class); @@ -77,8 +77,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { private final boolean primaryReplicaReader; /** - * What kind of data block encoding should be used while reading, writing, - * and handling cache. + * What kind of data block encoding should be used while reading, writing, and handling cache. */ protected HFileDataBlockEncoder dataBlockEncoder = NoOpDataBlockEncoder.INSTANCE; @@ -103,10 +102,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { protected HFileBlock.FSReader fsBlockReader; /** - * A "sparse lock" implementation allowing to lock on a particular block - * identified by offset. 
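In code, this typically looks like the IdLock pattern sketched below; getLockEntry/releaseLockEntry are assumed from the org.apache.hadoop.hbase.util.IdLock utility and are not part of this hunk:

    IdLock.Entry lockEntry = offsetLock.getLockEntry(dataBlockOffset);  // one lock per block offset
    try {
      // check the block cache first; only the thread holding the entry reads the block from disk,
      // so concurrent readers of the same offset wait here instead of re-reading it
      // ... cache lookup / disk read ...
    } finally {
      offsetLock.releaseLockEntry(lockEntry);
    }
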
The purpose of this is to avoid two clients loading - * the same block, and have all but one client wait to get the block from the - * cache. + * A "sparse lock" implementation allowing to lock on a particular block identified by offset. The + * purpose of this is to avoid two clients loading the same block, and have all but one client + * wait to get the block from the cache. */ private IdLock offsetLock = new IdLock(); @@ -123,14 +121,14 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { /** * Opens a HFile. - * @param context Reader context info - * @param fileInfo HFile info + * @param context Reader context info + * @param fileInfo HFile info * @param cacheConf Cache configuration. - * @param conf Configuration + * @param conf Configuration */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public HFileReaderImpl(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, - Configuration conf) throws IOException { + Configuration conf) throws IOException { this.cacheConf = cacheConf; this.context = context; this.path = context.getFilePath(); @@ -140,8 +138,8 @@ public HFileReaderImpl(ReaderContext context, HFileInfo fileInfo, CacheConfig ca this.fileInfo = fileInfo; this.trailer = fileInfo.getTrailer(); this.hfileContext = fileInfo.getHFileContext(); - this.fsBlockReader = new HFileBlock.FSReaderImpl(context, hfileContext, - cacheConf.getByteBuffAllocator(), conf); + this.fsBlockReader = + new HFileBlock.FSReaderImpl(context, hfileContext, cacheConf.getByteBuffAllocator(), conf); this.dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo); fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf); dataBlockIndexReader = fileInfo.getDataBlockIndexReader(); @@ -166,16 +164,13 @@ private Optional toStringLastKey() { @Override public String toString() { - return "reader=" + path.toString() + - (!isFileInfoLoaded()? "": - ", compression=" + trailer.getCompressionCodec().getName() + - ", cacheConf=" + cacheConf + - ", firstKey=" + toStringFirstKey() + - ", lastKey=" + toStringLastKey()) + - ", avgKeyLen=" + fileInfo.getAvgKeyLen() + - ", avgValueLen=" + fileInfo.getAvgValueLen() + - ", entries=" + trailer.getEntryCount() + - ", length=" + context.getFileSize(); + return "reader=" + path.toString() + + (!isFileInfoLoaded() + ? "" + : ", compression=" + trailer.getCompressionCodec().getName() + ", cacheConf=" + cacheConf + + ", firstKey=" + toStringFirstKey() + ", lastKey=" + toStringLastKey()) + + ", avgKeyLen=" + fileInfo.getAvgKeyLen() + ", avgValueLen=" + fileInfo.getAvgValueLen() + + ", entries=" + trailer.getEntryCount() + ", length=" + context.getFileSize(); } @Override @@ -184,23 +179,22 @@ public long length() { } /** - * @return the first key in the file. May be null if file has no entries. Note - * that this is not the first row key, but rather the byte form of the - * first KeyValue. + * @return the first key in the file. May be null if file has no entries. Note that this is not + * the first row key, but rather the byte form of the first KeyValue. */ @Override public Optional getFirstKey() { if (dataBlockIndexReader == null) { throw new BlockIndexNotLoadedException(path); } - return dataBlockIndexReader.isEmpty() ? Optional.empty() - : Optional.of(dataBlockIndexReader.getRootBlockKey(0)); + return dataBlockIndexReader.isEmpty() + ? 
Optional.empty() + : Optional.of(dataBlockIndexReader.getRootBlockKey(0)); } /** - * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's - * patch goes in to eliminate {@link KeyValue} here. - * + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to + * eliminate {@link KeyValue} here. * @return the first row key, or null if the file is empty. */ @Override @@ -210,9 +204,8 @@ public Optional getFirstRowKey() { } /** - * TODO left from {@link HFile} version 1: move this to StoreFile after - * Ryan's patch goes in to eliminate {@link KeyValue} here. - * + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to + * eliminate {@link KeyValue} here. * @return the last row key, or null if the file is empty. */ @Override @@ -238,14 +231,13 @@ public Compression.Algorithm getCompressionAlgorithm() { } /** - * @return the total heap size of data and meta block indexes in bytes. Does - * not take into account non-root blocks of a multilevel data index. + * @return the total heap size of data and meta block indexes in bytes. Does not take into account + * non-root blocks of a multilevel data index. */ @Override public long indexSize() { return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0) - + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() - : 0); + + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() : 0); } @Override @@ -300,8 +292,8 @@ public boolean isPrimaryReplicaReader() { } /** - * An exception thrown when an operation requiring a scanner to be seeked - * is invoked on a scanner that is not seeked. + * An exception thrown when an operation requiring a scanner to be seeked is invoked on a scanner + * that is not seeked. */ @SuppressWarnings("serial") public static class NotSeekedException extends IllegalStateException { @@ -328,11 +320,10 @@ protected static class HFileScannerImpl implements HFileScanner { final ObjectIntPair pair = new ObjectIntPair<>(); /** - * The next indexed key is to keep track of the indexed key of the next data block. - * If the nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the - * current data block is the last data block. - * - * If the nextIndexedKey is null, it means the nextIndexedKey has not been loaded yet. + * The next indexed key is to keep track of the indexed key of the next data block. If the + * nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the current data block is the + * last data block. If the nextIndexedKey is null, it means the nextIndexedKey has not been + * loaded yet. */ protected Cell nextIndexedKey; // Current block being used. NOTICE: DON't release curBlock separately except in shipped() or @@ -345,7 +336,7 @@ protected static class HFileScannerImpl implements HFileScanner { protected final ArrayList prevBlocks = new ArrayList<>(); public HFileScannerImpl(final HFile.Reader reader, final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { + final boolean pread, final boolean isCompaction) { this.reader = reader; this.cacheBlocks = cacheBlocks; this.pread = pread; @@ -380,7 +371,7 @@ private void returnBlocks(boolean returnAll) { } @Override - public boolean isSeeked(){ + public boolean isSeeked() { return blockBuffer != null; } @@ -423,8 +414,7 @@ public void close() { // Returns the #bytes in HFile for the current cell. Used to skip these many bytes in current // HFile block's buffer so as to position to the next cell. 
private int getCurCellSerializedSize() { - int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen - + currMemstoreTSLen; + int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + currMemstoreTSLen; if (this.reader.getFileContext().isIncludesTags()) { curCellSize += Bytes.SIZEOF_SHORT + currTagsLen; } @@ -443,8 +433,8 @@ protected void readKeyValueLen() { // But ensure that you read long instead of two ints long ll = blockBuffer.getLongAfterPosition(0); // Read top half as an int of key length and bottom int as value length - this.currKeyLen = (int)(ll >> Integer.SIZE); - this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + this.currKeyLen = (int) (ll >> Integer.SIZE); + this.currValueLen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); checkKeyValueLen(); this.rowLen = blockBuffer.getShortAfterPosition(Bytes.SIZEOF_LONG); // Move position past the key and value lengths and then beyond the key and value @@ -460,11 +450,10 @@ protected void readKeyValueLen() { private final void checkTagsLen() { if (checkLen(this.currTagsLen)) { - throw new IllegalStateException("Invalid currTagsLen " + this.currTagsLen + - ". Block offset: " + curBlock.getOffset() + ", block length: " + - this.blockBuffer.limit() + - ", position: " + this.blockBuffer.position() + " (without header)." + - " path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid currTagsLen " + this.currTagsLen + ". Block offset: " + curBlock.getOffset() + + ", block length: " + this.blockBuffer.limit() + ", position: " + + this.blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } } @@ -496,7 +485,7 @@ private void _readMvccVersion(int offsetFromPos) { if (len == 1) { this.currMemstoreTS = firstByte; } else { - int remaining = len -1; + int remaining = len - 1; long i = 0; offsetFromPos++; if (remaining >= Bytes.SIZEOF_INT) { @@ -523,19 +512,14 @@ private void _readMvccVersion(int offsetFromPos) { } /** - * Within a loaded block, seek looking for the last key that is smaller than - * (or equal to?) the key we are interested in. - * A note on the seekBefore: if you have seekBefore = true, AND the first - * key in the block = key, then you'll get thrown exceptions. The caller has - * to check for that case and load the previous block as appropriate. - * @param key - * the key to find - * @param seekBefore - * find the key before the given key in case of exact match. - * @return 0 in case of an exact key match, 1 in case of an inexact match, - * -2 in case of an inexact match and furthermore, the input key - * less than the first key of current block(e.g. using a faked index - * key) + * Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the + * key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the + * first key in the block = key, then you'll get thrown exceptions. The caller has to check for + * that case and load the previous block as appropriate. n * the key to find n * find the key + * before the given key in case of exact match. + * @return 0 in case of an exact key match, 1 in case of an inexact match, -2 in case of an + * inexact match and furthermore, the input key less than the first key of current + * block(e.g. 
using a faked index key) */ protected int blockSeek(Cell key, boolean seekBefore) { int klen, vlen, tlen = 0; @@ -545,31 +529,29 @@ protected int blockSeek(Cell key, boolean seekBefore) { offsetFromPos = 0; // Better to ensure that we use the BB Utils here long ll = blockBuffer.getLongAfterPosition(offsetFromPos); - klen = (int)(ll >> Integer.SIZE); - vlen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + klen = (int) (ll >> Integer.SIZE); + vlen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); if (checkKeyLen(klen) || checkLen(vlen)) { - throw new IllegalStateException("Invalid klen " + klen + " or vlen " - + vlen + ". Block offset: " - + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)." - + " path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid klen " + klen + " or vlen " + vlen + ". Block offset: " + curBlock.getOffset() + + ", block length: " + blockBuffer.limit() + ", position: " + blockBuffer.position() + + " (without header)." + " path=" + reader.getPath()); } offsetFromPos += Bytes.SIZEOF_LONG; this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos); blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair); bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen, rowLen); int comp = - PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); + PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); offsetFromPos += klen + vlen; if (this.reader.getFileContext().isIncludesTags()) { // Read short as unsigned, high byte first tlen = ((blockBuffer.getByteAfterPosition(offsetFromPos) & 0xff) << 8) - ^ (blockBuffer.getByteAfterPosition(offsetFromPos + 1) & 0xff); + ^ (blockBuffer.getByteAfterPosition(offsetFromPos + 1) & 0xff); if (checkLen(tlen)) { throw new IllegalStateException("Invalid tlen " + tlen + ". Block offset: " - + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)." - + " path=" + reader.getPath()); + + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } // add the two bytes read for the tags. 
offsetFromPos += tlen + (Bytes.SIZEOF_SHORT); @@ -582,10 +564,9 @@ protected int blockSeek(Cell key, boolean seekBefore) { if (seekBefore) { if (lastKeyValueSize < 0) { throw new IllegalStateException("blockSeek with seekBefore " - + "at the first key of the block: key=" + CellUtil.getCellKeyAsString(key) - + ", blockOffset=" + curBlock.getOffset() + ", onDiskSize=" - + curBlock.getOnDiskSizeWithHeader() - + ", path=" + reader.getPath()); + + "at the first key of the block: key=" + CellUtil.getCellKeyAsString(key) + + ", blockOffset=" + curBlock.getOffset() + ", onDiskSize=" + + curBlock.getOnDiskSizeWithHeader() + ", path=" + reader.getPath()); } blockBuffer.moveBack(lastKeyValueSize); readKeyValueLen(); @@ -643,17 +624,19 @@ public int reseekTo(Cell key) throws IOException { return compared; } else { // The comparison with no_next_index_key has to be checked - if (this.nextIndexedKey != null && - (this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || PrivateCellUtil - .compareKeyIgnoresMvcc(reader.getComparator(), key, nextIndexedKey) < 0)) { + if ( + this.nextIndexedKey != null && (this.nextIndexedKey + == KeyValueScanner.NO_NEXT_INDEXED_KEY + || PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, nextIndexedKey) + < 0) + ) { // The reader shall continue to scan the current data block instead // of querying the // block index as long as it knows the target key is strictly // smaller than // the next indexed key or the current data block is the last data // block. - return loadBlockAndSeekToKey(this.curBlock, nextIndexedKey, false, key, - false); + return loadBlockAndSeekToKey(this.curBlock, nextIndexedKey, false, key, false); } } } @@ -663,22 +646,19 @@ public int reseekTo(Cell key) throws IOException { } /** - * An internal API function. Seek to the given key, optionally rewinding to - * the first key of the block before doing the seek. - * - * @param key - a cell representing the key that we need to fetch - * @param rewind whether to rewind to the first key of the block before - * doing the seek. If this is false, we are assuming we never go - * back, otherwise the result is undefined. - * @return -1 if the key is earlier than the first key of the file, - * 0 if we are at the given key, 1 if we are past the given key - * -2 if the key is earlier than the first key of the file while - * using a faked index key + * An internal API function. Seek to the given key, optionally rewinding to the first key of the + * block before doing the seek. + * @param key - a cell representing the key that we need to fetch + * @param rewind whether to rewind to the first key of the block before doing the seek. If this + * is false, we are assuming we never go back, otherwise the result is undefined. + * @return -1 if the key is earlier than the first key of the file, 0 if we are at the given + * key, 1 if we are past the given key -2 if the key is earlier than the first key of + * the file while using a faked index key */ public int seekTo(Cell key, boolean rewind) throws IOException { HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, curBlock, - cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); + cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) { // This happens if the key e.g. falls before the beginning of the file. 
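Stepping back from the internals, a hedged sketch of how the seek-then-scan contract documented for seekTo plays out through the public HFileScanner API; the getScanner overload taking (conf, cacheBlocks, pread) and the searchKey cell are assumptions, not taken from this patch:

    HFileScanner scanner = reader.getScanner(conf, false /* cacheBlocks */, true /* pread */);
    int where = scanner.seekTo(searchKey);   // -1: before first key, 0: exact match, 1: past the key
    if (where != -1) {
      do {
        Cell cell = scanner.getCell();       // current cell at or after searchKey
        // ... process cell ...
      } while (scanner.next());              // next() loads the following data block when needed
    }
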
return -1; @@ -690,8 +670,7 @@ public int seekTo(Cell key, boolean rewind) throws IOException { @Override public boolean seekBefore(Cell key) throws IOException { HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, curBlock, - cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), - reader); + cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), reader); if (seekToBlock == null) { return false; } @@ -735,13 +714,12 @@ protected void releaseIfNotCurBlock(HFileBlock block) { } /** - * Scans blocks in the "scanned" section of the {@link HFile} until the next - * data block is found. - * + * Scans blocks in the "scanned" section of the {@link HFile} until the next data block is + * found. * @return the next block, or null if there are no more data blocks */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Yeah, unnecessary null check; could do w/ clean up") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Yeah, unnecessary null check; could do w/ clean up") protected HFileBlock readNextDataBlock() throws IOException { long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); if (curBlock == null) { @@ -792,28 +770,28 @@ public Cell getCell() { // we can handle the 'no tags' case. if (currTagsLen > 0) { ret = new SizeCachedKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, - rowLen); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } else { ret = new SizeCachedNoTagsKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, - rowLen); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } } else { ByteBuffer buf = blockBuffer.asSubByteBuffer(cellBufSize); if (buf.isDirect()) { ret = currTagsLen > 0 - ? new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, - currKeyLen, rowLen) - : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, - currKeyLen, rowLen); + ? new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, currKeyLen, + rowLen) + : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen); } else { if (currTagsLen > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId, currKeyLen, rowLen); + cellBufSize, seqId, currKeyLen, rowLen); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId, currKeyLen, rowLen); + cellBufSize, seqId, currKeyLen, rowLen); } } } @@ -828,8 +806,8 @@ public Cell getKey() { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, keyPair); ByteBuffer keyBuf = keyPair.getFirst(); if (keyBuf.hasArray()) { - return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), keyBuf.arrayOffset() - + keyPair.getSecond(), currKeyLen); + return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), + keyBuf.arrayOffset() + keyPair.getSecond(), currKeyLen); } else { // Better to do a copy here instead of holding on to this BB so that // we could release the blocks referring to this key. 
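getKey() above wraps the key in place when the block buffer is heap-backed and copies it out when the buffer is direct, so the block's memory can be released. A simplified sketch of that branch using a plain ByteBuffer; KeyView is a made-up stand-in for KeyValue.KeyOnlyKeyValue:

import java.nio.ByteBuffer;

public class KeyViewSketch {
  /** Minimal stand-in for KeyValue.KeyOnlyKeyValue: a (array, offset, length) view. */
  static final class KeyView {
    final byte[] bytes; final int offset; final int length;
    KeyView(byte[] bytes, int offset, int length) {
      this.bytes = bytes; this.offset = offset; this.length = length;
    }
    @Override public String toString() { return new String(bytes, offset, length); }
  }

  /** Wrap the key without copying when the buffer is heap-backed, copy otherwise. */
  static KeyView keyView(ByteBuffer keyBuf, int pos, int keyLen) {
    if (keyBuf.hasArray()) {
      // On-heap buffer: point straight into the backing array, no copy.
      return new KeyView(keyBuf.array(), keyBuf.arrayOffset() + pos, keyLen);
    }
    // Direct buffer: copy out, so the block that owns the memory can be released.
    byte[] copy = new byte[keyLen];
    ByteBuffer dup = keyBuf.duplicate();
    dup.position(pos);
    dup.get(copy);
    return new KeyView(copy, 0, keyLen);
  }

  public static void main(String[] args) {
    byte[] data = "rowkey:col:value".getBytes();
    System.out.println(keyView(ByteBuffer.wrap(data), 0, 6));                                  // rowkey
    System.out.println(keyView((ByteBuffer) ByteBuffer.allocateDirect(32).put(data).flip(), 0, 6));
  }
}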
This key is specifically used @@ -871,11 +849,10 @@ private void positionThisBlockBuffer() { try { blockBuffer.skip(getCurCellSerializedSize()); } catch (IllegalArgumentException e) { - LOG.error("Current pos = " + blockBuffer.position() - + "; currKeyLen = " + currKeyLen + "; currValLen = " - + currValueLen + "; block limit = " + blockBuffer.limit() - + "; currBlock currBlockOffset = " + this.curBlock.getOffset() - + "; path=" + reader.getPath()); + LOG.error("Current pos = " + blockBuffer.position() + "; currKeyLen = " + currKeyLen + + "; currValLen = " + currValueLen + "; block limit = " + blockBuffer.limit() + + "; currBlock currBlockOffset = " + this.curBlock.getOffset() + "; path=" + + reader.getPath()); throw e; } } @@ -894,7 +871,6 @@ private boolean positionForNextBlock() throws IOException { return isNextBlock(); } - private boolean isNextBlock() throws IOException { // Methods are small so they get inlined because they are 'hot'. HFileBlock nextBlock = readNextDataBlock(); @@ -918,10 +894,8 @@ private final boolean _next() throws IOException { } /** - * Go to the next key/value in the block section. Loads the next block if - * necessary. If successful, {@link #getKey()} and {@link #getValue()} can - * be called. - * + * Go to the next key/value in the block section. Loads the next block if necessary. If + * successful, {@link #getKey()} and {@link #getValue()} can be called. * @return true if successfully navigated to the next key/value */ @Override @@ -935,9 +909,8 @@ public boolean next() throws IOException { /** * Positions this scanner at the start of the file. - * - * @return false if empty file; i.e. a call to next would return false and - * the current key and value are undefined. + * @return false if empty file; i.e. a call to next would return false and the current key and + * value are undefined. */ @Override public boolean seekTo() throws IOException { @@ -959,7 +932,7 @@ public boolean seekTo() throws IOException { return true; } - protected boolean processFirstDataBlock() throws IOException{ + protected boolean processFirstDataBlock() throws IOException { blockBuffer.rewind(); readKeyValueLen(); return true; @@ -970,14 +943,14 @@ protected void readAndUpdateNewBlock(long firstDataBlockOffset) throws IOExcepti isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); if (newBlock.getOffset() < 0) { releaseIfNotCurBlock(newBlock); - throw new IOException("Invalid offset=" + newBlock.getOffset() + - ", path=" + reader.getPath()); + throw new IOException( + "Invalid offset=" + newBlock.getOffset() + ", path=" + reader.getPath()); } updateCurrentBlock(newBlock); } protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, - Cell key, boolean seekBefore) throws IOException { + Cell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1008,10 +981,9 @@ protected final boolean checkLen(final int v) { protected final void checkKeyValueLen() { if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) { throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen - + " or currValueLen " + this.currValueLen + ". Block offset: " - + this.curBlock.getOffset() + ", block length: " - + this.blockBuffer.limit() + ", position: " + this.blockBuffer.position() - + " (without header)." + ", path=" + reader.getPath()); + + " or currValueLen " + this.currValueLen + ". 
Block offset: " + this.curBlock.getOffset() + + ", block length: " + this.blockBuffer.limit() + ", position: " + + this.blockBuffer.position() + " (without header)." + ", path=" + reader.getPath()); } } @@ -1019,16 +991,16 @@ protected final void checkKeyValueLen() { * Updates the current block to be the given {@link HFileBlock}. Seeks to the the first * key/value pair. * @param newBlock the block read by {@link HFileReaderImpl#readBlock}, it's a totally new block - * with new allocated {@link ByteBuff}, so if no further reference to this block, we - * should release it carefully. + * with new allocated {@link ByteBuff}, so if no further reference to this + * block, we should release it carefully. */ protected void updateCurrentBlock(HFileBlock newBlock) throws IOException { try { if (newBlock.getBlockType() != BlockType.DATA) { throw new IllegalStateException( - "ScannerV2 works only on data blocks, got " + newBlock.getBlockType() + "; " - + "HFileName=" + reader.getPath() + ", " + "dataBlockEncoder=" - + reader.getDataBlockEncoding() + ", " + "isCompaction=" + isCompaction); + "ScannerV2 works only on data blocks, got " + newBlock.getBlockType() + "; " + + "HFileName=" + reader.getPath() + ", " + "dataBlockEncoder=" + + reader.getDataBlockEncoding() + ", " + "isCompaction=" + isCompaction); } updateCurrBlockRef(newBlock); blockBuffer = newBlock.getBufferWithoutHeader(); @@ -1048,8 +1020,8 @@ protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { buffer.skip(Bytes.SIZEOF_INT);// Skip value len part ByteBuffer keyBuff = buffer.asSubByteBuffer(klen); if (keyBuff.hasArray()) { - return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), keyBuff.arrayOffset() - + keyBuff.position(), klen); + return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), + keyBuff.arrayOffset() + keyBuff.position(), klen); } else { return new ByteBufferKeyOnlyKeyValue(keyBuff, keyBuff.position(), klen); } @@ -1106,8 +1078,7 @@ public void setConf(Configuration conf) { public static final int PBUF_TRAILER_MINOR_VERSION = 2; /** - * The size of a (key length, value length) tuple that prefixes each entry in - * a data block. + * The size of a (key length, value length) tuple that prefixes each entry in a data block. */ public final static int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT; @@ -1116,14 +1087,13 @@ public void setConf(Configuration conf) { * and its encoding vs. {@code expectedDataBlockEncoding}. Unpacks the block as necessary. */ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock, - boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) throws IOException { + boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException { // Check cache for block. If found return. 
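Each entry in a data block begins with the KEY_VALUE_LEN_SIZE prefix referenced above, a 4-byte key length followed by a 4-byte value length, so the first key of a block can be recovered by reading one int, skipping the next, and slicing. A minimal sketch over a plain ByteBuffer; real keys are serialized KeyValue keys, the string here is only for illustration:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FirstKeySketch {
  static final int KEY_VALUE_LEN_SIZE = 2 * Integer.BYTES;  // (key length, value length) prefix

  /** Extract the first key of a block whose entries are laid out as [klen][vlen][key][value]... */
  static byte[] firstKey(ByteBuffer blockWithoutHeader) {
    ByteBuffer buf = blockWithoutHeader.duplicate();
    int klen = buf.getInt();   // key length
    buf.getInt();              // skip the value length
    byte[] key = new byte[klen];
    buf.get(key);
    return key;
  }

  public static void main(String[] args) {
    byte[] k = "row-001".getBytes(StandardCharsets.UTF_8);
    byte[] v = "cell-value".getBytes(StandardCharsets.UTF_8);
    ByteBuffer block = ByteBuffer.allocate(KEY_VALUE_LEN_SIZE + k.length + v.length);
    block.putInt(k.length).putInt(v.length).put(k).put(v).flip();
    System.out.println(new String(firstKey(block), StandardCharsets.UTF_8)); // row-001
  }
}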
BlockCache cache = cacheConf.getBlockCache().orElse(null); if (cache != null) { - HFileBlock cachedBlock = - (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, - updateCacheMetrics, expectedBlockType); + HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, + updateCacheMetrics, expectedBlockType); if (cachedBlock != null) { if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) { HFileBlock compressedBlock = cachedBlock; @@ -1147,8 +1117,10 @@ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, bo // Block types other than data blocks always have // DataBlockEncoding.NONE. To avoid false negative cache misses, only // perform this check if cached block is a data block. - if (cachedBlock.getBlockType().isData() && - !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) { + if ( + cachedBlock.getBlockType().isData() + && !actualDataBlockEncoding.equals(expectedDataBlockEncoding) + ) { // This mismatch may happen if a Scanner, which is used for say a // compaction, tries to read an encoded block from the block cache. // The reverse might happen when an EncodedScanner tries to read @@ -1159,17 +1131,20 @@ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, bo // forced here. This will potentially cause a significant number of // cache misses, so update so we should keep track of this as it might // justify the work on a CompoundScanner. - if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) && - !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) { + if ( + !expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) + && !actualDataBlockEncoding.equals(DataBlockEncoding.NONE) + ) { // If the block is encoded but the encoding does not match the // expected encoding it is likely the encoding was changed but the // block was not yet evicted. Evictions on file close happen async // so blocks with the old encoding still linger in cache for some // period of time. This event should be rare as it only happens on // schema definition change. - LOG.info("Evicting cached block with key {} because data block encoding mismatch; " + - "expected {}, actual {}, path={}", cacheKey, actualDataBlockEncoding, - expectedDataBlockEncoding, path); + LOG.info( + "Evicting cached block with key {} because data block encoding mismatch; " + + "expected {}, actual {}, path={}", + cacheKey, actualDataBlockEncoding, expectedDataBlockEncoding, path); // This is an error scenario. so here we need to release the block. returnAndEvictBlock(cache, cacheKey, cachedBlock); } @@ -1191,8 +1166,7 @@ private void returnAndEvictBlock(BlockCache cache, BlockCacheKey cacheKey, Cache * @return block wrapped in a ByteBuffer, with header skipped */ @Override - public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) - throws IOException { + public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException { if (trailer.getMetaIndexCount() == 0) { return null; // there are no meta blocks } @@ -1201,8 +1175,7 @@ public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) } byte[] mbname = Bytes.toBytes(metaBlockName); - int block = metaBlockIndexReader.rootBlockContainingKey(mbname, - 0, mbname.length); + int block = metaBlockIndexReader.rootBlockContainingKey(mbname, 0, mbname.length); if (block == -1) { return null; } @@ -1215,11 +1188,11 @@ public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) // Check cache for block. 
If found return. long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block); BlockCacheKey cacheKey = - new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META); + new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META); cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory()); HFileBlock cachedBlock = - getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null); + getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null); if (cachedBlock != null) { assert cachedBlock.isUnpacked() : "Packed block leak."; // Return a distinct 'shallow copy' of the block, @@ -1229,7 +1202,7 @@ public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) // Cache Miss, please load. HFileBlock compressedBlock = - fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false, true); + fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false, true); HFileBlock uncompressedBlock = compressedBlock.unpack(hfileContext, fsBlockReader); if (compressedBlock != uncompressedBlock) { compressedBlock.release(); @@ -1263,20 +1236,17 @@ private boolean shouldUseHeap(BlockType expectedBlockType) { } @Override - public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, - final boolean cacheBlock, boolean pread, final boolean isCompaction, - boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException { + public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final boolean cacheBlock, + boolean pread, final boolean isCompaction, boolean updateCacheMetrics, + BlockType expectedBlockType, DataBlockEncoding expectedDataBlockEncoding) throws IOException { if (dataBlockIndexReader == null) { throw new IOException(path + " block index not loaded"); } long trailerOffset = trailer.getLoadOnOpenDataOffset(); if (dataBlockOffset < 0 || dataBlockOffset >= trailerOffset) { - throw new IOException("Requested block is out of range: " + dataBlockOffset + - ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() + - ", trailer.getLoadOnOpenDataOffset: " + trailerOffset + - ", path=" + path); + throw new IOException("Requested block is out of range: " + dataBlockOffset + + ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() + + ", trailer.getLoadOnOpenDataOffset: " + trailerOffset + ", path=" + path); } // For any given block from any given file, synchronize reads for said // block. @@ -1284,8 +1254,8 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, // the other choice is to duplicate work (which the cache would prevent you // from doing). - BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset, - this.isPrimaryReplicaReader(), expectedBlockType); + BlockCacheKey cacheKey = + new BlockCacheKey(name, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType); boolean useLock = false; IdLock.Entry lockEntry = null; @@ -1319,8 +1289,8 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, returnAndEvictBlock(cache, cacheKey, cachedBlock); }); throw new IOException("Cached block under key " + cacheKey + " " - + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: " - + dataBlockEncoder.getDataBlockEncoding() + "), path=" + path); + + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: " + + dataBlockEncoder.getDataBlockEncoding() + "), path=" + path); } } // Cache-hit. 
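readBlock() above looks the block up in the cache under a key derived from the file name and the block's byte offset (the real BlockCacheKey also carries replica and block-type hints). A toy illustration of that keying scheme, not HBase's actual class:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class BlockCacheKeySketch {
  /** Toy cache key: an HFile is immutable, so (file name, block offset) identifies a block. */
  static final class Key {
    final String hfileName; final long offset;
    Key(String hfileName, long offset) { this.hfileName = hfileName; this.offset = offset; }
    @Override public boolean equals(Object o) {
      return o instanceof Key && ((Key) o).offset == offset && ((Key) o).hfileName.equals(hfileName);
    }
    @Override public int hashCode() { return Objects.hash(hfileName, offset); }
  }

  public static void main(String[] args) {
    Map<Key, byte[]> cache = new HashMap<>();
    cache.put(new Key("abc123def", 65536L), new byte[] { 1, 2, 3 });
    // A later read of the same file/offset hits the cache instead of going back to the filesystem.
    System.out.println(cache.containsKey(new Key("abc123def", 65536L)));   // true
    System.out.println(cache.containsKey(new Key("abc123def", 131072L)));  // false
  }
}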
Return! @@ -1375,16 +1345,13 @@ public boolean hasMVCCInfo() { } /** - * Compares the actual type of a block retrieved from cache or disk with its - * expected type and throws an exception in case of a mismatch. Expected - * block type of {@link BlockType#DATA} is considered to match the actual - * block type [@link {@link BlockType#ENCODED_DATA} as well. - * @param block a block retrieved from cache or disk - * @param expectedBlockType the expected block type, or null to skip the - * check + * Compares the actual type of a block retrieved from cache or disk with its expected type and + * throws an exception in case of a mismatch. Expected block type of {@link BlockType#DATA} is + * considered to match the actual block type [@link {@link BlockType#ENCODED_DATA} as well. + * @param block a block retrieved from cache or disk + * @param expectedBlockType the expected block type, or null to skip the check */ - private void validateBlockType(HFileBlock block, - BlockType expectedBlockType) throws IOException { + private void validateBlockType(HFileBlock block, BlockType expectedBlockType) throws IOException { if (expectedBlockType == null) { return; } @@ -1395,25 +1362,25 @@ private void validateBlockType(HFileBlock block, return; } if (actualBlockType != expectedBlockType) { - throw new IOException("Expected block type " + expectedBlockType + ", " + - "but got " + actualBlockType + ": " + block + ", path=" + path); + throw new IOException("Expected block type " + expectedBlockType + ", " + "but got " + + actualBlockType + ": " + block + ", path=" + path); } } /** - * @return Last key as cell in the file. May be null if file has no entries. Note that - * this is not the last row key, but it is the Cell representation of the last - * key + * @return Last key as cell in the file. May be null if file has no entries. Note that this is not + * the last row key, but it is the Cell representation of the last key */ @Override public Optional getLastKey() { - return dataBlockIndexReader.isEmpty() ? Optional.empty() : - Optional.of(fileInfo.getLastKeyCell()); + return dataBlockIndexReader.isEmpty() + ? Optional.empty() + : Optional.of(fileInfo.getLastKeyCell()); } /** - * @return Midkey for this file. We work with block boundaries only so - * returned midkey is an approximation only. + * @return Midkey for this file. We work with block boundaries only so returned midkey is an + * approximation only. */ @Override public Optional midKey() throws IOException { @@ -1444,8 +1411,8 @@ protected static class EncodedScanner extends HFileScannerImpl { private final DataBlockEncoder.EncodedSeeker seeker; private final DataBlockEncoder dataBlockEncoder; - public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, - boolean pread, boolean isCompaction, HFileContext meta, Configuration conf) { + public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, boolean pread, + boolean isCompaction, HFileContext meta, Configuration conf) { super(reader, cacheBlocks, pread, isCompaction); DataBlockEncoding encoding = reader.getDataBlockEncoding(); dataBlockEncoder = encoding.getEncoder(); @@ -1454,7 +1421,7 @@ public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, } @Override - public boolean isSeeked(){ + public boolean isSeeked() { return curBlock != null; } @@ -1467,8 +1434,8 @@ public void setNonSeekedState() { * Updates the current block to be the given {@link HFileBlock}. Seeks to the the first * key/value pair. 
* @param newBlock the block to make current, and read by {@link HFileReaderImpl#readBlock}, - * it's a totally new block with new allocated {@link ByteBuff}, so if no further - * reference to this block, we should release it carefully. + * it's a totally new block with new allocated {@link ByteBuff}, so if no + * further reference to this block, we should release it carefully. */ @Override protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException { @@ -1480,9 +1447,9 @@ protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileExcept short dataBlockEncoderId = newBlock.getDataBlockEncodingId(); if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) { String encoderCls = dataBlockEncoder.getClass().getName(); - throw new CorruptHFileException("Encoder " + encoderCls + - " doesn't support data block encoding " + - DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath()); + throw new CorruptHFileException( + "Encoder " + encoderCls + " doesn't support data block encoding " + + DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath()); } updateCurrBlockRef(newBlock); ByteBuff encodedBuffer = getEncodedBuffer(newBlock); @@ -1499,7 +1466,7 @@ private ByteBuff getEncodedBuffer(HFileBlock newBlock) { int pos = newBlock.headerSize() + DataBlockEncoding.ID_SIZE; origBlock.position(pos); origBlock - .limit(pos + newBlock.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE); + .limit(pos + newBlock.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE); return origBlock.slice(); } @@ -1567,8 +1534,8 @@ protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { } @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, - boolean rewind, Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, + Cell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1585,8 +1552,7 @@ public int compareKey(CellComparator comparator, Cell key) { } /** - * Returns a buffer with the Bloom filter metadata. The caller takes - * ownership of the buffer. + * Returns a buffer with the Bloom filter metadata. The caller takes ownership of the buffer. 
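getEncodedBuffer() above windows the block buffer onto just the encoded payload: advance past the block header plus the two-byte data-block-encoding id, cap the limit at the uncompressed size, and slice. The same position/limit/slice pattern on a plain ByteBuffer; the header size and encoding id used here are illustrative values:

import java.nio.ByteBuffer;

public class EncodedPayloadSliceSketch {
  public static void main(String[] args) {
    int headerSize = 33;   // illustrative block header size
    int idSize = 2;        // the DataBlockEncoding id is serialized as a short
    byte[] payload = { 10, 20, 30, 40 };

    ByteBuffer block = ByteBuffer.allocate(headerSize + idSize + payload.length + 8);
    block.position(headerSize);
    block.putShort((short) 4);   // pretend encoding id
    block.put(payload);          // encoded key/values
    // ... trailing bytes (e.g. checksums) that must not end up in the slice

    // Window the buffer onto the encoded payload only, then slice it out.
    int pos = headerSize + idSize;
    block.position(pos);
    block.limit(pos + payload.length);
    ByteBuffer encoded = block.slice();

    System.out.println(encoded.remaining()); // 4
    System.out.println(encoded.get(0));      // 10
  }
}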
*/ @Override public DataInput getGeneralBloomFilterMetadata() throws IOException { @@ -1598,12 +1564,12 @@ public DataInput getDeleteBloomFilterMetadata() throws IOException { return this.getBloomFilterMetadata(BlockType.DELETE_FAMILY_BLOOM_META); } - private DataInput getBloomFilterMetadata(BlockType blockType) - throws IOException { - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - " is not supported, path=" + path) ; + private DataInput getBloomFilterMetadata(BlockType blockType) throws IOException { + if ( + blockType != BlockType.GENERAL_BLOOM_META && blockType != BlockType.DELETE_FAMILY_BLOOM_META + ) { + throw new RuntimeException( + "Block Type: " + blockType.toString() + " is not supported, path=" + path); } for (HFileBlock b : fileInfo.getLoadOnOpenBlocks()) { @@ -1624,8 +1590,8 @@ public HFileContext getFileContext() { } /** - * Returns false if block prefetching was requested for this file and has - * not completed, true otherwise + * Returns false if block prefetching was requested for this file and has not completed, true + * otherwise */ @Override public boolean prefetchComplete() { @@ -1634,15 +1600,14 @@ public boolean prefetchComplete() { /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. NOTE: Do not use this overload of getScanner for - * compactions. See {@link #getScanner(Configuration, boolean, boolean, boolean)} - * - * @param conf Store configuration. + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up + * in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this + * overload of getScanner for compactions. See + * {@link #getScanner(Configuration, boolean, boolean, boolean)} + * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. - * @param pread Use positional read rather than seek+read if true (pread is - * better for random reads, seek+read is better scanning). + * @param pread Use positional read rather than seek+read if true (pread is better for + * random reads, seek+read is better scanning). * @return Scanner on this file. */ @Override @@ -1652,23 +1617,16 @@ public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final bo /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. - * @param conf - * Store configuration. - * @param cacheBlocks - * True if we should cache blocks read in by this scanner. - * @param pread - * Use positional read rather than seek+read if true (pread is better - * for random reads, seek+read is better scanning). - * @param isCompaction - * is scanner being used for a compaction? + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up + * in a Scanner. Letting go of your references to the scanner is sufficient. n * Store + * configuration. n * True if we should cache blocks read in by this scanner. 
n * Use positional + * read rather than seek+read if true (pread is better for random reads, seek+read is better + * scanning). n * is scanner being used for a compaction? * @return Scanner on this file. */ @Override public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread, - final boolean isCompaction) { + final boolean isCompaction) { if (dataBlockEncoder.useEncodedScanner()) { return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index a2a35fef37af..3e5ada1442f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,105 +20,84 @@ import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.Shipper; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.regionserver.Shipper; +import org.apache.yetus.audience.InterfaceAudience; /** - * A scanner allows you to position yourself within a HFile and - * scan through it. It allows you to reposition yourself as well. - * - *

        A scanner doesn't always have a key/value that it is pointing to - * when it is first created and before - * {@link #seekTo()}/{@link #seekTo(Cell)} are called. - * In this case, {@link #getKey()}/{@link #getValue()} returns null. At most - * other times, a key and value will be available. The general pattern is that - * you position the Scanner using the seekTo variants and then getKey and - * getValue. + * A scanner allows you to position yourself within a HFile and scan through it. It allows you to + * reposition yourself as well. + *

        + * A scanner doesn't always have a key/value that it is pointing to when it is first created and + * before {@link #seekTo()}/{@link #seekTo(Cell)} are called. In this case, + * {@link #getKey()}/{@link #getValue()} returns null. At most other times, a key and value will be + * available. The general pattern is that you position the Scanner using the seekTo variants and + * then getKey and getValue. */ @InterfaceAudience.Private public interface HFileScanner extends Shipper, Closeable { /** - * SeekTo or just before the passed cell. Examine the return - * code to figure whether we found the cell or not. - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cells in the file. - * @param cell - * @return -1, if cell < c[0], no position; - * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. - * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. - * If there is no cell c[i+1] greater than or equal to the input cell, then the - * scanner will position itself at the end of the file and next() will return - * false when it is called. - * @throws IOException + * SeekTo or just before the passed cell. Examine the return code to figure whether + * we found the cell or not. Consider the cell stream of all the cells in the file, + * c[0] .. c[n], where there are n cells in the file. n * @return -1, if cell < + * c[0], no position; 0, such that c[i] = cell and scanner is left in position i; and 1, such that + * c[i] < cell, and scanner is left in position i. The scanner will position itself between + * c[i] and c[i+1] where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or + * equal to the input cell, then the scanner will position itself at the end of the file and + * next() will return false when it is called. n */ int seekTo(Cell cell) throws IOException; /** - * Reseek to or just before the passed cell. Similar to seekTo - * except that this can be called even if the scanner is not at the beginning - * of a file. - * This can be used to seek only to cells which come after the current position - * of the scanner. - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cellc in the file after - * current position of HFileScanner. - * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. - * If there is no cell c[i+1] greater than or equal to the input cell, then the - * scanner will position itself at the end of the file and next() will return + * Reseek to or just before the passed cell. Similar to seekTo except that this can + * be called even if the scanner is not at the beginning of a file. This can be used to seek only + * to cells which come after the current position of the scanner. Consider the cell stream of all + * the cells in the file, c[0] .. c[n], where there are n cellc in the file after + * current position of HFileScanner. The scanner will position itself between c[i] and c[i+1] + * where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or equal to the + * input cell, then the scanner will position itself at the end of the file and next() will return * false when it is called. * @param cell Cell to find (should be non-null) - * @return -1, if cell < c[0], no position; - * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. 
- * @throws IOException + * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in + * position i; and 1, such that c[i] < cell, and scanner is left in position i. n */ int reseekTo(Cell cell) throws IOException; /** - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cells in the file. + * Consider the cell stream of all the cells in the file, c[0] .. c[n], where there + * are n cells in the file. * @param cell Cell to find - * @return false if cell <= c[0] or true with scanner in position 'i' such - * that: c[i] < cell. Furthermore: there may be a c[i+1], such that - * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will - * return false (EOF). - * @throws IOException + * @return false if cell <= c[0] or true with scanner in position 'i' such that: c[i] < + * cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but + * there may also NOT be a c[i+1], and next() will return false (EOF). n */ boolean seekBefore(Cell cell) throws IOException; /** * Positions this scanner at the start of the file. - * @return False if empty file; i.e. a call to next would return false and - * the current key and value are undefined. - * @throws IOException + * @return False if empty file; i.e. a call to next would return false and the current key and + * value are undefined. n */ boolean seekTo() throws IOException; /** * Scans to the next entry in the file. - * @return Returns false if you are at the end otherwise true if more in file. - * @throws IOException + * @return Returns false if you are at the end otherwise true if more in file. n */ boolean next() throws IOException; /** - * Gets the current key in the form of a cell. You must call - * {@link #seekTo(Cell)} before this method. + * Gets the current key in the form of a cell. You must call {@link #seekTo(Cell)} before this + * method. * @return gets the current key as a Cell. */ Cell getKey(); /** - * Gets a buffer view to the current value. You must call - * {@link #seekTo(Cell)} before this method. - * - * @return byte buffer for the value. The limit is set to the value size, and - * the position is 0, the start of the buffer view. + * Gets a buffer view to the current value. You must call {@link #seekTo(Cell)} before this + * method. + * @return byte buffer for the value. The limit is set to the value size, and the position is 0, + * the start of the buffer view. */ ByteBuffer getValue(); @@ -129,8 +107,8 @@ public interface HFileScanner extends Shipper, Closeable { Cell getCell(); /** - * Convenience method to get a copy of the key as a string - interpreting the - * bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. + * Convenience method to get a copy of the key as a string - interpreting the bytes as UTF8. You + * must call {@link #seekTo(Cell)} before this method. * @return key as a string * @deprecated Since hbase-2.0.0 */ @@ -138,8 +116,8 @@ public interface HFileScanner extends Shipper, Closeable { String getKeyString(); /** - * Convenience method to get a copy of the value as a string - interpreting - * the bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. + * Convenience method to get a copy of the value as a string - interpreting the bytes as UTF8. You + * must call {@link #seekTo(Cell)} before this method. 
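The scanner javadoc above describes the intended call pattern: position with one of the seekTo variants, inspect the return code, then walk forward with next() and read cells with getCell(). A hedged usage sketch against the interface as documented here, assuming an already opened HFile.Reader and omitting the filesystem and CacheConfig setup:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class ScannerUsageSketch {
  /** Position at or after 'start' and print the row-key size of every remaining cell. */
  static void scanFrom(HFile.Reader reader, Configuration conf, Cell start) throws IOException {
    // Non-compaction read path: cache blocks, use positional reads.
    try (HFileScanner scanner = reader.getScanner(conf, true, true)) {
      int code = scanner.seekTo(start); // -1: key precedes the file, 0: exact hit, 1: left just before
      if (code == -1) {
        if (!scanner.seekTo()) {
          return;                       // empty file
        }
      } else if (code == 1 && !scanner.next()) {
        return;                         // 'start' is past the last cell in the file
      }
      do {
        Cell cell = scanner.getCell();
        System.out.println(CellUtil.cloneRow(cell).length + " byte row key");
      } while (scanner.next());
    }
  }
}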
* @return value as a string * @deprecated Since hbase-2.0.0 */ @@ -152,9 +130,8 @@ public interface HFileScanner extends Shipper, Closeable { HFile.Reader getReader(); /** - * @return True is scanner has had one of the seek calls invoked; i.e. - * {@link #seekBefore(Cell)} or {@link #seekTo()} or {@link #seekTo(Cell)}. - * Otherwise returns false. + * @return True is scanner has had one of the seek calls invoked; i.e. {@link #seekBefore(Cell)} + * or {@link #seekTo()} or {@link #seekTo(Cell)}. Otherwise returns false. */ boolean isSeeked(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java index 3f72b4adab32..5f31e148dbe0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,13 +22,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link HFile.Reader} to deal with stream read - * do not perform any prefetch operations (HFilePreadReader will do this). + * Implementation of {@link HFile.Reader} to deal with stream read do not perform any prefetch + * operations (HFilePreadReader will do this). */ @InterfaceAudience.Private public class HFileStreamReader extends HFileReaderImpl { public HFileStreamReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, - Configuration conf) throws IOException { + Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java index ec73f89631db..612f127e11ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,18 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private class HFileUtil { - /** guards against NullPointer - * utility which tries to seek on the DFSIS and will try an alternative source - * if the FSDataInputStream throws an NPE HBASE-17501 - * @param istream - * @param offset - * @throws IOException + /** + * guards against NullPointer utility which tries to seek on the DFSIS and will try an alternative + * source if the FSDataInputStream throws an NPE HBASE-17501 nnn */ - static public void seekOnMultipleSources(FSDataInputStream istream, long offset) throws IOException { + static public void seekOnMultipleSources(FSDataInputStream istream, long offset) + throws IOException { try { // attempt to seek inside of current blockReader istream.seek(offset); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 4275c368aa97..9170cbef4e70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutput; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.MetaCellComparator; @@ -64,14 +62,14 @@ public class HFileWriterImpl implements HFile.Writer { private static final long UNSET = -1; - /** if this feature is enabled, preCalculate encoded data size before real encoding happens*/ + /** if this feature is enabled, preCalculate encoded data size before real encoding happens */ public static final String UNIFIED_ENCODED_BLOCKSIZE_RATIO = "hbase.writer.unified.encoded.blocksize.ratio"; - /** Block size limit after encoding, used to unify encoded block Cache entry size*/ + /** Block size limit after encoding, used to unify encoded block Cache entry size */ private final int encodedBlockSizeLimit; - /** The Cell previously appended. Becomes the last cell in the file.*/ + /** The Cell previously appended. Becomes the last cell in the file. */ protected Cell lastCell = null; /** FileSystem stream to write into. */ @@ -102,12 +100,10 @@ public class HFileWriterImpl implements HFile.Writer { protected List metaData = new ArrayList<>(); /** - * First cell in a block. - * This reference should be short-lived since we write hfiles in a burst. + * First cell in a block. This reference should be short-lived since we write hfiles in a burst. */ protected Cell firstCellInBlock = null; - /** May be null if we were passed a stream. */ protected final Path path; @@ -115,14 +111,14 @@ public class HFileWriterImpl implements HFile.Writer { protected final CacheConfig cacheConf; /** - * Name for this object used when logging or in toString. Is either - * the result of a toString on stream or else name of passed file Path. + * Name for this object used when logging or in toString. 
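The HFileUtil helper above only exists to guard a seek against the NullPointerException described in HBASE-17501 and to fall back to another source. A generic, self-contained sketch of that seek-then-retry shape; the Seekable interface below is a local stand-in, not Hadoop's, and the exact fallback HBase performs may differ:

import java.io.IOException;

public class GuardedSeekSketch {
  interface Seekable {
    void seek(long offset) throws IOException;
    boolean seekToNewSource(long offset) throws IOException;
  }

  /** Try a plain seek first; if the underlying stream NPEs, retry against another source. */
  static void seekOnMultipleSources(Seekable in, long offset) throws IOException {
    try {
      in.seek(offset);
    } catch (NullPointerException e) {
      // The wrapped stream lost its current block reader; ask for a different source/replica.
      if (!in.seekToNewSource(offset)) {
        throw new IOException("Could not seek to " + offset + " on any source", e);
      }
    }
  }
}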
Is either the result of a toString on + * stream or else name of passed file Path. */ protected final String name; /** - * The data block encoding which will be used. - * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding. + * The data block encoding which will be used. {@link NoOpDataBlockEncoder#INSTANCE} if there is + * no encoding. */ protected final HFileDataBlockEncoder blockEncoder; @@ -131,7 +127,7 @@ public class HFileWriterImpl implements HFile.Writer { private int maxTagsLength = 0; /** KeyValue version in FileInfo */ - public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); + public static final byte[] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); /** Version for KeyValue which includes memstore timestamp */ public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; @@ -152,8 +148,8 @@ public class HFileWriterImpl implements HFile.Writer { protected long lastDataBlockOffset = UNSET; /** - * The last(stop) Cell of the previous data block. - * This reference should be short-lived since we write hfiles in a burst. + * The last(stop) Cell of the previous data block. This reference should be short-lived since we + * write hfiles in a burst. */ private Cell lastCellOfPreviousBlock = null; @@ -163,7 +159,7 @@ public class HFileWriterImpl implements HFile.Writer { protected long maxMemstoreTS = 0; public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path, - FSDataOutputStream outputStream, HFileContext fileContext) { + FSDataOutputStream outputStream, HFileContext fileContext) { this.outputStream = outputStream; this.path = path; this.name = path != null ? path.getName() : outputStream.toString(); @@ -177,40 +173,35 @@ public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path pat closeOutputStream = path != null; this.cacheConf = cacheConf; float encodeBlockSizeRatio = conf.getFloat(UNIFIED_ENCODED_BLOCKSIZE_RATIO, 1f); - this.encodedBlockSizeLimit = (int)(hFileContext.getBlocksize() * encodeBlockSizeRatio); + this.encodedBlockSizeLimit = (int) (hFileContext.getBlocksize() * encodeBlockSizeRatio); finishInit(conf); if (LOG.isTraceEnabled()) { - LOG.trace("Writer" + (path != null ? " for " + path : "") + - " initialized with cacheConf: " + cacheConf + - " fileContext: " + fileContext); + LOG.trace("Writer" + (path != null ? " for " + path : "") + " initialized with cacheConf: " + + cacheConf + " fileContext: " + fileContext); } } /** * Add to the file info. All added key/value pairs can be obtained using * {@link HFile.Reader#getHFileInfo()}. - * * @param k Key * @param v Value * @throws IOException in case the key or the value are invalid */ @Override - public void appendFileInfo(final byte[] k, final byte[] v) - throws IOException { + public void appendFileInfo(final byte[] k, final byte[] v) throws IOException { fileInfo.append(k, v, true); } /** - * Sets the file info offset in the trailer, finishes up populating fields in - * the file info, and writes the file info into the given data output. The - * reason the data output is not always {@link #outputStream} is that we store - * file info as a block in version 2. - * + * Sets the file info offset in the trailer, finishes up populating fields in the file info, and + * writes the file info into the given data output. The reason the data output is not always + * {@link #outputStream} is that we store file info as a block in version 2. 
* @param trailer fixed file trailer - * @param out the data output to write the file info to + * @param out the data output to write the file info to */ protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out) - throws IOException { + throws IOException { trailer.setFileInfoOffset(outputStream.getPos()); finishFileInfo(); long startTime = EnvironmentEdgeManager.currentTime(); @@ -222,9 +213,9 @@ public long getPos() throws IOException { return outputStream.getPos(); } + /** * Checks that the given Cell's key does not violate the key order. - * * @param cell Cell whose key to check. * @return true if the key is duplicate * @throws IOException if the key or the key order is wrong @@ -254,15 +245,15 @@ private String getLexicalErrorMessage(Cell cell) { sb.append(cell); sb.append(", lastCell = "); sb.append(lastCell); - //file context includes HFile path and optionally table and CF of file being written + // file context includes HFile path and optionally table and CF of file being written sb.append("fileContext="); sb.append(hFileContext); return sb.toString(); } /** Checks the given value for validity. */ - protected void checkValue(final byte[] value, final int offset, - final int length) throws IOException { + protected void checkValue(final byte[] value, final int offset, final int length) + throws IOException { if (value == null) { throw new IOException("Value cannot be null"); } @@ -278,8 +269,8 @@ public Path getPath() { @Override public String toString() { - return "writer=" + (path != null ? path.toString() : null) + ", name=" - + name + ", compression=" + hFileContext.getCompression().getName(); + return "writer=" + (path != null ? path.toString() : null) + ", name=" + name + ", compression=" + + hFileContext.getCompression().getName(); } public static Compression.Algorithm compressionByName(String algoName) { @@ -290,10 +281,9 @@ public static Compression.Algorithm compressionByName(String algoName) { } /** A helper method to create HFile output streams in constructors */ - protected static FSDataOutputStream createOutputStream(Configuration conf, - FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { - FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); + protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, + Path path, InetSocketAddress[] favoredNodes) throws IOException { + FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); return FSUtils.create(conf, fs, path, perms, favoredNodes); } @@ -302,17 +292,14 @@ protected void finishInit(final Configuration conf) { if (blockWriter != null) { throw new IllegalStateException("finishInit called twice"); } - blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext, - cacheConf.getByteBuffAllocator()); + blockWriter = + new HFileBlock.Writer(conf, blockEncoder, hFileContext, cacheConf.getByteBuffAllocator()); // Data block index writer boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter, - cacheIndexesOnWrite ? cacheConf : null, - cacheIndexesOnWrite ? name : null); - dataBlockIndexWriter.setMaxChunkSize( - HFileBlockIndex.getMaxChunkSize(conf)); - dataBlockIndexWriter.setMinIndexNumEntries( - HFileBlockIndex.getMinIndexNumEntries(conf)); + cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? 
name : null); + dataBlockIndexWriter.setMaxChunkSize(HFileBlockIndex.getMaxChunkSize(conf)); + dataBlockIndexWriter.setMinIndexNumEntries(HFileBlockIndex.getMinIndexNumEntries(conf)); inlineBlockWriters.add(dataBlockIndexWriter); // Meta data block index writer @@ -326,15 +313,17 @@ protected void finishInit(final Configuration conf) { protected void checkBlockBoundary() throws IOException { // For encoder like prefixTree, encoded size is not available, so we have to compare both // encoded size and unencoded size to blocksize limit. - if (blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit - || blockWriter.blockSizeWritten() >= hFileContext.getBlocksize()) { + if ( + blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit + || blockWriter.blockSizeWritten() >= hFileContext.getBlocksize() + ) { finishBlock(); writeInlineBlocks(false); newBlock(); } } - /** Clean up the data block that is currently being written.*/ + /** Clean up the data block that is currently being written. */ private void finishBlock() throws IOException { if (!blockWriter.isWriting() || blockWriter.blockSizeWritten() == 0) { return; @@ -359,14 +348,14 @@ private void finishBlock() throws IOException { } /** - * Try to return a Cell that falls between left and - * right but that is shorter; i.e. takes up less space. This - * trick is used building HFile block index. Its an optimization. It does not - * always work. In this case we'll just return the right cell. + * Try to return a Cell that falls between left and right but that is + * shorter; i.e. takes up less space. This trick is used building HFile block index. Its an + * optimization. It does not always work. In this case we'll just return the right + * cell. * @return A cell that sorts between left and right. */ public static Cell getMidpoint(final CellComparator comparator, final Cell left, - final Cell right) { + final Cell right) { if (right == null) { throw new IllegalArgumentException("right cell can not be null"); } @@ -380,8 +369,8 @@ public static Cell getMidpoint(final CellComparator comparator, final Cell left, return right; } byte[] midRow; - boolean bufferBacked = left instanceof ByteBufferExtendedCell - && right instanceof ByteBufferExtendedCell; + boolean bufferBacked = + left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell; if (bufferBacked) { midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getRowByteBuffer(), ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(), @@ -394,7 +383,7 @@ public static Cell getMidpoint(final CellComparator comparator, final Cell left, if (midRow != null) { return PrivateCellUtil.createFirstOnRow(midRow); } - //Rows are same. Compare on families. + // Rows are same. Compare on families. if (bufferBacked) { midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), @@ -429,43 +418,42 @@ public static Cell getMidpoint(final CellComparator comparator, final Cell left, /** * Try to get a byte array that falls between left and right as short as possible with * lexicographical order; - * - * @return Return a new array that is between left and right and minimally - * sized else just return null if left == right. + * @return Return a new array that is between left and right and minimally sized else just return + * null if left == right. 
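checkBlockBoundary() above finishes the current data block once either the encoded bytes written reach the unified encoded limit (the block size scaled by hbase.writer.unified.encoded.blocksize.ratio) or the unencoded bytes written reach the configured block size. A stripped-down sketch of that cut-off, with illustrative sizes:

public class BlockBoundarySketch {
  static final int BLOCK_SIZE = 64 * 1024;   // hfile block size (illustrative)
  static final float ENCODED_RATIO = 1.0f;   // hbase.writer.unified.encoded.blocksize.ratio
  static final int ENCODED_LIMIT = (int) (BLOCK_SIZE * ENCODED_RATIO);

  /** True when the block being written should be finished and a new one started. */
  static boolean shouldFinishBlock(int encodedBytesWritten, int rawBytesWritten) {
    return encodedBytesWritten >= ENCODED_LIMIT || rawBytesWritten >= BLOCK_SIZE;
  }

  public static void main(String[] args) {
    System.out.println(shouldFinishBlock(10 * 1024, 40 * 1024)); // false: both under their limits
    System.out.println(shouldFinishBlock(65 * 1024, 40 * 1024)); // true: encoded size hit the limit
    System.out.println(shouldFinishBlock(10 * 1024, 64 * 1024)); // true: raw size hit the block size
  }
}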
*/ private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int leftOffset, - final int leftLength, final byte[] rightArray, final int rightOffset, final int rightLength) { + final int leftLength, final byte[] rightArray, final int rightOffset, final int rightLength) { int minLength = leftLength < rightLength ? leftLength : rightLength; int diffIdx = 0; for (; diffIdx < minLength; diffIdx++) { byte leftByte = leftArray[leftOffset + diffIdx]; byte rightByte = rightArray[rightOffset + diffIdx]; if ((leftByte & 0xff) > (rightByte & 0xff)) { - throw new IllegalArgumentException("Left byte array sorts after right row; left=" + Bytes - .toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + Bytes - .toStringBinary(rightArray, rightOffset, rightLength)); + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + Bytes.toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + + Bytes.toStringBinary(rightArray, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { - //right is prefix of left - throw new IllegalArgumentException("Left byte array sorts after right row; left=" + Bytes - .toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + Bytes - .toStringBinary(rightArray, rightOffset, rightLength)); + // right is prefix of left + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + Bytes.toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + + Bytes.toStringBinary(rightArray, rightOffset, rightLength)); } else if (leftLength < rightLength) { - //left is prefix of right. + // left is prefix of right. byte[] minimumMidpointArray = new byte[minLength + 1]; System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { - //left == right + // left == right return null; } } - //Note that left[diffIdx] can never be equal to 0xff since left < right + // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; System.arraycopy(leftArray, leftOffset, minimumMidpointArray, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); @@ -475,46 +463,43 @@ private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int /** * Try to create a new byte array that falls between left and right as short as possible with * lexicographical order. - * - * @return Return a new array that is between left and right and minimally - * sized else just return null if left == right. + * @return Return a new array that is between left and right and minimally sized else just return + * null if left == right. */ private static byte[] getMinimumMidpointArray(ByteBuffer left, int leftOffset, int leftLength, - ByteBuffer right, int rightOffset, int rightLength) { + ByteBuffer right, int rightOffset, int rightLength) { int minLength = leftLength < rightLength ? 
leftLength : rightLength; int diffIdx = 0; for (; diffIdx < minLength; diffIdx++) { int leftByte = ByteBufferUtils.toByte(left, leftOffset + diffIdx); int rightByte = ByteBufferUtils.toByte(right, rightOffset + diffIdx); if ((leftByte & 0xff) > (rightByte & 0xff)) { - throw new IllegalArgumentException( - "Left byte array sorts after right row; left=" + ByteBufferUtils - .toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils - .toStringBinary(right, rightOffset, rightLength)); + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { - //right is prefix of left - throw new IllegalArgumentException( - "Left byte array sorts after right row; left=" + ByteBufferUtils - .toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils - .toStringBinary(right, rightOffset, rightLength)); + // right is prefix of left + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftLength < rightLength) { - //left is prefix of right. + // left is prefix of right. byte[] minimumMidpointArray = new byte[minLength + 1]; - ByteBufferUtils - .copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, minLength + 1); + ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, + minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { - //left == right + // left == right return null; } } - //Note that left[diffIdx] can never be equal to 0xff since left < right + // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, left, leftOffset, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); @@ -527,11 +512,10 @@ private void writeInlineBlocks(boolean closing) throws IOException { while (ibw.shouldWriteBlock(closing)) { long offset = outputStream.getPos(); boolean cacheThisBlock = ibw.getCacheOnWrite(); - ibw.writeInlineBlock(blockWriter.startWriting( - ibw.getInlineBlockType())); + ibw.writeInlineBlock(blockWriter.startWriting(ibw.getInlineBlockType())); blockWriter.writeHeaderAndData(outputStream); ibw.blockWritten(offset, blockWriter.getOnDiskSizeWithHeader(), - blockWriter.getUncompressedSizeWithoutHeader()); + blockWriter.getUncompressedSizeWithoutHeader()); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); if (cacheThisBlock) { @@ -543,15 +527,14 @@ private void writeInlineBlocks(boolean closing) throws IOException { /** * Caches the last written HFile block. - * @param offset the offset of the block we want to cache. Used to determine - * the cache key. + * @param offset the offset of the block we want to cache. Used to determine the cache key. 
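getMinimumMidpointArray() above computes the shortest fake key that still separates the last key of one block from the first key of the next, so the block index can store a short separator instead of a full key. A self-contained restatement of the byte[] variant of that algorithm, without HBase's Bytes helpers:

import java.util.Arrays;

public class MidpointKeySketch {
  /**
   * Shortest byte array that sorts after 'left' and not after 'right', or null when the two are
   * equal. This is the fake-key trick: the index entry only needs to separate the blocks.
   */
  static byte[] shortestSeparator(byte[] left, byte[] right) {
    int minLength = Math.min(left.length, right.length);
    int diffIdx = 0;
    while (diffIdx < minLength && left[diffIdx] == right[diffIdx]) {
      diffIdx++;
    }
    if (diffIdx == minLength) {
      if (left.length > right.length) {
        throw new IllegalArgumentException("left sorts after right");
      }
      if (left.length == right.length) {
        return null;                      // identical keys: nothing shorter exists
      }
      byte[] sep = Arrays.copyOf(right, minLength + 1);
      sep[minLength] = 0x00;              // left is a strict prefix of right
      return sep;
    }
    if ((left[diffIdx] & 0xff) > (right[diffIdx] & 0xff)) {
      throw new IllegalArgumentException("left sorts after right");
    }
    byte[] sep = Arrays.copyOf(left, diffIdx + 1);
    sep[diffIdx]++;                       // left[diffIdx] < right[diffIdx] <= 0xff, so no unsigned overflow
    return sep;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(
        shortestSeparator("the quick brown fox".getBytes(), "the who".getBytes()))); // "the r" as bytes
  }
}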
*/ private void doCacheOnWrite(long offset) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); try { cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), - cacheFormatBlock); + cacheFormatBlock); } finally { // refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent cacheFormatBlock.release(); @@ -572,15 +555,11 @@ protected void newBlock() throws IOException { } /** - * Add a meta block to the end of the file. Call before close(). Metadata - * blocks are expensive. Fill one with a bunch of serialized data rather than - * do a metadata block per metadata instance. If metadata is small, consider - * adding to file info using {@link #appendFileInfo(byte[], byte[])} - * - * @param metaBlockName - * name of the block - * @param content - * will call readFields to get data later (DO NOT REUSE) + * Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive. + * Fill one with a bunch of serialized data rather than do a metadata block per metadata instance. + * If metadata is small, consider adding to file info using {@link #appendFileInfo(byte[], byte[])} + * @param metaBlockName name of the block + * @param content will call readFields to get data later (DO NOT REUSE) */ @Override public void appendMetaBlock(String metaBlockName, Writable content) { @@ -589,8 +568,7 @@ public void appendMetaBlock(String metaBlockName, Writable content) { for (i = 0; i < metaNames.size(); ++i) { // stop when the current key is greater than our own byte[] cur = metaNames.get(i); - if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, - key.length) > 0) { + if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, key.length) > 0) { break; } } @@ -627,7 +605,7 @@ public void close() throws IOException { // Add the new meta block to the meta index. metaBlockIndexWriter.addEntry(metaNames.get(i), offset, - blockWriter.getOnDiskSizeWithHeader()); + blockWriter.getOnDiskSizeWithHeader()); } } @@ -644,8 +622,8 @@ public void close() throws IOException { trailer.setLoadOnOpenOffset(rootIndexOffset); // Meta block index. - metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting( - BlockType.ROOT_INDEX), "meta"); + metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting(BlockType.ROOT_INDEX), + "meta"); blockWriter.writeHeaderAndData(outputStream); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); @@ -660,21 +638,19 @@ public void close() throws IOException { totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); // Load-on-open data supplied by higher levels, e.g. Bloom filters. - for (BlockWritable w : additionalLoadOnOpenData){ + for (BlockWritable w : additionalLoadOnOpenData) { blockWriter.writeBlock(w, outputStream); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); } // Now finish off the trailer.
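The getMinimumMidpointArray hunks earlier in this HFileWriterImpl diff implement a "shortest separator": the block index does not need a block's real last key, only some key that sorts after every cell in the finished block and no later than the first cell of the next block, so the writer keeps the common prefix plus one byte. A minimal standalone sketch of that idea, with illustrative names and not the HBase method itself:

import java.util.Arrays;

/** Illustrative sketch only; mirrors the shortest-separator logic in the hunks above. */
final class ShortestSeparator {

  /** Returns a short key k with left < k <= right (bytes compared as unsigned), or null if left == right. */
  static byte[] between(byte[] left, byte[] right) {
    int minLength = Math.min(left.length, right.length);
    int diffIdx = 0;
    while (diffIdx < minLength && left[diffIdx] == right[diffIdx]) {
      diffIdx++;
    }
    if (diffIdx == minLength) {
      if (left.length > right.length) {
        throw new IllegalArgumentException("left sorts after right");
      }
      if (left.length == right.length) {
        return null; // identical keys, nothing shorter exists
      }
      // left is a prefix of right: append a 0x00 byte to the common prefix
      byte[] mid = Arrays.copyOf(left, minLength + 1);
      mid[minLength] = 0x00;
      return mid;
    }
    if ((left[diffIdx] & 0xff) > (right[diffIdx] & 0xff)) {
      throw new IllegalArgumentException("left sorts after right");
    }
    // Keep the common prefix plus the first differing byte, bumped by one.
    byte[] mid = Arrays.copyOfRange(left, 0, diffIdx + 1);
    mid[diffIdx] = (byte) (mid[diffIdx] + 1); // cannot overflow: left[diffIdx] < right[diffIdx] <= 0xff
    return mid;
  }

  public static void main(String[] args) {
    // "the quick" and "the swift" share the prefix "the ", so the separator is "the r".
    System.out.println(new String(between("the quick".getBytes(), "the swift".getBytes())));
  }
}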
trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels()); - trailer.setUncompressedDataIndexSize( - dataBlockIndexWriter.getTotalUncompressedSize()); + trailer.setUncompressedDataIndexSize(dataBlockIndexWriter.getTotalUncompressedSize()); trailer.setFirstDataBlockOffset(firstDataBlockOffset); trailer.setLastDataBlockOffset(lastDataBlockOffset); trailer.setComparatorClass(this.hFileContext.getCellComparator().getClass()); trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries()); - finishClose(trailer); blockWriter.release(); @@ -695,16 +671,15 @@ public void addDeleteFamilyBloomFilter(final BloomFilterWriter bfw) { this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META); } - private void addBloomFilter(final BloomFilterWriter bfw, - final BlockType blockType) { + private void addBloomFilter(final BloomFilterWriter bfw, final BlockType blockType) { if (bfw.getKeyCount() <= 0) { return; } - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - "is not supported"); + if ( + blockType != BlockType.GENERAL_BLOOM_META && blockType != BlockType.DELETE_FAMILY_BLOOM_META + ) { + throw new RuntimeException("Block Type: " + blockType.toString() + "is not supported"); } additionalLoadOnOpenData.add(new BlockWritable() { @Override @@ -729,11 +704,8 @@ public HFileContext getFileContext() { } /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell - * Cell to add. Cannot be empty nor null. + * Add key/value to file. Keys must be added in an order that agrees with the Comparator passed on construction. + * @param cell Cell to add. Cannot be empty nor null. */ @Override public void append(final Cell cell) throws IOException { @@ -792,20 +764,18 @@ protected void finishFileInfo() throws IOException { if (lastCell != null) { // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean // byte buffer. Won't take a tuple. - byte [] lastKey = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); + byte[] lastKey = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); fileInfo.append(HFileInfo.LASTKEY, lastKey, false); } // Average key length. - int avgKeyLen = - entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); + int avgKeyLen = entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); fileInfo.append(HFileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false); fileInfo.append(HFileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), false); // Average value length. - int avgValueLen = - entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); + int avgValueLen = entryCount == 0 ?
0 : (int) (totalValueLength / entryCount); fileInfo.append(HFileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); if (hFileContext.isIncludesTags()) { // When tags are not being written in this file, MAX_TAGS_LEN is excluded @@ -831,14 +801,14 @@ protected void finishClose(FixedFileTrailer trailer) throws IOException { if (cryptoContext != Encryption.Context.NONE) { // Wrap the context's key and write it as the encryption metadata, the wrapper includes // all information needed for decryption - trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), - cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()), + trailer.setEncryptionKey(EncryptionUtil.wrapKey( + cryptoContext.getConf(), cryptoContext.getConf() + .get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cryptoContext.getKey())); } // Now we can finish the close trailer.setMetaIndexCount(metaNames.size()); - trailer.setTotalUncompressedBytes(totalUncompressedBytes+ trailer.getTrailerSize()); + trailer.setTotalUncompressedBytes(totalUncompressedBytes + trailer.getTrailerSize()); trailer.setEntryCount(entryCount); trailer.setCompressionCodec(hFileContext.getCompression()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java index 8b85c68f9a58..bcc5466dcc0b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -1,22 +1,20 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io.hfile; import org.apache.yetus.audience.InterfaceAudience; @@ -24,13 +22,13 @@ @InterfaceAudience.Private public class InclusiveCombinedBlockCache extends CombinedBlockCache { public InclusiveCombinedBlockCache(FirstLevelBlockCache l1, BlockCache l2) { - super(l1,l2); + super(l1, l2); l1.setVictimCache(l2); } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { // On all external cache set ups the lru should have the l2 cache set as the victimHandler // Because of that all requests that miss inside of the lru block cache will be // tried in the l2 block cache. @@ -38,9 +36,8 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, } /** - * * @param cacheKey The block's cache key. - * @param buf The block contents wrapped in a ByteBuffer. + * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory. This parameter is only useful for * the L1 lru cache. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java index 50b195dd8e96..2841d9af16af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java @@ -21,10 +21,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * An on heap block cache implementation extended LruBlockCache and only cache index block. - * This block cache should be only used by - * {@link org.apache.hadoop.hbase.client.ClientSideRegionScanner} that normally considers to be - * used by client resides out of the region server, e.g. a container of a map reduce job. + * An on heap block cache implementation extended LruBlockCache and only cache index block. This + * block cache should be only used by {@link org.apache.hadoop.hbase.client.ClientSideRegionScanner} + * that normally considers to be used by client resides out of the region server, e.g. a container + * of a map reduce job. **/ @InterfaceAudience.Private public class IndexOnlyLruBlockCache extends LruBlockCache { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java index 12ae6a50a3c2..040ca6b9164f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,43 +19,36 @@ import java.io.DataOutput; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * A way to write "inline" blocks into an {@link HFile}. Inline blocks are - * interspersed with data blocks. For example, Bloom filter chunks and - * leaf-level blocks of a multi-level block index are stored as inline blocks. + * A way to write "inline" blocks into an {@link HFile}. Inline blocks are interspersed with data + * blocks. 
For example, Bloom filter chunks and leaf-level blocks of a multi-level block index are + * stored as inline blocks. */ @InterfaceAudience.Private public interface InlineBlockWriter { /** - * Determines whether there is a new block to be written out. - * - * @param closing - * whether the file is being closed, in which case we need to write - * out all available data and not wait to accumulate another block + * Determines whether there is a new block to be written out. + * @param closing whether the file is being closed, in which case we need to write out all + * available data and not wait to accumulate another block */ boolean shouldWriteBlock(boolean closing); /** - * Writes the block to the provided stream. Must not write any magic records. - * Called only if {@link #shouldWriteBlock(boolean)} returned true. - * - * @param out - * a stream (usually a compressing stream) to write the block to + * Writes the block to the provided stream. Must not write any magic records. Called only if + * {@link #shouldWriteBlock(boolean)} returned true. + * @param out a stream (usually a compressing stream) to write the block to */ void writeInlineBlock(DataOutput out) throws IOException; /** - * Called after a block has been written, and its offset, raw size, and - * compressed size have been determined. Can be used to add an entry to a - * block index. If this type of inline blocks needs a block index, the inline - * block writer is responsible for maintaining it. - * - * @param offset the offset of the block in the stream - * @param onDiskSize the on-disk size of the block + * Called after a block has been written, and its offset, raw size, and compressed size have been + * determined. Can be used to add an entry to a block index. If this type of inline blocks needs a + * block index, the inline block writer is responsible for maintaining it. + * @param offset the offset of the block in the stream + * @param onDiskSize the on-disk size of the block * @param uncompressedSize the uncompressed size of the block */ void blockWritten(long offset, int onDiskSize, int uncompressedSize); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java index d0526656a3b2..eed3a53acfe4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java index 494a588aadb8..87932074bff1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -25,8 +25,6 @@ import java.util.List; import java.util.Map; import java.util.PriorityQueue; -import java.util.SortedSet; -import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -49,14 +47,13 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This realisation improve performance of classical LRU - * cache up to 3 times via reduce GC job. + * This implementation improves performance of the classical LRU cache up to 3 times by reducing + * the GC load. *

        * The classical block cache implementation that is memory-aware using {@link HeapSize}, - * memory-bound using an - * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a - * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} - * operations. + * memory-bound using an LRU eviction algorithm, and concurrent: backed by a + * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving constant-time + * {@link #cacheBlock} and {@link #getBlock} operations. *
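The design described above reduces to a very small shape: a ConcurrentHashMap gives constant-time gets and puts without a global lock, an atomic counter tracks the cache size, and a separate daemon thread does the evicting so callers never pay for it. A toy sketch of that shape, illustrative only (entry-count based rather than byte based, nothing like the real class):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/** Toy cache showing the map + size counter + background eviction shape described above. */
final class TinyBlockCache<K, V> {
  private final ConcurrentHashMap<K, V> map = new ConcurrentHashMap<>();
  private final ConcurrentHashMap<K, Long> lastAccess = new ConcurrentHashMap<>();
  private final AtomicLong size = new AtomicLong();
  private final long maxEntries;

  TinyBlockCache(long maxEntries) {
    this.maxEntries = maxEntries;
    Thread evictor = new Thread(this::evictLoop, "tiny-cache-evictor");
    evictor.setDaemon(true);
    evictor.start();
  }

  void cacheBlock(K key, V value) {
    if (map.putIfAbsent(key, value) == null) {
      size.incrementAndGet();
    }
    lastAccess.put(key, System.nanoTime());
  }

  V getBlock(K key) {
    V v = map.get(key); // constant-time lookup, no cache-wide locking
    if (v != null) {
      lastAccess.put(key, System.nanoTime());
    }
    return v;
  }

  private void evictLoop() {
    while (true) {
      if (size.get() > maxEntries) {
        // Evict the least recently touched entry; the real cache frees bytes, not entries.
        lastAccess.entrySet().stream().min(Map.Entry.comparingByValue()).ifPresent(e -> {
          if (map.remove(e.getKey()) != null) {
            size.decrementAndGet();
          }
          lastAccess.remove(e.getKey());
        });
      } else {
        try {
          Thread.sleep(100); // idle until there is something to do
        } catch (InterruptedException ie) {
          return;
        }
      }
    }
  }
}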

        * Contains three levels of block priority to allow for scan-resistance and in-memory families * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An @@ -91,57 +88,50 @@ *
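A tiny sketch of the single/multi/in-memory priority idea from the paragraph above (names illustrative, not the real classes): a block enters the single-access bucket, is promoted on a second access, and blocks of in-memory column families live in their own bucket, which is what gives the cache its scan-resistance.

/** Sketch of the three priorities: SINGLE is evicted first, MULTI after promotion, MEMORY for in-memory CFs. */
enum BlockPriority { SINGLE, MULTI, MEMORY }

final class PrioritizedBlock {
  private BlockPriority priority;

  PrioritizedBlock(boolean inMemoryColumnFamily) {
    // Scan-resistance: a block read once by a large scan stays in the SINGLE bucket
    // and is evicted first, so it cannot push out frequently used (MULTI) blocks.
    this.priority = inMemoryColumnFamily ? BlockPriority.MEMORY : BlockPriority.SINGLE;
  }

  void touch() {
    if (priority == BlockPriority.SINGLE) {
      priority = BlockPriority.MULTI; // promoted after a second access
    }
  }

  BlockPriority priority() {
    return priority;
  }
}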

        * Adaptive LRU cache lets speed up performance while we are reading much more data than can fit * into BlockCache and it is the cause of a high rate of evictions. This in turn leads to heavy - * Garbage Collector works. So a lot of blocks put into BlockCache but never read, but spending - * a lot of CPU resources for cleaning. We could avoid this situation via parameters: + * Garbage Collector works. So a lot of blocks put into BlockCache but never read, but spending a + * lot of CPU resources for cleaning. We could avoid this situation via parameters: *

        - * hbase.lru.cache.heavy.eviction.count.limit - set how many times we have to run the - * eviction process that starts to avoid putting data to BlockCache. By default it is 0 and it - * meats the feature will start at the beginning. But if we have some times short reading the same - * data and some times long-term reading - we can divide it by this parameter. For example we know - * that our short reading used to be about 1 minutes, then we have to set the parameter about 10 - * and it will enable the feature only for long time massive reading (after ~100 seconds). So when - * we use short-reading and want all of them in the cache we will have it (except for eviction of - * course). When we use long-term heavy reading the feature will be enabled after some time and - * bring better performance. + * hbase.lru.cache.heavy.eviction.count.limit - set how many times we have to run the + * eviction process that starts to avoid putting data to BlockCache. By default it is 0 and it meats + * the feature will start at the beginning. But if we have some times short reading the same data + * and some times long-term reading - we can divide it by this parameter. For example we know that + * our short reading used to be about 1 minutes, then we have to set the parameter about 10 and it + * will enable the feature only for long time massive reading (after ~100 seconds). So when we use + * short-reading and want all of them in the cache we will have it (except for eviction of course). + * When we use long-term heavy reading the feature will be enabled after some time and bring better + * performance. *
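Read as code, and assuming one eviction period is roughly 10 seconds as the paragraph says, the count limit simply delays the throttling mode until the eviction pressure has lasted long enough; a sketch of that gating, not the real eviction thread:

/** Illustrative only: how a count limit of 10 delays the cache-skipping mode by ~100 seconds. */
final class HeavyEvictionGate {
  private final int countLimit;        // e.g. hbase.lru.cache.heavy.eviction.count.limit = 10
  private int consecutiveHeavyPeriods; // incremented once per ~10 s eviction period

  HeavyEvictionGate(int countLimit) {
    this.countLimit = countLimit;
  }

  /** Call once per eviction period, saying whether that period freed more than the configured limit. */
  boolean shouldThrottleCaching(boolean periodWasHeavy) {
    consecutiveHeavyPeriods = periodWasHeavy ? consecutiveHeavyPeriods + 1 : 0;
    // With countLimit = 10 and ~10 s periods, throttling starts after ~100 s of sustained pressure,
    // so short bursts of reading are still fully cached.
    return consecutiveHeavyPeriods > countLimit;
  }
}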

        * hbase.lru.cache.heavy.eviction.mb.size.limit - set how many bytes in 10 seconds desirable * putting into BlockCache (and evicted from it). The feature will try to reach this value and - * maintain it. Don't try to set it too small because it leads to premature exit from this mode. - * For powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB. Average system - * (~10 cores) 200-300 MB. Some weak systems (2-5 cores) may be good with 50-100 MB. - * How it works: we set the limit and after each ~10 second calculate how many bytes were freed. - * Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100; - * For example we set the limit = 500 and were evicted 2000 MB. Overhead is: - * 2000 * 100 / 500 - 100 = 300% - * The feature is going to reduce a percent caching data blocks and fit evicted bytes closer to - * 100% (500 MB). Some kind of an auto-scaling. - * If freed bytes less then the limit we have got negative overhead. - * For example if were freed 200 MB: - * 200 * 100 / 500 - 100 = -60% - * The feature will increase the percent of caching blocks. - * That leads to fit evicted bytes closer to 100% (500 MB). - * The current situation we can find out in the log of RegionServer: - * BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current caching - * DataBlock (%): 100 - means no eviction, 100% blocks is caching - * BlockCache evicted (MB): 2000, overhead (%): 300, heavy eviction counter: 1, current caching - * DataBlock (%): 97 - means eviction begin, reduce of caching blocks by 3%. - * It help to tune your system and find out what value is better set. Don't try to reach 0% - * overhead, it is impossible. Quite good 50-100% overhead, - * it prevents premature exit from this mode. + * maintain it. Don't try to set it too small because it leads to premature exit from this mode. For + * powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB. Average system (~10 + * cores) 200-300 MB. Some weak systems (2-5 cores) may be good with 50-100 MB. How it works: we set + * the limit and after each ~10 second calculate how many bytes were freed. Overhead = Freed Bytes + * Sum (MB) * 100 / Limit (MB) - 100; For example we set the limit = 500 and were evicted 2000 MB. + * Overhead is: 2000 * 100 / 500 - 100 = 300% The feature is going to reduce a percent caching data + * blocks and fit evicted bytes closer to 100% (500 MB). Some kind of an auto-scaling. If freed + * bytes less then the limit we have got negative overhead. For example if were freed 200 MB: 200 * + * 100 / 500 - 100 = -60% The feature will increase the percent of caching blocks. That leads to fit + * evicted bytes closer to 100% (500 MB). The current situation we can find out in the log of + * RegionServer: BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current + * caching DataBlock (%): 100 - means no eviction, 100% blocks is caching BlockCache evicted (MB): + * 2000, overhead (%): 300, heavy eviction counter: 1, current caching DataBlock (%): 97 - means + * eviction begin, reduce of caching blocks by 3%. It help to tune your system and find out what + * value is better set. Don't try to reach 0% overhead, it is impossible. Quite good 50-100% + * overhead, it prevents premature exit from this mode. *
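The overhead formula above, written out as code with the same two worked examples (limit 500 MB; 2000 MB and 200 MB freed):

/** Overhead (%) = freedMb * 100 / limitMb - 100, exactly as described above. */
final class EvictionOverhead {
  static long overheadPercent(long freedMb, long limitMb) {
    return freedMb * 100 / limitMb - 100;
  }

  public static void main(String[] args) {
    System.out.println(overheadPercent(2000, 500)); // 300  -> evicting far more than desired
    System.out.println(overheadPercent(200, 500));  // -60  -> evicting less than desired
  }
}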

        * hbase.lru.cache.heavy.eviction.overhead.coefficient - set how fast we want to get the * result. If we know that our reading is heavy for a long time, we don't want to wait and can * increase the coefficient and get good performance sooner. But if we aren't sure we can do it - * slowly and it could prevent premature exit from this mode. So, when the coefficient is higher - * we can get better performance when heavy reading is stable. But when reading is changing we - * can adjust to it and set the coefficient to lower value. - * For example, we set the coefficient = 0.01. It means the overhead (see above) will be - * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, - * if the overhead = 300% and the coefficient = 0.01, - * then percent of caching blocks will reduce by 3%. - * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term - * fluctuation and we will try to stay in this mode. It helps avoid premature exit during - * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. + * slowly and it could prevent premature exit from this mode. So, when the coefficient is higher we + * can get better performance when heavy reading is stable. But when reading is changing we can + * adjust to it and set the coefficient to lower value. For example, we set the coefficient = 0.01. + * It means the overhead (see above) will be multiplied by 0.01 and the result is the value of + * reducing percent caching blocks. For example, if the overhead = 300% and the coefficient = 0.01, + * then percent of caching blocks will reduce by 3%. Similar logic when overhead has got negative + * value (overshooting). Maybe it is just short-term fluctuation and we will try to stay in this + * mode. It helps avoid premature exit during short-term fluctuation. Backpressure has simple logic: + * more overshooting - more caching blocks. *
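And the coefficient turns that overhead figure into a step change of the caching percentage. A sketch of the backpressure step, under the assumption (implied above) that 100 means "cache every data block":

/** Sketch of the adjustment: overhead 300% with coefficient 0.01 reduces caching by 3%. */
final class CachingPercentAdjuster {
  private int cacheDataBlockPercent = 100; // start by caching every data block

  void adjust(long overheadPercent, float coefficient) {
    int change = (int) (overheadPercent * coefficient); // e.g. 300 * 0.01 = 3
    if (change > 0) {
      // Evicting too much: cache a smaller share of data blocks.
      cacheDataBlockPercent = Math.max(1, cacheDataBlockPercent - change);
    } else {
      // Overshooting the other way (negative overhead): cache more again.
      cacheDataBlockPercent = Math.min(100, cacheDataBlockPercent - change);
    }
  }

  int cacheDataBlockPercent() {
    return cacheDataBlockPercent;
  }
}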

        * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 */ @@ -175,14 +165,14 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { "hbase.lru.blockcache.memory.percentage"; /** - * Configuration key to force data-block always (except in-memory are too much) - * cached in memory for in-memory hfile, unlike inMemory, which is a column-family - * configuration, inMemoryForceMode is a cluster-wide configuration + * Configuration key to force data-block always (except in-memory are too much) cached in memory + * for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode + * is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = "hbase.lru.rs.inmemoryforcemode"; - /* Default Configuration Parameters*/ + /* Default Configuration Parameters */ /* Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; @@ -206,29 +196,28 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT - = "hbase.lru.cache.heavy.eviction.count.limit"; + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = + "hbase.lru.cache.heavy.eviction.count.limit"; // Default value actually equal to disable feature of increasing performance. // Because 2147483647 is about ~680 years (after that it will start to work) // We can set it to 0-10 and get the profit right now. // (see details https://issues.apache.org/jira/browse/HBASE-23887). private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; - private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = + "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; - private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT - = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = + "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in - * {@link LruAdaptiveBlockCache#getBlock}, we need to guarantee the atomicity - * of map#computeIfPresent (key, func). Besides, the func method must execute exactly once only - * when the key is present and under the lock context, otherwise the reference count will be - * messed up. Notice that the - * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + * {@link LruAdaptiveBlockCache#getBlock}, we need to guarantee the atomicity of + * map#computeIfPresent (key, func). Besides, the func method must execute exactly once only when + * the key is present and under the lock context, otherwise the reference count will be messed up. + * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ private transient final ConcurrentHashMap map; @@ -298,8 +287,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { /** * Where to send victims (blocks evicted/missing from the cache). 
This is used only when we use an - * external cache as L2. - * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + * external cache as L2. Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache */ private transient BlockCache victimHandler = null; @@ -316,12 +304,10 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { private final float heavyEvictionOverheadCoefficient; /** - * Default constructor. Specify maximum size and expected average block - * size (approximation is fine). - * - *

        All other factors will be calculated based on defaults specified in - * this class. - * + * Default constructor. Specify maximum size and expected average block size (approximation is + * fine). + *

        + * All other factors will be calculated based on defaults specified in this class. * @param maxSize maximum size of cache, in bytes * @param blockSize approximate size of each block, in bytes */ @@ -330,37 +316,27 @@ public LruAdaptiveBlockCache(long maxSize, long blockSize) { } /** - * Constructor used for testing. Allows disabling of the eviction thread. + * Constructor used for testing. Allows disabling of the eviction thread. */ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } - public LruAdaptiveBlockCache(long maxSize, long blockSize, - boolean evictionThread, Configuration conf) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, + public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread, + Configuration conf) { + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), - conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, @@ -376,38 +352,38 @@ public LruAdaptiveBlockCache(long maxSize, long blockSize, Configuration conf) { } /** - * Configurable constructor. Use this constructor if not using defaults. 
- * - * @param maxSize maximum size of this cache, in bytes - * @param blockSize expected average size of blocks, in bytes - * @param evictionThread whether to run evictions in a bg thread or not - * @param mapInitialSize initial size of backing ConcurrentHashMap - * @param mapLoadFactor initial load factor of backing ConcurrentHashMap - * @param mapConcurrencyLevel initial concurrency factor for backing CHM - * @param minFactor percentage of total size that eviction will evict until - * @param acceptableFactor percentage of total size that triggers eviction - * @param singleFactor percentage of total size for single-access blocks - * @param multiFactor percentage of total size for multiple-access blocks - * @param memoryFactor percentage of total size for in-memory blocks - * @param hardLimitFactor hard capacity limit - * @param forceInMemory in-memory hfile's data block has higher priority when evicting - * @param maxBlockSize maximum block size for caching - * @param heavyEvictionCountLimit when starts AdaptiveLRU algoritm work - * @param heavyEvictionMbSizeLimit how many bytes desirable putting into BlockCache - * @param heavyEvictionOverheadCoefficient how aggressive AdaptiveLRU will reduce GC + * Configurable constructor. Use this constructor if not using defaults. + * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * @param mapConcurrencyLevel initial concurrency factor for backing CHM + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks + * @param hardLimitFactor hard capacity limit + * @param forceInMemory in-memory hfile's data block has higher priority when + * evicting + * @param maxBlockSize maximum block size for caching + * @param heavyEvictionCountLimit when starts AdaptiveLRU algoritm work + * @param heavyEvictionMbSizeLimit how many bytes desirable putting into BlockCache + * @param heavyEvictionOverheadCoefficient how aggressive AdaptiveLRU will reduce GC */ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, - int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, - float heavyEvictionOverheadCoefficient) { + int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, + float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, + float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, + long heavyEvictionMbSizeLimit, float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; - if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { - throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + 
if ( + singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 + || memoryFactor < 0 + ) { + throw new IllegalArgumentException( + "Single, multi, and memory factors " + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -447,7 +423,7 @@ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThrea heavyEvictionOverheadCoefficient = Math.max(heavyEvictionOverheadCoefficient, 0.001f); this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, STAT_THREAD_PERIOD, TimeUnit.SECONDS); @@ -474,13 +450,13 @@ public int getCacheDataBlockPercent() { } /** - * The block cached in LruAdaptiveBlockCache will always be an heap block: on the one side, - * the heap access will be more faster then off-heap, the small index block or meta block - * cached in CombinedBlockCache will benefit a lot. on other side, the LruAdaptiveBlockCache - * size is always calculated based on the total heap size, if caching an off-heap block in - * LruAdaptiveBlockCache, the heap size will be messed up. Here we will clone the block into an - * heap block if it's an off-heap block, otherwise just use the original block. The key point is - * maintain the refCnt of the block (HBASE-22127):
+ * The block cached in LruAdaptiveBlockCache will always be a heap block: on the one hand, heap
 + * access is faster than off-heap access, so the small index or meta blocks cached in
 + * CombinedBlockCache benefit a lot; on the other hand, the LruAdaptiveBlockCache size is always
 + * calculated based on the total heap size, so caching an off-heap block in LruAdaptiveBlockCache
 + * would mess up the heap-size accounting. Here we clone the block into a heap block if it is an
 + * off-heap block, otherwise we just use the original block. The key point is to maintain the refCnt
 + * of the block (HBASE-22127):
        * 1. if cache the cloned heap block, its refCnt is an totally new one, it's easy to handle;
        * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's * reservoir, if both RPC and LruAdaptiveBlockCache release the block, then it can be garbage @@ -507,7 +483,6 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { *

        * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) * this can happen, for which we compare the buffer contents. - * * @param cacheKey block's cache key * @param buf block buffer * @param inMemory if block is in-memory @@ -532,18 +507,15 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // big this can make the logs way too noisy. // So we log 2% if (stats.failInsert() % 50 == 0) { - LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + LOG.warn("Trying to cache too large a block " + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + " is " + buf.heapSize() + " which is larger than " + + maxBlockSize); } return; } LruCachedBlock cb = map.get(cacheKey); - if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, - cacheKey, buf)) { + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { return; } long currentSize = size.get(); @@ -581,20 +553,20 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) } /** - * Sanity-checking for parity between actual block cache content and metrics. - * Intended only for use with TRACE level logging and -ea JVM. + * Sanity-checking for parity between actual block cache content and metrics. Intended only for + * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { - LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { - LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); } } } @@ -607,7 +579,7 @@ private static void assertCounterSanity(long mapSize, long counterVal) { * switch whether make the LRU on-heap or not, if so we may need copy the memory to on-heap, * otherwise the caching size is based on off-heap. * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @@ -615,9 +587,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { } /** - * Helper function that updates the local size counter and also updates any - * per-cf or per-blocktype metrics it can discern from given - * {@link LruCachedBlock} + * Helper function that updates the local size counter and also updates any per-cf or + * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -633,14 +604,11 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { /** * Get the buffer of the block with the specified name. 
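The cacheBlock hunk above also shows a small guard worth noting: blocks larger than hbase.lru.max.block.size are rejected, and only every 50th rejection (about 2%) is logged so the warning cannot flood the log. A stripped-down sketch of that guard, illustrative rather than the real method:

import java.util.concurrent.atomic.AtomicLong;

/** Sketch of the oversized-block guard with sampled logging described above. */
final class OversizedBlockGuard {
  private final long maxBlockSize;
  private final AtomicLong failedInserts = new AtomicLong();

  OversizedBlockGuard(long maxBlockSize) {
    this.maxBlockSize = maxBlockSize;
  }

  /** Returns true when the block may be cached; otherwise drops it and occasionally warns. */
  boolean admit(String fileName, long offset, long heapSize) {
    if (heapSize <= maxBlockSize) {
      return true;
    }
    // Log roughly 2% of rejections so a flood of large blocks does not flood the log.
    if (failedInserts.incrementAndGet() % 50 == 0) {
      System.err.println("Trying to cache too large a block " + fileName + " @ " + offset
          + " is " + heapSize + " which is larger than " + maxBlockSize);
    }
    return false;
  }
}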
- * * @param cacheKey block's cache key * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check - * locking) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * * @return buffer of specified cache key, or null if not in cache */ @Override @@ -683,7 +651,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea /** * Whether the cache contains block with specified cacheKey - * * @return true if contains the block */ @Override @@ -698,13 +665,11 @@ public boolean evictBlock(BlockCacheKey cacheKey) { } /** - * Evicts all blocks for a specific HFile. This is an - * expensive operation implemented as a linear-time search through all blocks - * in the cache. Ideally this should be a search in a log-access-time map. - * + * Evicts all blocks for a specific HFile. This is an expensive operation implemented as a + * linear-time search through all blocks in the cache. Ideally this should be a search in a + * log-access-time map. *

        * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override @@ -718,11 +683,9 @@ public int evictBlocksByHfileName(String hfileName) { } /** - * Evict the block, and it will be cached by the victim handler if exists && - * block may be read again later - * - * @param evictedByEvictionProcess true if the given block is evicted by - * EvictionThread + * Evict the block, and it will be cached by the victim handler if exists && block may be + * read again later + * @param evictedByEvictionProcess true if the given block is evicted by EvictionThread * @return the heap size of evicted block */ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { @@ -774,11 +737,8 @@ long getOverhead() { } /** - * Eviction method. - * - * Evict items in order of use, allowing delete items - * which haven't been used for the longest amount of time. - * + * Eviction method. Evict items in order of use, allowing delete items which haven't been used for + * the longest amount of time. * @return how many bytes were freed */ long evict() { @@ -796,9 +756,8 @@ long evict() { bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { - LOG.trace("Block cache LRU eviction started; Attempting to free " + - StringUtils.byteDesc(bytesToFree) + " of total=" + - StringUtils.byteDesc(currentSize)); + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize)); } if (bytesToFree <= 0) { @@ -806,12 +765,9 @@ long evict() { } // Instantiate priority buckets - BlockBucket bucketSingle - = new BlockBucket("single", bytesToFree, blockSize, singleSize()); - BlockBucket bucketMulti - = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); - BlockBucket bucketMemory - = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); // Scan entire map putting into appropriate buckets for (LruCachedBlock cachedBlock : map.values()) { @@ -841,13 +797,13 @@ long evict() { bytesFreed = bucketSingle.free(s); bytesFreed += bucketMulti.free(m); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " from single and multi buckets"); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets"); } bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " total from all three buckets "); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets "); } } else { // this means no need to evict block in memory bucket, @@ -894,12 +850,11 @@ long evict() { long single = bucketSingle.totalSize(); long multi = bucketMulti.totalSize(); long memory = bucketMemory.totalSize(); - LOG.trace("Block cache LRU eviction completed; " + - "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + - "total=" + StringUtils.byteDesc(this.size.get()) + ", " + - "single=" + StringUtils.byteDesc(single) + ", " + - "multi=" + StringUtils.byteDesc(multi) + ", " + - "memory=" + StringUtils.byteDesc(memory)); + LOG.trace( + "Block cache LRU eviction completed; " + "freed=" + 
StringUtils.byteDesc(bytesFreed) + + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); } } finally { stats.evict(); @@ -911,26 +866,21 @@ long evict() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) .add("currentSize", StringUtils.byteDesc(getCurrentSize())) .add("freeSize", StringUtils.byteDesc(getFreeSize())) .add("maxSize", StringUtils.byteDesc(getMaxSize())) .add("heapSize", StringUtils.byteDesc(heapSize())) - .add("minSize", StringUtils.byteDesc(minSize())) - .add("minFactor", minFactor) - .add("multiSize", StringUtils.byteDesc(multiSize())) - .add("multiFactor", multiFactor) - .add("singleSize", StringUtils.byteDesc(singleSize())) - .add("singleFactor", singleFactor) + .add("minSize", StringUtils.byteDesc(minSize())).add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())).add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())).add("singleFactor", singleFactor) .toString(); } /** - * Used to group blocks into priority buckets. There will be a BlockBucket - * for each priority (single, multi, memory). Once bucketed, the eviction - * algorithm takes the appropriate number of elements out of each according - * to configuration parameters and their relatives sizes. + * Used to group blocks into priority buckets. There will be a BlockBucket for each priority + * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of + * elements out of each according to configuration parameters and their relatives sizes. */ private class BlockBucket implements Comparable { @@ -987,7 +937,7 @@ public boolean equals(Object that) { if (!(that instanceof BlockBucket)) { return false; } - return compareTo((BlockBucket)that) == 0; + return compareTo((BlockBucket) that) == 0; } @Override @@ -997,17 +947,14 @@ public int hashCode() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name) + return MoreObjects.toStringHelper(this).add("name", name) .add("totalSize", StringUtils.byteDesc(totalSize)) - .add("bucketSize", StringUtils.byteDesc(bucketSize)) - .toString(); + .add("bucketSize", StringUtils.byteDesc(bucketSize)).toString(); } } /** * Get the maximum size of this cache. - * * @return max size in bytes */ @@ -1051,10 +998,9 @@ EvictionThread getEvictionThread() { } /* - * Eviction thread. Sits in waiting state until an eviction is triggered - * when the cache size grows above the acceptable level.

        - * - * Thread is triggered into action by {@link LruAdaptiveBlockCache#runEviction()} + * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows + * above the acceptable level.

        Thread is triggered into action by {@link + * LruAdaptiveBlockCache#runEviction()} */ static class EvictionThread extends Thread { @@ -1079,7 +1025,7 @@ public void run() { while (this.go) { synchronized (this) { try { - this.wait(1000 * 10/*Don't wait for ever*/); + this.wait(1000 * 10/* Don't wait for ever */); } catch (InterruptedException e) { LOG.warn("Interrupted eviction thread ", e); Thread.currentThread().interrupt(); @@ -1089,18 +1035,15 @@ public void run() { if (cache == null) { break; } - freedSumMb += cache.evict()/1024/1024; + freedSumMb += cache.evict() / 1024 / 1024; /* - * Sometimes we are reading more data than can fit into BlockCache - * and it is the cause a high rate of evictions. - * This in turn leads to heavy Garbage Collector works. - * So a lot of blocks put into BlockCache but never read, - * but spending a lot of CPU resources. - * Here we will analyze how many bytes were freed and decide - * decide whether the time has come to reduce amount of caching blocks. - * It help avoid put too many blocks into BlockCache - * when evict() works very active and save CPU for other jobs. - * More delails: https://issues.apache.org/jira/browse/HBASE-23887 + * Sometimes we are reading more data than can fit into BlockCache and it is the cause a + * high rate of evictions. This in turn leads to heavy Garbage Collector works. So a lot of + * blocks put into BlockCache but never read, but spending a lot of CPU resources. Here we + * will analyze how many bytes were freed and decide decide whether the time has come to + * reduce amount of caching blocks. It help avoid put too many blocks into BlockCache when + * evict() works very active and save CPU for other jobs. More delails: + * https://issues.apache.org/jira/browse/HBASE-23887 */ // First of all we have to control how much time @@ -1165,11 +1108,10 @@ public void run() { cache.cacheDataBlockPercent = 100; } } - LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + - "heavy eviction counter: {}, " + - "current caching DataBlock (%): {}", - freedSumMb, freedDataOverheadPercent, - heavyEvictionCount, cache.cacheDataBlockPercent); + LOG.info( + "BlockCache evicted (MB): {}, overhead (%): {}, " + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + freedSumMb, freedDataOverheadPercent, heavyEvictionCount, cache.cacheDataBlockPercent); freedSumMb = 0; startTime = stopTime; @@ -1177,8 +1119,8 @@ public void run() { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -1199,7 +1141,7 @@ boolean isEnteringRun() { } /* - * Statistics thread. Periodically prints the cache statistics to the log. + * Statistics thread. Periodically prints the cache statistics to the log. */ static class StatisticsThread extends Thread { @@ -1221,28 +1163,27 @@ public void logStats() { // Log size long totalSize = heapSize(); long freeSize = maxSize - totalSize; - LruAdaptiveBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? 
- "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + LruAdaptiveBlockCache.LOG + .info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 + ? "0" + : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + + ", " + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount() + + ", " + "evictedPerRun=" + stats.evictedPerEviction()); } /** * Get counter statistics for this cache. - * - *

        Includes: total accesses, hits, misses, evicted blocks, and runs - * of the eviction processes. + *

        + * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes. */ @Override public CacheStats getStats() { @@ -1339,7 +1280,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1358,17 +1299,21 @@ public void remove() { // Simple calculators of sizes given factors and maxSize long acceptableSize() { - return (long)Math.floor(this.maxSize * this.acceptableFactor); + return (long) Math.floor(this.maxSize * this.acceptableFactor); } + private long minSize() { - return (long)Math.floor(this.maxSize * this.minFactor); + return (long) Math.floor(this.maxSize * this.minFactor); } + private long singleSize() { - return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor); } + private long multiSize() { - return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor); } + private long memorySize() { return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 3e5ba1d19c56..48ba0eaf5798 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -99,29 +99,29 @@ public class LruBlockCache implements FirstLevelBlockCache { * Acceptable size of cache (no evictions if size < acceptable) */ private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = - "hbase.lru.blockcache.acceptable.factor"; + "hbase.lru.blockcache.acceptable.factor"; /** * Hard capacity limit of cache, will reject any put if size > this * acceptable */ static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = - "hbase.lru.blockcache.hard.capacity.limit.factor"; + "hbase.lru.blockcache.hard.capacity.limit.factor"; private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.single.percentage"; + "hbase.lru.blockcache.single.percentage"; private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.multi.percentage"; + "hbase.lru.blockcache.multi.percentage"; private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.memory.percentage"; + "hbase.lru.blockcache.memory.percentage"; /** - * Configuration key to force data-block always (except in-memory are too much) - * cached in memory for in-memory hfile, unlike inMemory, which is a column-family - * configuration, inMemoryForceMode is a cluster-wide configuration + * Configuration key to force data-block always (except in-memory are too much) cached in memory + * for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode + * is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = - "hbase.lru.rs.inmemoryforcemode"; + "hbase.lru.rs.inmemoryforcemode"; - /* Default Configuration Parameters*/ + /* Default Configuration Parameters */ /* Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; @@ -220,18 +220,15 @@ public class LruBlockCache implements FirstLevelBlockCache { /** * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an - * external cache as L2. - * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + * external cache as L2. Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache */ private transient BlockCache victimHandler = null; /** - * Default constructor. Specify maximum size and expected average block - * size (approximation is fine). - * - *

        All other factors will be calculated based on defaults specified in - * this class. - * + * Default constructor. Specify maximum size and expected average block size (approximation is + * fine). + *

        + * All other factors will be calculated based on defaults specified in this class. * @param maxSize maximum size of cache, in bytes * @param blockSize approximate size of each block, in bytes */ @@ -240,35 +237,26 @@ public LruBlockCache(long maxSize, long blockSize) { } /** - * Constructor used for testing. Allows disabling of the eviction thread. + * Constructor used for testing. Allows disabling of the eviction thread. */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE); + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, - conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), - conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), - conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), - conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), - conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), - conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), - conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -276,8 +264,7 @@ public LruBlockCache(long maxSize, long blockSize, Configuration conf) { } /** - * Configurable constructor. Use this constructor if not using defaults. - * + * Configurable constructor. Use this constructor if not using defaults. 
* @param maxSize maximum size of this cache, in bytes * @param blockSize expected average size of blocks, in bytes * @param evictionThread whether to run evictions in a bg thread or not @@ -290,16 +277,17 @@ public LruBlockCache(long maxSize, long blockSize, Configuration conf) { * @param multiFactor percentage of total size for multiple-access blocks * @param memoryFactor percentage of total size for in-memory blocks */ - public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize) { + public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, + float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, + float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, + boolean forceInMemory, long maxBlockSize) { this.maxBlockSize = maxBlockSize; - if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { - throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + if ( + singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 + || memoryFactor < 0 + ) { + throw new IllegalArgumentException( + "Single, multi, and memory factors " + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -330,10 +318,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } else { this.evictionThread = null; } - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, - STAT_THREAD_PERIOD, TimeUnit.SECONDS); + STAT_THREAD_PERIOD, TimeUnit.SECONDS); } @Override @@ -385,7 +373,6 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { *
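The hunks above cover LruBlockCache construction: the simpler constructors chain into the fully configurable one, and the factor check rejects single/multi/memory percentages that are negative or do not sum to 1.0. The sketch below is illustrative only and is not part of this patch; it drives the Configuration-based constructor shown above with assumed example values (the cache size, block size, and factor settings are made up for the demonstration, not defaults taken from this diff).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

    public class LruBlockCacheConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Split capacity between single-access, multi-access and in-memory blocks.
        // The three values must be non-negative and sum to 1.0, otherwise the
        // constructor above throws IllegalArgumentException.
        conf.setFloat("hbase.lru.blockcache.single.percentage", 0.25f);
        conf.setFloat("hbase.lru.blockcache.multi.percentage", 0.50f);
        conf.setFloat("hbase.lru.blockcache.memory.percentage", 0.25f);
        // No evictions while usage stays below this fraction of capacity
        // (assumed example value; it must stay above the configured min factor).
        conf.setFloat("hbase.lru.blockcache.acceptable.factor", 0.99f);

        long maxSize = 64L * 1024 * 1024; // 64 MB cache, example value
        long blockSize = 64L * 1024;      // ~64 KB average block, example value
        LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, conf);
        System.out.println(cache); // blockCount/currentSize/freeSize/... via toString()
        cache.shutdown();
      }
    }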

        * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) * this can happen, for which we compare the buffer contents. - * * @param cacheKey block's cache key * @param buf block buffer * @param inMemory if block is in-memory @@ -397,11 +384,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // big this can make the logs way too noisy. // So we log 2% if (stats.failInsert() % 50 == 0) { - LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + LOG.warn("Trying to cache too large a block " + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + " is " + buf.heapSize() + " which is larger than " + + maxBlockSize); } return; } @@ -445,20 +430,20 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) } /** - * Sanity-checking for parity between actual block cache content and metrics. - * Intended only for use with TRACE level logging and -ea JVM. + * Sanity-checking for parity between actual block cache content and metrics. Intended only for + * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { - LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { - LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); } } } @@ -471,7 +456,7 @@ private static void assertCounterSanity(long mapSize, long counterVal) { * switch whether make the LRU on-heap or not, if so we may need copy the memory to on-heap, * otherwise the caching size is based on off-heap. * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @@ -479,9 +464,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { } /** - * Helper function that updates the local size counter and also updates any - * per-cf or per-blocktype metrics it can discern from given - * {@link LruCachedBlock} + * Helper function that updates the local size counter and also updates any per-cf or + * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -497,19 +481,16 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { /** * Get the buffer of the block with the specified name. 
- * * @param cacheKey block's cache key * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check - * locking) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid + * double counting cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * * @return buffer of specified cache key, or null if not in cache */ @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics) { + boolean updateCacheMetrics) { LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -547,7 +528,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea /** * Whether the cache contains block with specified cacheKey - * * @return true if contains the block */ @Override @@ -562,13 +542,11 @@ public boolean evictBlock(BlockCacheKey cacheKey) { } /** - * Evicts all blocks for a specific HFile. This is an - * expensive operation implemented as a linear-time search through all blocks - * in the cache. Ideally this should be a search in a log-access-time map. - * + * Evicts all blocks for a specific HFile. This is an expensive operation implemented as a + * linear-time search through all blocks in the cache. Ideally this should be a search in a + * log-access-time map. *

        * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override @@ -588,11 +566,9 @@ public int evictBlocksByHfileName(String hfileName) { } /** - * Evict the block, and it will be cached by the victim handler if exists && - * block may be read again later - * - * @param evictedByEvictionProcess true if the given block is evicted by - * EvictionThread + * Evict the block, and it will be cached by the victim handler if exists && block may be + * read again later + * @param evictedByEvictionProcess true if the given block is evicted by EvictionThread * @return the heap size of evicted block */ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { @@ -659,9 +635,8 @@ void evict() { long bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { - LOG.trace("Block cache LRU eviction started; Attempting to free " + - StringUtils.byteDesc(bytesToFree) + " of total=" + - StringUtils.byteDesc(currentSize)); + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize)); } if (bytesToFree <= 0) { @@ -701,13 +676,13 @@ void evict() { bytesFreed = bucketSingle.free(s); bytesFreed += bucketMulti.free(m); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " from single and multi buckets"); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets"); } bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " total from all three buckets "); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets "); } } else { // this means no need to evict block in memory bucket, @@ -744,7 +719,7 @@ void evict() { long overflow = bucket.overflow(); if (overflow > 0) { long bucketBytesToFree = - Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); + Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); bytesFreed += bucket.free(bucketBytesToFree); } remainingBuckets--; @@ -754,12 +729,11 @@ void evict() { long single = bucketSingle.totalSize(); long multi = bucketMulti.totalSize(); long memory = bucketMemory.totalSize(); - LOG.trace("Block cache LRU eviction completed; " + - "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + - "total=" + StringUtils.byteDesc(this.size.get()) + ", " + - "single=" + StringUtils.byteDesc(single) + ", " + - "multi=" + StringUtils.byteDesc(multi) + ", " + - "memory=" + StringUtils.byteDesc(memory)); + LOG.trace( + "Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); } } finally { stats.evict(); @@ -770,26 +744,21 @@ void evict() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) .add("currentSize", StringUtils.byteDesc(getCurrentSize())) .add("freeSize", StringUtils.byteDesc(getFreeSize())) .add("maxSize", StringUtils.byteDesc(getMaxSize())) .add("heapSize", StringUtils.byteDesc(heapSize())) - .add("minSize", StringUtils.byteDesc(minSize())) - .add("minFactor", minFactor) - 
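The evict() hunk above frees bytesToFree = currentSize - minSize() by bucketing cached blocks into single, multi and memory groups and then taking at most an equal slice, Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets), from each bucket that is over its share. The standalone sketch below is illustrative only (plain Java with hypothetical byte counts, no HBase types); it reproduces just that proportional-free loop so the arithmetic is easy to follow.

    // Simplified model of the proportional free loop in evict(); all sizes are hypothetical.
    public class EvictionLoopSketch {
      public static void main(String[] args) {
        long[] total = { 30_000_000L, 55_000_000L, 10_000_000L }; // bytes held per bucket
        long[] share = { 25_000_000L, 50_000_000L, 25_000_000L }; // each bucket's configured share
        long bytesToFree = 8_000_000L; // currentSize - minSize() in the real code
        long bytesFreed = 0;
        int remainingBuckets = total.length;
        // The real code polls buckets from a priority queue; simple index order is enough here.
        for (int i = 0; i < total.length && bytesFreed < bytesToFree; i++) {
          long overflow = total[i] - share[i];
          if (overflow > 0) {
            long bucketBytesToFree =
                Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
            bytesFreed += bucketBytesToFree;
            System.out.println("bucket " + i + " frees " + bucketBytesToFree + " bytes");
          }
          remainingBuckets--;
        }
        // A single pass may not reach the full target, mirroring a single evict() run.
        System.out.println("freed " + bytesFreed + " of " + bytesToFree);
      }
    }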
.add("multiSize", StringUtils.byteDesc(multiSize())) - .add("multiFactor", multiFactor) - .add("singleSize", StringUtils.byteDesc(singleSize())) - .add("singleFactor", singleFactor) + .add("minSize", StringUtils.byteDesc(minSize())).add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())).add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())).add("singleFactor", singleFactor) .toString(); } /** - * Used to group blocks into priority buckets. There will be a BlockBucket - * for each priority (single, multi, memory). Once bucketed, the eviction - * algorithm takes the appropriate number of elements out of each according - * to configuration parameters and their relatives sizes. + * Used to group blocks into priority buckets. There will be a BlockBucket for each priority + * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of + * elements out of each according to configuration parameters and their relatives sizes. */ private class BlockBucket implements Comparable { @@ -846,7 +815,7 @@ public boolean equals(Object that) { if (that == null || !(that instanceof BlockBucket)) { return false; } - return compareTo((BlockBucket)that) == 0; + return compareTo((BlockBucket) that) == 0; } @Override @@ -856,17 +825,14 @@ public int hashCode() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name) + return MoreObjects.toStringHelper(this).add("name", name) .add("totalSize", StringUtils.byteDesc(totalSize)) - .add("bucketSize", StringUtils.byteDesc(bucketSize)) - .toString(); + .add("bucketSize", StringUtils.byteDesc(bucketSize)).toString(); } } /** * Get the maximum size of this cache. - * * @return max size in bytes */ @@ -910,10 +876,9 @@ EvictionThread getEvictionThread() { } /* - * Eviction thread. Sits in waiting state until an eviction is triggered - * when the cache size grows above the acceptable level.

        - * - * Thread is triggered into action by {@link LruBlockCache#runEviction()} + * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows + * above the acceptable level.

        Thread is triggered into action by {@link + * LruBlockCache#runEviction()} */ static class EvictionThread extends Thread { @@ -934,7 +899,7 @@ public void run() { while (this.go) { synchronized (this) { try { - this.wait(1000 * 10/*Don't wait for ever*/); + this.wait(1000 * 10/* Don't wait for ever */); } catch (InterruptedException e) { LOG.warn("Interrupted eviction thread ", e); Thread.currentThread().interrupt(); @@ -949,8 +914,8 @@ public void run() { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -975,7 +940,7 @@ boolean isEnteringRun() { } /* - * Statistics thread. Periodically prints the cache statistics to the log. + * Statistics thread. Periodically prints the cache statistics to the log. */ static class StatisticsThread extends Thread { @@ -997,28 +962,26 @@ public void logStats() { // Log size long totalSize = heapSize(); long freeSize = maxSize - totalSize; - LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 + ? "0" + : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + + ", " + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount() + ", " + + "evictedPerRun=" + stats.evictedPerEviction()); } /** * Get counter statistics for this cache. - * - *

        Includes: total accesses, hits, misses, evicted blocks, and runs - * of the eviction processes. + *

        + * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes. */ @Override public CacheStats getStats() { @@ -1026,7 +989,7 @@ public CacheStats getStats() { } public final static long CACHE_FIXED_OVERHEAD = - ClassSize.estimateBase(LruBlockCache.class, false); + ClassSize.estimateBase(LruBlockCache.class, false); @Override public long heapSize() { @@ -1036,8 +999,8 @@ public long heapSize() { private static long calculateOverhead(long maxSize, long blockSize, int concurrency) { // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP - + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) - + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); + + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } @Override @@ -1115,7 +1078,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1134,17 +1097,21 @@ public void remove() { // Simple calculators of sizes given factors and maxSize long acceptableSize() { - return (long)Math.floor(this.maxSize * this.acceptableFactor); + return (long) Math.floor(this.maxSize * this.acceptableFactor); } + private long minSize() { - return (long)Math.floor(this.maxSize * this.minFactor); + return (long) Math.floor(this.maxSize * this.minFactor); } + private long singleSize() { - return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor); } + private long multiSize() { - return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor); } + private long memorySize() { return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); } @@ -1182,7 +1149,6 @@ public void clearCache() { /** * Used in testing. May be very inefficient. - * * @return the set of cached file names */ SortedSet getCachedFileNamesForTest() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java index 32a277d46266..f60e4300e4ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,25 +17,24 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; /** * Represents an entry in the {@link LruBlockCache}. - * - *

        Makes the block memory-aware with {@link HeapSize} and Comparable - * to sort by access time for the LRU. It also takes care of priority by - * either instantiating as in-memory or handling the transition from single - * to multiple access. + *

        + * Makes the block memory-aware with {@link HeapSize} and Comparable to sort by access time for the + * LRU. It also takes care of priority by either instantiating as in-memory or handling the + * transition from single to multiple access. */ @InterfaceAudience.Private public class LruCachedBlock implements HeapSize, Comparable { - public final static long PER_BLOCK_OVERHEAD = ClassSize.align( - ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + - ClassSize.STRING + ClassSize.BYTE_BUFFER); + public final static long PER_BLOCK_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + + ClassSize.STRING + ClassSize.BYTE_BUFFER); private final BlockCacheKey cacheKey; private final Cacheable buf; @@ -44,7 +42,7 @@ public class LruCachedBlock implements HeapSize, Comparable { private long size; private BlockPriority priority; /** - * Time this block was cached. Presumes we are created just before we are added to the cache. + * Time this block was cached. Presumes we are created just before we are added to the cache. */ private final long cachedTime = System.nanoTime(); @@ -52,8 +50,7 @@ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) { this(cacheKey, buf, accessTime, false); } - public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, - boolean inMemory) { + public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, boolean inMemory) { this.cacheKey = cacheKey; this.buf = buf; this.accessTime = accessTime; @@ -62,9 +59,9 @@ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, // the base classes. We also include the base class // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with // their buffer lengths. This variable is used elsewhere in unit tests. - this.size = ClassSize.align(cacheKey.heapSize()) - + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; - if(inMemory) { + this.size = + ClassSize.align(cacheKey.heapSize()) + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; + if (inMemory) { this.priority = BlockPriority.MEMORY; } else { this.priority = BlockPriority.SINGLE; @@ -74,11 +71,11 @@ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, /** * Block has been accessed. * @param accessTime Last access; this is actually a incremented sequence number rather than an - * actual time. + * actual time. */ public void access(long accessTime) { this.accessTime = accessTime; - if(this.priority == BlockPriority.SINGLE) { + if (this.priority == BlockPriority.SINGLE) { this.priority = BlockPriority.MULTI; } } @@ -104,7 +101,7 @@ public int compareTo(LruCachedBlock that) { @Override public int hashCode() { - return (int)(accessTime ^ (accessTime >>> 32)); + return (int) (accessTime ^ (accessTime >>> 32)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java index e68939191d0e..8e45cb772d22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
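The access() hunk above is where priority promotion happens: a block cached as SINGLE moves to MULTI the first time it is touched again, while blocks constructed as in-memory stay at MEMORY priority. A throwaway sketch of that small state machine (plain Java with an invented enum standing in for BlockPriority; not part of this patch):

    // Hypothetical mini-model of the transition performed by LruCachedBlock.access().
    public class BlockPrioritySketch {
      enum Priority { SINGLE, MULTI, MEMORY }

      static final class Block {
        Priority priority;
        long accessTime; // an incrementing sequence number, not wall-clock time

        Block(boolean inMemory) {
          this.priority = inMemory ? Priority.MEMORY : Priority.SINGLE;
        }

        void access(long accessTime) {
          this.accessTime = accessTime;
          if (this.priority == Priority.SINGLE) {
            this.priority = Priority.MULTI; // promoted on re-access, as in the hunk above
          }
        }
      }

      public static void main(String[] args) {
        Block b = new Block(false);
        System.out.println(b.priority); // SINGLE when first cached
        b.access(1);
        System.out.println(b.priority); // MULTI after it is touched again
      }
    }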
See the NOTICE file * distributed with this work for additional information @@ -25,17 +24,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue; /** - * A memory-bound queue that will grow until an element brings - * total size >= maxSize. From then on, only entries that are sorted larger - * than the smallest current entry will be inserted/replaced. - * - *

        Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified - * maxSize as possible. Default behavior is to grow just above rather than - * just below specified max. - * - *

        Object used in this queue must implement {@link HeapSize} as well as - * {@link Comparable}. + * A memory-bound queue that will grow until an element brings total size >= maxSize. From then + * on, only entries that are sorted larger than the smallest current entry will be + * inserted/replaced. + *

        + * Use this when you want to find the largest elements (according to their ordering, not their heap + * size) that consume as close to the specified maxSize as possible. Default behavior is to grow + * just above rather than just below specified max. + *

        + * Object used in this queue must implement {@link HeapSize} as well as {@link Comparable}. */ @InterfaceAudience.Private public class LruCachedBlockQueue implements HeapSize { @@ -46,7 +43,7 @@ public class LruCachedBlockQueue implements HeapSize { private long maxSize; /** - * @param maxSize the target size of elements in the queue + * @param maxSize the target size of elements in the queue * @param blockSize expected average size of blocks */ public LruCachedBlockQueue(long maxSize, long blockSize) { @@ -63,16 +60,16 @@ public LruCachedBlockQueue(long maxSize, long blockSize) { /** * Attempt to add the specified cached block to this queue. - * - *

        If the queue is smaller than the max size, or if the specified element - * is ordered before the smallest element in the queue, the element will be - * added to the queue. Otherwise, there is no side effect of this call. + *

        + * If the queue is smaller than the max size, or if the specified element is ordered before the + * smallest element in the queue, the element will be added to the queue. Otherwise, there is no + * side effect of this call. * @param cb block to try to add to the queue */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", - justification = "head can not be null as heapSize is greater than maxSize," - + " which means we have something in the queue") + value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", + justification = "head can not be null as heapSize is greater than maxSize," + + " which means we have something in the queue") public void add(LruCachedBlock cb) { if (heapSize < maxSize) { queue.add(cb); @@ -93,16 +90,14 @@ public void add(LruCachedBlock cb) { } /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. + * @return The next element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock poll() { return queue.poll(); } /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. + * @return The last element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock pollLast() { return queue.pollLast(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index c519d9fd8095..d64f0e4ce53d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -1,27 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
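The add() hunk above spells out the queue's contract: grow freely until the accumulated heap size reaches maxSize, then admit a new element only if it sorts after the current smallest, which is dropped in its place. Below is a standalone illustration of that idea using a plain PriorityQueue of longs; it is not the HBase class (which uses a MinMaxPriorityQueue and HeapSize-based weights), just a sketch of the admit-or-replace behavior.

    import java.util.PriorityQueue;

    // Illustrative only: keep the "largest" items within a fixed weight budget.
    public class BoundedTopQueueSketch {
      private final PriorityQueue<Long> queue = new PriorityQueue<>(); // smallest at head
      private final long maxWeight;
      private long weight;

      BoundedTopQueueSketch(long maxWeight) {
        this.maxWeight = maxWeight;
      }

      void add(long value, long itemWeight) {
        if (weight < maxWeight) {
          queue.add(value);
          weight += itemWeight;
        } else if (!queue.isEmpty() && value > queue.peek()) {
          queue.poll();     // drop the current smallest
          queue.add(value); // keep the larger newcomer
          // Weight is treated as constant here for simplicity; the real add()
          // accounts for the size difference between the dropped head and the new block.
        }
      }

      public static void main(String[] args) {
        BoundedTopQueueSketch q = new BoundedTopQueueSketch(3);
        for (long v : new long[] { 5, 1, 9, 2, 7 }) {
          q.add(v, 1); // each item "weighs" 1, so only three fit
        }
        System.out.println(q.queue); // retains the three largest: 5, 7, 9 (in heap order)
      }
    }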
*/ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.EncodingState; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; import org.apache.hadoop.hbase.io.encoding.NoneEncoder; +import org.apache.yetus.audience.InterfaceAudience; /** * Does not perform any kind of encoding/decoding. @@ -36,8 +36,7 @@ @InterfaceAudience.Private public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { - public static final NoOpDataBlockEncoder INSTANCE = - new NoOpDataBlockEncoder(); + public static final NoOpDataBlockEncoder INSTANCE = new NoOpDataBlockEncoder(); private static class NoneEncodingState extends EncodingState { NoneEncoder encoder = null; @@ -48,10 +47,9 @@ private NoOpDataBlockEncoder() { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, - DataOutputStream out) throws IOException { - NoneEncodingState state = (NoneEncodingState) encodingCtx - .getEncodingState(); + public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + throws IOException { + NoneEncodingState state = (NoneEncodingState) encodingCtx.getEncodingState(); NoneEncoder encoder = state.encoder; int size = encoder.write(cell); state.postCellEncode(size, size); @@ -75,7 +73,7 @@ public DataBlockEncoding getDataBlockEncoding() { public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) { return DataBlockEncoding.NONE; } - + @Override public String toString() { return getClass().getSimpleName(); @@ -83,23 +81,22 @@ public String toString() { @Override public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, - byte[] dummyHeader, HFileContext meta) { + byte[] dummyHeader, HFileContext meta) { return new HFileBlockDefaultEncodingContext(conf, null, dummyHeader, meta); } @Override public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, - HFileContext meta) { + HFileContext meta) { return new HFileBlockDefaultDecodingContext(conf, meta); } @Override - public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, - DataOutputStream out) throws IOException { + public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out) + throws IOException { if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) { throw new IOException(this.getClass().getName() + " only accepts " - + HFileBlockDefaultEncodingContext.class.getName() + " as the " - + "encoding context."); + + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context."); } HFileBlockDefaultEncodingContext encodingCtx = @@ -114,7 +111,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, @Override public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, - byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { + byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException { encodingCtx.postEncoding(BlockType.DATA); } } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index 80de44915f2e..8561fc1c8939 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +42,7 @@ public final class PrefetchExecutor { private static final Logger LOG = LoggerFactory.getLogger(PrefetchExecutor.class); /** Futures for tracking block prefetch activity */ - private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); + private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); /** Executor pool shared among all HFiles for block prefetch */ private static final ScheduledExecutorService prefetchExecutorPool; /** Delay before beginning prefetch */ @@ -59,15 +58,14 @@ public final class PrefetchExecutor { prefetchDelayMillis = conf.getInt("hbase.hfile.prefetch.delay", 1000); prefetchDelayVariation = conf.getFloat("hbase.hfile.prefetch.delay.variation", 0.2f); int prefetchThreads = conf.getInt("hbase.hfile.thread.prefetch", 4); - prefetchExecutorPool = new ScheduledThreadPoolExecutor(prefetchThreads, - new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); - Thread t = new Thread(r, name); - t.setDaemon(true); - return t; - } + prefetchExecutorPool = new ScheduledThreadPoolExecutor(prefetchThreads, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); + Thread t = new Thread(r, name); + t.setDaemon(true); + return t; + } }); } @@ -75,24 +73,17 @@ public Thread newThread(Runnable r) { // prefetching of file blocks but the Store level is where path convention // knowledge should be contained private static final Pattern prefetchPathExclude = - Pattern.compile( - "(" + - Path.SEPARATOR_CHAR + - HConstants.HBASE_TEMP_DIRECTORY.replace(".", "\\.") + - Path.SEPARATOR_CHAR + - ")|(" + - Path.SEPARATOR_CHAR + - HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + - Path.SEPARATOR_CHAR + - ")"); + Pattern.compile("(" + Path.SEPARATOR_CHAR + HConstants.HBASE_TEMP_DIRECTORY.replace(".", "\\.") + + Path.SEPARATOR_CHAR + ")|(" + Path.SEPARATOR_CHAR + + HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + Path.SEPARATOR_CHAR + ")"); public static void request(Path path, Runnable runnable) { if (!prefetchPathExclude.matcher(path.toString()).find()) { long delay; if (prefetchDelayMillis > 0) { - delay = (long)((prefetchDelayMillis * (1.0f - (prefetchDelayVariation/2))) + - (prefetchDelayMillis * (prefetchDelayVariation/2) * - ThreadLocalRandom.current().nextFloat())); + delay = (long) ((prefetchDelayMillis * (1.0f - (prefetchDelayVariation / 2))) + + (prefetchDelayMillis * (prefetchDelayVariation / 2) + * ThreadLocalRandom.current().nextFloat())); } else { delay = 0; } @@ -100,8 +91,8 @@ public static void request(Path path, Runnable runnable) { if (LOG.isDebugEnabled()) { LOG.debug("Prefetch requested for " + path + ", delay=" + delay + " ms"); } - prefetchFutures.put(path, prefetchExecutorPool.schedule(runnable, delay, - TimeUnit.MILLISECONDS)); + 
prefetchFutures.put(path, + prefetchExecutorPool.schedule(runnable, delay, TimeUnit.MILLISECONDS)); } catch (RejectedExecutionException e) { prefetchFutures.remove(path); LOG.warn("Prefetch request rejected for " + path); @@ -136,5 +127,6 @@ public static boolean isCompleted(Path path) { return true; } - private PrefetchExecutor() {} + private PrefetchExecutor() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java index bd3d63dab0c6..c652c4a18b53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,6 +32,7 @@ public enum ReaderType { PREAD, STREAM } + private final Path filePath; private final FSDataInputStreamWrapper fsdis; private final long fileSize; @@ -41,7 +41,7 @@ public enum ReaderType { private final ReaderType type; public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSize, - HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) { + HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) { this.filePath = filePath; this.fsdis = fsdis; this.fileSize = fileSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java index 1f903cfbea64..0ec3de58fffb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
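The request() hunk above schedules each prefetch with a jittered delay: the configured base delay is reduced by half of the variation fraction and a random share of that variation window is added back. A small arithmetic sketch of the same formula (illustrative only), using the defaults read earlier in this file, 1000 ms and 0.2:

    import java.util.concurrent.ThreadLocalRandom;

    // With delay=1000 ms and variation=0.2 the scheduled delay lands in [900, 1000) ms.
    public class PrefetchDelaySketch {
      public static void main(String[] args) {
        long prefetchDelayMillis = 1000;      // hbase.hfile.prefetch.delay default
        float prefetchDelayVariation = 0.2f;  // hbase.hfile.prefetch.delay.variation default
        long delay = (long) ((prefetchDelayMillis * (1.0f - (prefetchDelayVariation / 2)))
            + (prefetchDelayMillis * (prefetchDelayVariation / 2)
                * ThreadLocalRandom.current().nextFloat()));
        System.out.println("prefetch delayed by " + delay + " ms"); // e.g. 900..999
      }
    }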
See the NOTICE file * distributed with this work for additional information @@ -20,6 +19,7 @@ import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; + import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -40,7 +40,8 @@ public class ReaderContextBuilder { private boolean primaryReplicaReader = true; private ReaderType type = ReaderType.PREAD; - public ReaderContextBuilder() {} + public ReaderContextBuilder() { + } public ReaderContextBuilder withFilePath(Path filePath) { this.filePath = filePath; @@ -82,11 +83,9 @@ public ReaderContextBuilder withReaderType(ReaderType type) { } public ReaderContextBuilder withFileSystemAndPath(FileSystem fs, Path filePath) - throws IOException { - this.withFileSystem(fs) - .withFilePath(filePath) - .withFileSize(fs.getFileStatus(filePath).getLen()) - .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, filePath)); + throws IOException { + this.withFileSystem(fs).withFilePath(filePath).withFileSize(fs.getFileStatus(filePath).getLen()) + .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, filePath)); return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java index 76158b010694..f093073319e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java index 0d2217e1579f..d0d8fa7cfe0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,11 +32,11 @@ public class SharedMemHFileBlock extends HFileBlock { SharedMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, - ByteBuffAllocator alloc) { + int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator alloc) { super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf, - fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); + fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java index e5e2e8fb6320..7852f19bd63e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,38 +19,36 @@ import static java.util.Objects.requireNonNull; +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Policy.Eviction; +import com.github.benmanes.caffeine.cache.RemovalCause; +import com.github.benmanes.caffeine.cache.RemovalListener; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; - -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.Policy.Eviction; -import com.github.benmanes.caffeine.cache.RemovalCause; -import com.github.benmanes.caffeine.cache.RemovalListener; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * A block cache that is memory-aware using {@link HeapSize}, memory bounded using the W-TinyLFU * eviction algorithm, and concurrent. This implementation delegates to a Caffeine cache to provide * O(1) read and write operations. *

- * • W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf
- * • Caffeine: https://github.com/ben-manes/caffeine
- * • Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html
+ * • W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf
+ * • Caffeine: https://github.com/ben-manes/caffeine
+ * • Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html
        */ @InterfaceAudience.Private @@ -72,44 +70,39 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { /** * Creates a block cache. - * * @param maximumSizeInBytes maximum size of this cache, in bytes - * @param avgBlockSize expected average size of blocks, in bytes - * @param executor the cache's executor - * @param conf additional configuration + * @param avgBlockSize expected average size of blocks, in bytes + * @param executor the cache's executor + * @param conf additional configuration */ - public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, - Executor executor, Configuration conf) { - this(maximumSizeInBytes, avgBlockSize, - conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), executor); + public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, Executor executor, + Configuration conf) { + this(maximumSizeInBytes, avgBlockSize, conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + executor); } /** * Creates a block cache. - * * @param maximumSizeInBytes maximum size of this cache, in bytes - * @param avgBlockSize expected average size of blocks, in bytes - * @param maxBlockSize maximum size of a block, in bytes - * @param executor the cache's executor + * @param avgBlockSize expected average size of blocks, in bytes + * @param maxBlockSize maximum size of a block, in bytes + * @param executor the cache's executor */ - public TinyLfuBlockCache(long maximumSizeInBytes, - long avgBlockSize, long maxBlockSize, Executor executor) { - this.cache = Caffeine.newBuilder() - .executor(executor) - .maximumWeight(maximumSizeInBytes) - .removalListener(new EvictionListener()) - .weigher((BlockCacheKey key, Cacheable value) -> - (int) Math.min(value.heapSize(), Integer.MAX_VALUE)) - .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / avgBlockSize)) - .build(); + public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, long maxBlockSize, + Executor executor) { + this.cache = Caffeine.newBuilder().executor(executor).maximumWeight(maximumSizeInBytes) + .removalListener(new EvictionListener()) + .weigher( + (BlockCacheKey key, Cacheable value) -> (int) Math.min(value.heapSize(), Integer.MAX_VALUE)) + .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / avgBlockSize)).build(); this.maxBlockSize = maxBlockSize; this.policy = cache.policy().eviction().get(); this.stats = new CacheStats(getClass().getSimpleName()); statsThreadPool = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder() - .setNameFormat("TinyLfuBlockCacheStatsExecutor").setDaemon(true).build()); - statsThreadPool.scheduleAtFixedRate(this::logStats, - STAT_THREAD_PERIOD_SECONDS, STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); + .setNameFormat("TinyLfuBlockCacheStatsExecutor").setDaemon(true).build()); + statsThreadPool.scheduleAtFixedRate(this::logStats, STAT_THREAD_PERIOD_SECONDS, + STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); } @Override @@ -156,8 +149,8 @@ public boolean containsBlock(BlockCacheKey cacheKey) { } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, - boolean caching, boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { Cacheable value = cache.asMap().computeIfPresent(cacheKey, (blockCacheKey, cacheable) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. 
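The constructor hunk above shows how TinyLfuBlockCache delegates storage to Caffeine: maximumWeight bounds the cache by bytes, the weigher charges each block its heapSize (clamped to Integer.MAX_VALUE), and the removal listener forwards evictions to the victim cache. The sketch below is illustrative only; it reuses the same builder calls on a toy String cache weighted by string length rather than on HBase Cacheable blocks.

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;
    import com.github.benmanes.caffeine.cache.RemovalCause;

    public class CaffeineWeightSketch {
      public static void main(String[] args) {
        long budgetInBytes = 1024; // hypothetical weight budget
        Cache<String, String> cache = Caffeine.newBuilder()
            .maximumWeight(budgetInBytes)
            .weigher((String key, String value) -> value.length())
            .removalListener((String key, String value, RemovalCause cause) ->
                System.out.println("evicted " + key + " because " + cause))
            .build();
        cache.put("block-1", "x".repeat(600));
        cache.put("block-2", "y".repeat(600)); // pushes total weight over the budget
        cache.cleanUp(); // run pending maintenance so the weight bound is enforced
        System.out.println("resident entries: " + cache.estimatedSize());
      }
    }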
because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -214,9 +207,8 @@ public void cacheBlock(BlockCacheKey key, Cacheable value) { * the block (HBASE-22127):
* 1. if we cache the cloned heap block, its refCnt is a totally new one, so it's easy to handle;
        * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's - * reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be - * garbage collected by JVM, so need a retain here. - * + * reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be garbage + * collected by JVM, so need a retain here. * @param buf the original block * @return an block with an heap memory backend. */ @@ -276,38 +268,29 @@ public BlockCache[] getBlockCaches() { public Iterator iterator() { long now = System.nanoTime(); return cache.asMap().entrySet().stream() - .map(entry -> (CachedBlock) new CachedBlockView(entry.getKey(), entry.getValue(), now)) - .iterator(); + .map(entry -> (CachedBlock) new CachedBlockView(entry.getKey(), entry.getValue(), now)) + .iterator(); } private void logStats() { - LOG.info( - "totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + - "freeSize=" + StringUtils.byteDesc(getFreeSize()) + ", " + - "max=" + StringUtils.byteDesc(size()) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0," : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount()); + LOG.info("totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + "freeSize=" + + StringUtils.byteDesc(getFreeSize()) + ", " + "max=" + StringUtils.byteDesc(size()) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 ? "0," : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount()); } @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) - .add("currentSize", getCurrentSize()) - .add("freeSize", getFreeSize()) - .add("maxSize", size()) - .add("heapSize", heapSize()) - .add("victimCache", (victimCache != null)) - .toString(); + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) + .add("currentSize", getCurrentSize()).add("freeSize", getFreeSize()).add("maxSize", size()) + .add("heapSize", heapSize()).add("victimCache", (victimCache != null)).toString(); } /** A removal listener to asynchronously record evictions and populate the victim cache. */ @@ -335,10 +318,10 @@ public void onRemoval(BlockCacheKey key, Cacheable value, RemovalCause cause) { } /** - * Records an eviction. The number of eviction operations and evicted blocks are identical, as - * an eviction is triggered immediately when the capacity has been exceeded. An eviction is - * performed asynchronously. See the library's documentation for details on write buffers, - * batching, and maintenance behavior. + * Records an eviction. 
The number of eviction operations and evicted blocks are identical, as an + * eviction is triggered immediately when the capacity has been exceeded. An eviction is performed + * asynchronously. See the library's documentation for details on write buffers, batching, and + * maintenance behavior. */ private void recordEviction() { // FIXME: Currently does not capture the insertion time @@ -347,9 +330,8 @@ private void recordEviction() { } private static final class CachedBlockView implements CachedBlock { - private static final Comparator COMPARATOR = Comparator - .comparing(CachedBlock::getFilename) - .thenComparing(CachedBlock::getOffset) + private static final Comparator COMPARATOR = + Comparator.comparing(CachedBlock::getFilename).thenComparing(CachedBlock::getOffset) .thenComparing(CachedBlock::getCachedTime); private final BlockCacheKey key; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index bbbce76cf8e8..5d89f0cbdd3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile.bucket; import java.util.Arrays; @@ -114,8 +113,8 @@ public long getBaseOffset() { } /** - * Allocate a block in this bucket, return the offset representing the - * position in physical space + * Allocate a block in this bucket, return the offset representing the position in physical + * space * @return the offset in the IOEngine */ public long allocate() { @@ -130,18 +129,16 @@ public long allocate() { public void addAllocation(long offset) throws BucketAllocatorException { offset -= baseOffset; if (offset < 0 || offset % itemAllocationSize != 0) - throw new BucketAllocatorException( - "Attempt to add allocation for bad offset: " + offset + " base=" - + baseOffset + ", bucket size=" + itemAllocationSize); + throw new BucketAllocatorException("Attempt to add allocation for bad offset: " + offset + + " base=" + baseOffset + ", bucket size=" + itemAllocationSize); int idx = (int) (offset / itemAllocationSize); boolean matchFound = false; for (int i = 0; i < freeCount; ++i) { if (matchFound) freeList[i - 1] = freeList[i]; else if (freeList[i] == idx) matchFound = true; } - if (!matchFound) - throw new BucketAllocatorException("Couldn't find match for index " - + idx + " in free list"); + if (!matchFound) throw new BucketAllocatorException( + "Couldn't find match for index " + idx + " in free list"); ++usedCount; --freeCount; } @@ -260,10 +257,8 @@ public synchronized IndexStatistics statistics() { @Override public String toString() { - return MoreObjects.toStringHelper(this.getClass()) - .add("sizeIndex", sizeIndex) - .add("bucketSize", bucketSizes[sizeIndex]) - .toString(); + return MoreObjects.toStringHelper(this.getClass()).add("sizeIndex", sizeIndex) + .add("bucketSize", bucketSizes[sizeIndex]).toString(); } } @@ -272,20 +267,17 @@ public String toString() { // The real block size in hfile maybe a little larger than the size we configured , // so we need add extra 1024 
bytes for fit. // TODO Support the view of block size distribution statistics - private static final int DEFAULT_BUCKET_SIZES[] = { 4 * 1024 + 1024, 8 * 1024 + 1024, - 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, 48 * 1024 + 1024, - 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, - 192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, - 512 * 1024 + 1024 }; + private static final int DEFAULT_BUCKET_SIZES[] = + { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, + 48 * 1024 + 1024, 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, + 192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, 512 * 1024 + 1024 }; /** - * Round up the given block size to bucket size, and get the corresponding - * BucketSizeInfo + * Round up the given block size to bucket size, and get the corresponding BucketSizeInfo */ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { for (int i = 0; i < bucketSizes.length; ++i) - if (blockSize <= bucketSizes[i]) - return bucketSizeInfos[i]; + if (blockSize <= bucketSizes[i]) return bucketSizeInfos[i]; return null; } @@ -303,16 +295,15 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { private final long totalSize; private transient long usedSize = 0; - BucketAllocator(long availableSpace, int[] bucketSizes) - throws BucketAllocatorException { + BucketAllocator(long availableSpace, int[] bucketSizes) throws BucketAllocatorException { this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes; Arrays.sort(this.bucketSizes); this.bigItemSize = Ints.max(this.bucketSizes); this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize; buckets = new Bucket[(int) (availableSpace / bucketCapacity)]; if (buckets.length < this.bucketSizes.length) - throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + - "); must have room for at least " + this.bucketSizes.length + " buckets"); + throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + + "); must have room for at least " + this.bucketSizes.length + " buckets"); bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length]; for (int i = 0; i < this.bucketSizes.length; ++i) { bucketSizeInfos[i] = new BucketSizeInfo(i); @@ -320,27 +311,26 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { for (int i = 0; i < buckets.length; ++i) { buckets[i] = new Bucket(bucketCapacity * i); bucketSizeInfos[i < this.bucketSizes.length ? i : this.bucketSizes.length - 1] - .instantiateBucket(buckets[i]); + .instantiateBucket(buckets[i]); } this.totalSize = ((long) buckets.length) * bucketCapacity; if (LOG.isInfoEnabled()) { - LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + - ", bucket capacity=" + this.bucketCapacity + - "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + this.bigItemSize + ")=" + - "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); + LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + + ", bucket capacity=" + this.bucketCapacity + "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + + this.bigItemSize + ")=" + + "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); } } /** * Rebuild the allocator's data structures from a persisted map. 
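The bucket sizing above amounts to a simple round-up rule: bucket sizes are kept sorted, each configured size carries 1 KB of slack over the nominal HFile block size, and a block lands in the first bucket size that can hold it (or is rejected if none can). The following self-contained sketch mirrors that rule; it is illustrative code, not the HBase implementation.

  import java.util.Arrays;

  // Illustrative round-up rule behind roundUpToBucketSizeInfo and DEFAULT_BUCKET_SIZES.
  public final class BucketSizeRounding {
    private final int[] bucketSizes;

    BucketSizeRounding(int[] bucketSizes) {
      this.bucketSizes = bucketSizes.clone();
      Arrays.sort(this.bucketSizes);
    }

    /** Returns the bucket size a block of blockSize bytes maps to, or -1 if no bucket fits. */
    int roundUp(int blockSize) {
      for (int candidate : bucketSizes) {
        if (blockSize <= candidate) {
          return candidate;
        }
      }
      return -1; // caller must reject the block, as allocateBlock() does with an exception
    }

    public static void main(String[] args) {
      BucketSizeRounding r =
        new BucketSizeRounding(new int[] { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024 });
      System.out.println(r.roundUp(6 * 1024));  // 9216 -> the 8K+1K bucket
      System.out.println(r.roundUp(64 * 1024)); // -1   -> no configured bucket fits
    }
  }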
* @param availableSpace capacity of cache - * @param map A map stores the block key and BucketEntry(block's meta data - * like offset, length) - * @param realCacheSize cached data size statistics for bucket cache - * @throws BucketAllocatorException + * @param map A map stores the block key and BucketEntry(block's meta data like offset, + * length) + * @param realCacheSize cached data size statistics for bucket cache n */ BucketAllocator(long availableSpace, int[] bucketSizes, Map map, - LongAdder realCacheSize) throws BucketAllocatorException { + LongAdder realCacheSize) throws BucketAllocatorException { this(availableSpace, bucketSizes); // each bucket has an offset, sizeindex. probably the buckets are too big @@ -381,7 +371,7 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { } else { if (!b.isCompletelyFree()) { throw new BucketAllocatorException( - "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data"); + "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data"); } // Need to remove the bucket from whichever list it's currently in at // the moment... @@ -398,8 +388,8 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { } if (sizeNotMatchedCount > 0) { - LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because " + - "there is no matching bucket size for these blocks"); + LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because " + + "there is no matching bucket size for these blocks"); } if (insufficientCapacityCount > 0) { LOG.warn("There are " + insufficientCapacityCount + " blocks which can't be rebuilt - " @@ -433,25 +423,21 @@ public long getTotalSize() { /** * Allocate a block with specified size. Return the offset - * @param blockSize size of block - * @throws BucketAllocatorException - * @throws CacheFullException - * @return the offset in the IOEngine + * @param blockSize size of block nn * @return the offset in the IOEngine */ - public synchronized long allocateBlock(int blockSize) throws CacheFullException, - BucketAllocatorException { + public synchronized long allocateBlock(int blockSize) + throws CacheFullException, BucketAllocatorException { assert blockSize > 0; BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize); if (bsi == null) { - throw new BucketAllocatorException("Allocation too big size=" + blockSize + - "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + - " to accomodate if size seems reasonable and you want it cached."); + throw new BucketAllocatorException("Allocation too big size=" + blockSize + + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + + " to accomodate if size seems reasonable and you want it cached."); } long offset = bsi.allocateBlock(); // Ask caller to free up space and try again! 
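The "free up space and try again" contract noted in the comment below can be sketched from the caller's side. Everything in this sketch is a hypothetical stand-in (CacheFull, Allocator, freeSpace) for CacheFullException, BucketAllocator#allocateBlock and BucketCache#freeSpace; it shows the retry shape only, not the real API or signatures.

  // Illustrative caller-side retry: on a full cache, free space once and retry the allocation.
  final class AllocateWithRetry {
    static final class CacheFull extends Exception {
    }

    interface Allocator {
      long tryAllocate(int blockSize) throws CacheFull; // returns an offset in the IOEngine
    }

    static long allocate(Allocator allocator, Runnable freeSpace, int blockSize) {
      try {
        return allocator.tryAllocate(blockSize);
      } catch (CacheFull full) {
        freeSpace.run(); // evict blocks until usage drops below the minimum factor
        try {
          return allocator.tryAllocate(blockSize);
        } catch (CacheFull stillFull) {
          return -1; // give up and leave the block uncached rather than blocking the writer
        }
      }
    }
  }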
- if (offset < 0) - throw new CacheFullException(blockSize, bsi.sizeIndex()); + if (offset < 0) throw new CacheFullException(blockSize, bsi.sizeIndex()); usedSize += bucketSizes[bsi.sizeIndex()]; return offset; } @@ -539,7 +525,7 @@ public void setTo(long free, long used, long itemSize) { } } - public Bucket [] getBuckets() { + public Bucket[] getBuckets() { return this.buckets; } @@ -547,11 +533,11 @@ void logStatistics() { IndexStatistics total = new IndexStatistics(); IndexStatistics[] stats = getIndexStatistics(total); LOG.info("Bucket allocator statistics follow:\n"); - LOG.info(" Free bytes=" + total.freeBytes() + "+; used bytes=" - + total.usedBytes() + "; total bytes=" + total.totalBytes()); + LOG.info(" Free bytes=" + total.freeBytes() + "+; used bytes=" + total.usedBytes() + + "; total bytes=" + total.totalBytes()); for (IndexStatistics s : stats) { - LOG.info(" Object size " + s.itemSize() + " used=" + s.usedCount() - + "; free=" + s.freeCount() + "; total=" + s.totalCount()); + LOG.info(" Object size " + s.itemSize() + " used=" + s.usedCount() + "; free=" + + s.freeCount() + "; total=" + s.totalCount()); } } @@ -585,33 +571,28 @@ public int getBucketIndex(long offset) { } /** - * Returns a set of indices of the buckets that are least filled - * excluding the offsets, we also the fully free buckets for the - * BucketSizes where everything is empty and they only have one + * Returns a set of indices of the buckets that are least filled excluding the offsets, we also + * the fully free buckets for the BucketSizes where everything is empty and they only have one * completely free bucket as a reserved - * - * @param excludedBuckets the buckets that need to be excluded due to - * currently being in used + * @param excludedBuckets the buckets that need to be excluded due to currently being in used * @param bucketCount max Number of buckets to return * @return set of bucket indices which could be used for eviction */ - public Set getLeastFilledBuckets(Set excludedBuckets, - int bucketCount) { - Queue queue = MinMaxPriorityQueue.orderedBy( - new Comparator() { - @Override - public int compare(Integer left, Integer right) { - // We will always get instantiated buckets - return Float.compare( - ((float) buckets[left].usedCount) / buckets[left].itemCount, - ((float) buckets[right].usedCount) / buckets[right].itemCount); - } - }).maximumSize(bucketCount).create(); - - for (int i = 0; i < buckets.length; i ++ ) { + public Set getLeastFilledBuckets(Set excludedBuckets, int bucketCount) { + Queue queue = MinMaxPriorityQueue. 
orderedBy(new Comparator() { + @Override + public int compare(Integer left, Integer right) { + // We will always get instantiated buckets + return Float.compare(((float) buckets[left].usedCount) / buckets[left].itemCount, + ((float) buckets[right].usedCount) / buckets[right].itemCount); + } + }).maximumSize(bucketCount).create(); + + for (int i = 0; i < buckets.length; i++) { if (!excludedBuckets.contains(i) && !buckets[i].isUninstantiated() && - // Avoid the buckets that are the only buckets for a sizeIndex - bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1) { + // Avoid the buckets that are the only buckets for a sizeIndex + bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1 + ) { queue.add(i); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java index 55172cf7fb94..c141edb947dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java @@ -1,25 +1,23 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index e05645415fc1..004c5c463664 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -50,7 +47,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import java.util.function.Function; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -88,21 +84,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos; /** - * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses - * BucketCache#ramCache and BucketCache#backingMap in order to - * determine if a given element is in the cache. The bucket cache can use on-heap or - * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to - * store/read the block data. - * - *
<p>
        Eviction is via a similar algorithm as used in + * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses BucketCache#ramCache + * and BucketCache#backingMap in order to determine if a given element is in the cache. The bucket + * cache can use on-heap or off-heap memory {@link ByteBufferIOEngine} or in a file + * {@link FileIOEngine} to store/read the block data. + *
<p>
        + * Eviction is via a similar algorithm as used in * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} - * - *
<p>
        BucketCache can be used as mainly a block cache (see - * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with - * a BlockCache to decrease CMS GC and heap fragmentation. - * - *
<p>
        It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store - * blocks) to enlarge cache space via a victim cache. + *
<p>
        + * BucketCache can be used as mainly a block cache (see + * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with a BlockCache to + * decrease CMS GC and heap fragmentation. + *
<p>
        + * It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store blocks) to + * enlarge cache space via a victim cache. */ @InterfaceAudience.Private public class BucketCache implements BlockCache, HeapSize { @@ -147,18 +142,17 @@ public class BucketCache implements BlockCache, HeapSize { transient ConcurrentHashMap backingMap; /** - * Flag if the cache is enabled or not... We shut it off if there are IO - * errors for some time, so that Bucket IO exceptions/errors don't bring down - * the HBase server. + * Flag if the cache is enabled or not... We shut it off if there are IO errors for some time, so + * that Bucket IO exceptions/errors don't bring down the HBase server. */ private volatile boolean cacheEnabled; /** - * A list of writer queues. We have a queue per {@link WriterThread} we have running. - * In other words, the work adding blocks to the BucketCache is divided up amongst the - * running WriterThreads. Its done by taking hash of the cache key modulo queue count. - * WriterThread when it runs takes whatever has been recently added and 'drains' the entries - * to the BucketCache. It then updates the ramCache and backingMap accordingly. + * A list of writer queues. We have a queue per {@link WriterThread} we have running. In other + * words, the work adding blocks to the BucketCache is divided up amongst the running + * WriterThreads. Its done by taking hash of the cache key modulo queue count. WriterThread when + * it runs takes whatever has been recently added and 'drains' the entries to the BucketCache. It + * then updates the ramCache and backingMap accordingly. */ transient final ArrayList> writerQueues = new ArrayList<>(); transient final WriterThread[] writerThreads; @@ -178,9 +172,9 @@ public class BucketCache implements BlockCache, HeapSize { private static final int DEFAULT_CACHE_WAIT_TIME = 50; /** - * Used in tests. If this flag is false and the cache speed is very fast, - * bucket cache will skip some blocks when caching. If the flag is true, we - * will wait until blocks are flushed to IOEngine. + * Used in tests. If this flag is false and the cache speed is very fast, bucket cache will skip + * some blocks when caching. If the flag is true, we will wait until blocks are flushed to + * IOEngine. */ boolean wait_when_cache = false; @@ -201,8 +195,8 @@ public class BucketCache implements BlockCache, HeapSize { private volatile long ioErrorStartTime = -1; /** - * A ReentrantReadWriteLock to lock on a particular block identified by offset. - * The purpose of this is to avoid freeing the block which is being read. + * A ReentrantReadWriteLock to lock on a particular block identified by offset. The purpose of + * this is to avoid freeing the block which is being read. *
<p>
        */ transient final IdReadWriteLock offsetLock; @@ -229,7 +223,10 @@ public class BucketCache implements BlockCache, HeapSize { /** Minimum threshold of cache (when evicting, evict until size < min) */ private float minFactor; - /** Free this floating point factor of extra blocks when evicting. For example free the number of blocks requested * (1 + extraFreeFactor) */ + /** + * Free this floating point factor of extra blocks when evicting. For example free the number of + * blocks requested * (1 + extraFreeFactor) + */ private float extraFreeFactor; /** Single access bucket size */ @@ -246,9 +243,9 @@ public class BucketCache implements BlockCache, HeapSize { private static final String DEFAULT_FILE_VERIFY_ALGORITHM = "MD5"; /** - * Use {@link java.security.MessageDigest} class's encryption algorithms to check - * persistent file integrity, default algorithm is MD5 - * */ + * Use {@link java.security.MessageDigest} class's encryption algorithms to check persistent file + * integrity, default algorithm is MD5 + */ private String algorithm; /* Tracing failed Bucket Cache allocations. */ @@ -256,14 +253,14 @@ public class BucketCache implements BlockCache, HeapSize { private static final int ALLOCATION_FAIL_LOG_TIME_PERIOD = 60000; // Default 1 minute. public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, - int writerThreadNum, int writerQLen, String persistencePath) throws IOException { + int writerThreadNum, int writerQLen, String persistencePath) throws IOException { this(ioEngineName, capacity, blockSize, bucketSizes, writerThreadNum, writerQLen, - persistencePath, DEFAULT_ERROR_TOLERATION_DURATION, HBaseConfiguration.create()); + persistencePath, DEFAULT_ERROR_TOLERATION_DURATION, HBaseConfiguration.create()); } public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, - int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration, - Configuration conf) throws IOException { + int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration, + Configuration conf) throws IOException { boolean useStrongRef = conf.getBoolean(STRONG_REF_KEY, STRONG_REF_DEFAULT); if (useStrongRef) { this.offsetLock = new IdReadWriteLockStrongRef<>(); @@ -288,9 +285,10 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck sanityCheckConfigs(); - LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + ", minFactor: " + minFactor + - ", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + singleFactor + ", multiFactor: " + multiFactor + - ", memoryFactor: " + memoryFactor + ", useStrongRef: " + useStrongRef); + LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + + ", minFactor: " + minFactor + ", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + + singleFactor + ", multiFactor: " + multiFactor + ", memoryFactor: " + memoryFactor + + ", useStrongRef: " + useStrongRef); this.cacheCapacity = capacity; this.persistencePath = persistencePath; @@ -326,27 +324,34 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck startWriterThreads(); // Run the statistics thread periodically to print the cache statistics log - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. 
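The writerQueues javadoc above describes routing work "by taking hash of the cache key modulo queue count" so that each WriterThread drains a stable subset of incoming blocks. A minimal sketch of that routing rule follows; Key and Entry are placeholders for BlockCacheKey and RAMQueueEntry, and the non-negative modulo handling is illustrative rather than the exact HBase expression.

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.ArrayBlockingQueue;
  import java.util.concurrent.BlockingQueue;

  // Illustrative routing: one bounded queue per writer thread, selected by key hash.
  final class WriterQueueRouting<Key, Entry> {
    private final List<BlockingQueue<Entry>> queues = new ArrayList<>();

    WriterQueueRouting(int writerThreads, int queueLength) {
      for (int i = 0; i < writerThreads; i++) {
        queues.add(new ArrayBlockingQueue<>(queueLength));
      }
    }

    /** Offers the entry to the queue owned by the writer thread responsible for this key. */
    boolean route(Key key, Entry entry) {
      int index = Math.floorMod(key.hashCode(), queues.size()); // non-negative even for negative hashes
      return queues.get(index).offer(entry);
    }
  }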
- this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), - statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS); - LOG.info("Started bucket cache; ioengine=" + ioEngineName + - ", capacity=" + StringUtils.byteDesc(capacity) + - ", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" + - writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" + - persistencePath + ", bucketAllocator=" + this.bucketAllocator.getClass().getName()); + this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod, + statThreadPeriod, TimeUnit.SECONDS); + LOG.info("Started bucket cache; ioengine=" + ioEngineName + ", capacity=" + + StringUtils.byteDesc(capacity) + ", blockSize=" + StringUtils.byteDesc(blockSize) + + ", writerThreadNum=" + writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" + + persistencePath + ", bucketAllocator=" + this.bucketAllocator.getClass().getName()); } private void sanityCheckConfigs() { - Preconditions.checkArgument(acceptableFactor <= 1 && acceptableFactor >= 0, ACCEPT_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(minFactor <= 1 && minFactor >= 0, MIN_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(minFactor <= acceptableFactor, MIN_FACTOR_CONFIG_NAME + " must be <= " + ACCEPT_FACTOR_CONFIG_NAME); - Preconditions.checkArgument(extraFreeFactor >= 0, EXTRA_FREE_FACTOR_CONFIG_NAME + " must be greater than 0.0"); - Preconditions.checkArgument(singleFactor <= 1 && singleFactor >= 0, SINGLE_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(multiFactor <= 1 && multiFactor >= 0, MULTI_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(memoryFactor <= 1 && memoryFactor >= 0, MEMORY_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument((singleFactor + multiFactor + memoryFactor) == 1, SINGLE_FACTOR_CONFIG_NAME + ", " + - MULTI_FACTOR_CONFIG_NAME + ", and " + MEMORY_FACTOR_CONFIG_NAME + " segments must add up to 1.0"); + Preconditions.checkArgument(acceptableFactor <= 1 && acceptableFactor >= 0, + ACCEPT_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(minFactor <= 1 && minFactor >= 0, + MIN_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(minFactor <= acceptableFactor, + MIN_FACTOR_CONFIG_NAME + " must be <= " + ACCEPT_FACTOR_CONFIG_NAME); + Preconditions.checkArgument(extraFreeFactor >= 0, + EXTRA_FREE_FACTOR_CONFIG_NAME + " must be greater than 0.0"); + Preconditions.checkArgument(singleFactor <= 1 && singleFactor >= 0, + SINGLE_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(multiFactor <= 1 && multiFactor >= 0, + MULTI_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(memoryFactor <= 1 && memoryFactor >= 0, + MEMORY_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument((singleFactor + multiFactor + memoryFactor) == 1, + SINGLE_FACTOR_CONFIG_NAME + ", " + MULTI_FACTOR_CONFIG_NAME + ", and " + + MEMORY_FACTOR_CONFIG_NAME + " segments must add up to 1.0"); } /** @@ -373,21 +378,16 @@ public String getIoEngine() { } /** - * Get the IOEngine from the IO engine name - * @param ioEngineName - * @param capacity - * @param persistencePath - * @return the IOEngine - * @throws IOException + * Get the IOEngine from the IO engine name nnn * @return the IOEngine n */ private 
IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath) - throws IOException { + throws IOException { if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) { // In order to make the usage simple, we only need the prefix 'files:' in // document whether one or multiple file(s), but also support 'file:' for // the compatibility - String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1) - .split(FileIOEngine.FILE_DELIMITER); + String[] filePaths = + ioEngineName.substring(ioEngineName.indexOf(":") + 1).split(FileIOEngine.FILE_DELIMITER); return new FileIOEngine(capacity, persistencePath != null, filePaths); } else if (ioEngineName.startsWith("offheap")) { return new ByteBufferIOEngine(capacity); @@ -403,14 +403,14 @@ private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity); } else { throw new IllegalArgumentException( - "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap"); + "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap"); } } /** * Cache the block with the specified name and buffer. * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @@ -419,9 +419,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { /** * Cache the block with the specified name and buffer. - * @param cacheKey block's cache key + * @param cacheKey block's cache key * @param cachedItem block buffer - * @param inMemory if block is in-memory + * @param inMemory if block is in-memory */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) { @@ -430,13 +430,13 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inM /** * Cache the block to ramCache - * @param cacheKey block's cache key + * @param cacheKey block's cache key * @param cachedItem block buffer - * @param inMemory if block is in-memory - * @param wait if true, blocking wait when queue is full + * @param inMemory if block is in-memory + * @param wait if true, blocking wait when queue is full */ public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, - boolean wait) { + boolean wait) { if (cacheEnabled) { if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) { if (shouldReplaceExistingCacheBlock(cacheKey, cachedItem)) { @@ -458,14 +458,14 @@ protected boolean shouldReplaceExistingCacheBlock(BlockCacheKey cacheKey, Cachea } protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cachedItem, - boolean inMemory, boolean wait) { + boolean inMemory, boolean wait) { if (!cacheEnabled) { return; } LOG.trace("Caching key={}, item={}", cacheKey, cachedItem); // Stuff the entry into the RAM cache so it can get drained to the persistent store RAMQueueEntry re = - new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory); + new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory); /** * Don't use ramCache.put(cacheKey, re) here. 
because there may be a existing entry with same * key in ramCache, the heap size of bucket cache need to update if replacing entry from @@ -499,15 +499,15 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach /** * Get the buffer of the block with the specified key. - * @param key block's cache key - * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block + * @param key block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block * @param updateCacheMetrics Whether we should update cache metrics or not * @return buffer of specified cache key, or null if not in cache */ @Override public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, - boolean updateCacheMetrics) { + boolean updateCacheMetrics) { if (!cacheEnabled) { return null; } @@ -609,7 +609,7 @@ public boolean evictBlock(BlockCacheKey cacheKey) { * {@link BucketCache#ramCache}.
        * NOTE:When Evict from {@link BucketCache#backingMap},only the matched {@link BlockCacheKey} and * {@link BucketEntry} could be removed. - * @param cacheKey {@link BlockCacheKey} to evict. + * @param cacheKey {@link BlockCacheKey} to evict. * @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict. * @return true to indicate whether we've evicted successfully or not. */ @@ -676,7 +676,7 @@ public boolean evictBlockIfNoRpcReferenced(BlockCacheKey blockCacheKey) { * NOTE:When evict from {@link BucketCache#backingMap},only the matched {@link BlockCacheKey} and * {@link BucketEntry} could be removed. * @param blockCacheKey {@link BlockCacheKey} to evict. - * @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict. + * @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict. * @return true to indicate whether we've evicted successfully or not. */ boolean evictBucketEntryIfNoRpcReferenced(BlockCacheKey blockCacheKey, BucketEntry bucketEntry) { @@ -696,7 +696,7 @@ protected boolean removeFromRamCache(BlockCacheKey cacheKey) { } /* - * Statistics thread. Periodically output cache statistics to the log. + * Statistics thread. Periodically output cache statistics to the log. */ private static class StatisticsThread extends Thread { private final BucketCache bucketCache; @@ -718,25 +718,24 @@ public void logStats() { long usedSize = bucketAllocator.getUsedSize(); long freeSize = totalSize - usedSize; long cacheSize = getRealCacheSize(); - LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " + - "totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "usedSize=" + StringUtils.byteDesc(usedSize) +", " + - "cacheSize=" + StringUtils.byteDesc(cacheSize) +", " + - "accesses=" + cacheStats.getRequestCount() + ", " + - "hits=" + cacheStats.getHitCount() + ", " + - "IOhitsPerSecond=" + cacheStats.getIOHitsPerSecond() + ", " + - "IOTimePerHit=" + String.format("%.2f", cacheStats.getIOTimePerHit())+ ", " + - "hitRatio=" + (cacheStats.getHitCount() == 0 ? "0," : - (StringUtils.formatPercent(cacheStats.getHitRatio(), 2)+ ", ")) + - "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", " + - "cachingHits=" + cacheStats.getHitCachingCount() + ", " + - "cachingHitsRatio=" +(cacheStats.getHitCachingCount() == 0 ? "0," : - (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)+ ", ")) + - "evictions=" + cacheStats.getEvictionCount() + ", " + - "evicted=" + cacheStats.getEvictedCount() + ", " + - "evictedPerRun=" + cacheStats.evictedPerEviction() + ", " + - "allocationFailCount=" + cacheStats.getAllocationFailCount()); + LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " + "totalSize=" + + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + + "usedSize=" + StringUtils.byteDesc(usedSize) + ", " + "cacheSize=" + + StringUtils.byteDesc(cacheSize) + ", " + "accesses=" + cacheStats.getRequestCount() + ", " + + "hits=" + cacheStats.getHitCount() + ", " + "IOhitsPerSecond=" + + cacheStats.getIOHitsPerSecond() + ", " + "IOTimePerHit=" + + String.format("%.2f", cacheStats.getIOTimePerHit()) + ", " + "hitRatio=" + + (cacheStats.getHitCount() == 0 + ? 
"0," + : (StringUtils.formatPercent(cacheStats.getHitRatio(), 2) + ", ")) + + "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", " + "cachingHits=" + + cacheStats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (cacheStats.getHitCachingCount() == 0 + ? "0," + : (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + cacheStats.getEvictionCount() + ", " + "evicted=" + + cacheStats.getEvictedCount() + ", " + "evictedPerRun=" + cacheStats.evictedPerEviction() + + ", " + "allocationFailCount=" + cacheStats.getAllocationFailCount()); cacheStats.reset(); } @@ -769,12 +768,10 @@ private int bucketSizesAboveThresholdCount(float minFactor) { } /** - * This method will find the buckets that are minimally occupied - * and are not reference counted and will free them completely - * without any constraint on the access times of the elements, - * and as a process will completely free at most the number of buckets - * passed, sometimes it might not due to changing refCounts - * + * This method will find the buckets that are minimally occupied and are not reference counted and + * will free them completely without any constraint on the access times of the elements, and as a + * process will completely free at most the number of buckets passed, sometimes it might not due + * to changing refCounts * @param completelyFreeBucketsNeeded number of buckets to free **/ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { @@ -788,7 +785,7 @@ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { } }); Set candidateBuckets = - bucketAllocator.getLeastFilledBuckets(inUseBuckets, completelyFreeBucketsNeeded); + bucketAllocator.getLeastFilledBuckets(inUseBuckets, completelyFreeBucketsNeeded); for (Map.Entry entry : backingMap.entrySet()) { if (candidateBuckets.contains(bucketAllocator.getBucketIndex(entry.getValue().offset()))) { evictBucketEntryIfNoRpcReferenced(entry.getKey(), entry.getValue()); @@ -798,9 +795,9 @@ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { } /** - * Free the space if the used size reaches acceptableSize() or one size block - * couldn't be allocated. When freeing the space, we use the LRU algorithm and - * ensure there must be some blocks evicted + * Free the space if the used size reaches acceptableSize() or one size block couldn't be + * allocated. When freeing the space, we use the LRU algorithm and ensure there must be some + * blocks evicted * @param why Why we are being called */ private void freeSpace(final String why) { @@ -812,7 +809,7 @@ private void freeSpace(final String why) { freeInProgress = true; long bytesToFreeWithoutExtra = 0; // Calculate free byte for each bucketSizeinfo - StringBuilder msgBuffer = LOG.isDebugEnabled()? new StringBuilder(): null; + StringBuilder msgBuffer = LOG.isDebugEnabled() ? 
new StringBuilder() : null; BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics(); long[] bytesToFreeForBucket = new long[stats.length]; for (int i = 0; i < stats.length; i++) { @@ -838,21 +835,22 @@ private void freeSpace(final String why) { long currentSize = bucketAllocator.getUsedSize(); long totalSize = bucketAllocator.getTotalSize(); if (LOG.isDebugEnabled() && msgBuffer != null) { - LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + - " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + - StringUtils.byteDesc(realCacheSize.sum()) + ", total=" + StringUtils.byteDesc(totalSize)); + LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + + " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + + StringUtils.byteDesc(realCacheSize.sum()) + ", total=" + + StringUtils.byteDesc(totalSize)); } - long bytesToFreeWithExtra = (long) Math.floor(bytesToFreeWithoutExtra - * (1 + extraFreeFactor)); + long bytesToFreeWithExtra = + (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor)); // Instantiate priority buckets - BucketEntryGroup bucketSingle = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(singleFactor)); - BucketEntryGroup bucketMulti = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(multiFactor)); - BucketEntryGroup bucketMemory = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(memoryFactor)); + BucketEntryGroup bucketSingle = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(singleFactor)); + BucketEntryGroup bucketMulti = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(multiFactor)); + BucketEntryGroup bucketMemory = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(memoryFactor)); // Scan entire map putting bucket entry into appropriate bucket entry // group @@ -873,8 +871,8 @@ private void freeSpace(final String why) { } } - PriorityQueue bucketQueue = new PriorityQueue<>(3, - Comparator.comparingLong(BucketEntryGroup::overflow)); + PriorityQueue bucketQueue = + new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow)); bucketQueue.add(bucketSingle); bucketQueue.add(bucketMulti); @@ -887,8 +885,8 @@ private void freeSpace(final String why) { while ((bucketGroup = bucketQueue.poll()) != null) { long overflow = bucketGroup.overflow(); if (overflow > 0) { - long bucketBytesToFree = Math.min(overflow, - (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets); + long bucketBytesToFree = + Math.min(overflow, (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets); bytesFreed += bucketGroup.free(bucketBytesToFree); } remainingBuckets--; @@ -915,8 +913,7 @@ private void freeSpace(final String why) { // there might be some buckets where the occupancy is very sparse and thus are not // yielding the free for the other bucket sizes, the fix for this to evict some // of the buckets, we do this by evicting the buckets that are least fulled - freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * - bucketSizesAboveThresholdCount(1.0f)); + freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * bucketSizesAboveThresholdCount(1.0f)); if (LOG.isDebugEnabled()) { long single = bucketSingle.totalSize(); @@ -924,11 +921,9 @@ private void freeSpace(final String why) { long memory = bucketMemory.totalSize(); if (LOG.isDebugEnabled()) { LOG.debug("Bucket cache free space completed; " + "freed=" - + 
StringUtils.byteDesc(bytesFreed) + ", " + "total=" - + StringUtils.byteDesc(totalSize) + ", " + "single=" - + StringUtils.byteDesc(single) + ", " + "multi=" - + StringUtils.byteDesc(multi) + ", " + "memory=" - + StringUtils.byteDesc(memory)); + + StringUtils.byteDesc(bytesFreed) + ", " + "total=" + StringUtils.byteDesc(totalSize) + + ", " + "single=" + StringUtils.byteDesc(single) + ", " + "multi=" + + StringUtils.byteDesc(multi) + ", " + "memory=" + StringUtils.byteDesc(memory)); } } @@ -992,7 +987,7 @@ public void run() { * bucketAllocator do not free its memory. * @see BlockCacheUtil#shouldReplaceExistingCacheBlock(BlockCache blockCache,BlockCacheKey * cacheKey, Cacheable newBlock) - * @param key Block cache key + * @param key Block cache key * @param bucketEntry Bucket entry to put into backingMap. */ protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) { @@ -1008,11 +1003,11 @@ protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) { /** * Prepare and return a warning message for Bucket Allocator Exception * @param fle The exception - * @param re The RAMQueueEntry for which the exception was thrown. + * @param re The RAMQueueEntry for which the exception was thrown. * @return A warning message created from the input RAMQueueEntry object. */ private static String getAllocationFailWarningMessage(final BucketAllocatorException fle, - final RAMQueueEntry re) { + final RAMQueueEntry re) { final StringBuilder sb = new StringBuilder(); sb.append("Most recent failed allocation after "); sb.append(ALLOCATION_FAIL_LOG_TIME_PERIOD); @@ -1045,7 +1040,7 @@ private static String getAllocationFailWarningMessage(final BucketAllocatorExcep * are passed in even if failure being sure to remove from ramCache else we'll never undo the * references and we'll OOME. * @param entries Presumes list passed in here will be processed by this invocation only. No - * interference expected. + * interference expected. */ void doDrain(final List entries, ByteBuffer metaBuff) throws InterruptedException { if (entries.isEmpty()) { @@ -1081,8 +1076,8 @@ void doDrain(final List entries, ByteBuffer metaBuff) throws Inte // transferred with our current IOEngines. Should take care, when we have new kinds of // IOEngine in the future. metaBuff.clear(); - BucketEntry bucketEntry = re.writeToCache(ioEngine, bucketAllocator, realCacheSize, - this::createRecycler, metaBuff); + BucketEntry bucketEntry = + re.writeToCache(ioEngine, bucketAllocator, realCacheSize, this::createRecycler, metaBuff); // Successfully added. Up index and add bucketEntry. Clear io exceptions. bucketEntries[index] = bucketEntry; if (ioErrorStartTime > 0) { @@ -1092,7 +1087,9 @@ void doDrain(final List entries, ByteBuffer metaBuff) throws Inte } catch (BucketAllocatorException fle) { long currTs = EnvironmentEdgeManager.currentTime(); cacheStats.allocationFailed(); // Record the warning. - if (allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) { + if ( + allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD + ) { LOG.warn(getAllocationFailWarningMessage(fle, re)); allocFailLogPrevTs = currTs; } @@ -1165,12 +1162,12 @@ void doDrain(final List entries, ByteBuffer metaBuff) throws Inte * Blocks until elements available in {@code q} then tries to grab as many as possible before * returning. * @param receptacle Where to stash the elements taken from queue. We clear before we use it just - * in case. - * @param q The queue to take from. 
+ * in case. + * @param q The queue to take from. * @return {@code receptacle} laden with elements taken from the queue or empty if none found. */ static List getRAMQueueEntries(BlockingQueue q, - List receptacle) throws InterruptedException { + List receptacle) throws InterruptedException { // Clear sets all entries to null and sets size to 0. We retain allocations. Presume it // ok even if list grew to accommodate thousands. receptacle.clear(); @@ -1182,7 +1179,7 @@ static List getRAMQueueEntries(BlockingQueue q, /** * @see #retrieveFromFile(int[]) */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="OBL_UNSATISFIED_OBLIGATION", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.") private void persistToFile() throws IOException { assert !cacheEnabled; @@ -1211,13 +1208,13 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { int read = in.read(pbuf); if (read != pblen) { throw new IOException("Incorrect number of bytes read while checking for protobuf magic " - + "number. Requested=" + pblen + ", Received= " + read + ", File=" + persistencePath); + + "number. Requested=" + pblen + ", Received= " + read + ", File=" + persistencePath); } - if (! ProtobufMagic.isPBMagicPrefix(pbuf)) { + if (!ProtobufMagic.isPBMagicPrefix(pbuf)) { // In 3.0 we have enough flexibility to dump the old cache data. // TODO: In 2.x line, this might need to be filled in to support reading the old format - throw new IOException("Persistence file does not start with protobuf magic number. " + - persistencePath); + throw new IOException( + "Persistence file does not start with protobuf magic number. " + persistencePath); } parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in)); bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize); @@ -1228,6 +1225,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { /** * Create an input stream that deletes the file after reading it. Use in try-with-resources to * avoid this pattern where an exception thrown from a finally block may mask earlier exceptions: + * *
<pre>
            *   File f = ...
            *   try (FileInputStream fis = new FileInputStream(f)) {
        @@ -1236,6 +1234,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException {
            *     if (!f.delete()) throw new IOException("failed to delete");
            *   }
            * </pre>
        + * * @param file the file to read and delete * @return a FileInputStream for the given file * @throws IOException if there is a problem creating the stream @@ -1243,10 +1242,12 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { private FileInputStream deleteFileOnClose(final File file) throws IOException { return new FileInputStream(file) { private File myFile; + private FileInputStream init(File file) { myFile = file; return this; } + @Override public void close() throws IOException { // close() will be called during try-with-resources and it will be @@ -1266,19 +1267,18 @@ public void close() throws IOException { } private void verifyCapacityAndClasses(long capacitySize, String ioclass, String mapclass) - throws IOException { + throws IOException { if (capacitySize != cacheCapacity) { - throw new IOException("Mismatched cache capacity:" - + StringUtils.byteDesc(capacitySize) + ", expected: " - + StringUtils.byteDesc(cacheCapacity)); + throw new IOException("Mismatched cache capacity:" + StringUtils.byteDesc(capacitySize) + + ", expected: " + StringUtils.byteDesc(cacheCapacity)); } if (!ioEngine.getClass().getName().equals(ioclass)) { - throw new IOException("Class name for IO engine mismatch: " + ioclass - + ", expected:" + ioEngine.getClass().getName()); + throw new IOException("Class name for IO engine mismatch: " + ioclass + ", expected:" + + ioEngine.getClass().getName()); } if (!backingMap.getClass().getName().equals(mapclass)) { - throw new IOException("Class name for cache map mismatch: " + mapclass - + ", expected:" + backingMap.getClass().getName()); + throw new IOException("Class name for cache map mismatch: " + mapclass + ", expected:" + + backingMap.getClass().getName()); } } @@ -1296,9 +1296,8 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio } /** - * Check whether we tolerate IO error this time. If the duration of IOEngine - * throwing errors exceeds ioErrorsDurationTimeTolerated, we will disable the - * cache + * Check whether we tolerate IO error this time. If the duration of IOEngine throwing errors + * exceeds ioErrorsDurationTimeTolerated, we will disable the cache */ private void checkIOErrorIsTolerated() { long now = EnvironmentEdgeManager.currentTime(); @@ -1306,8 +1305,8 @@ private void checkIOErrorIsTolerated() { long ioErrorStartTimeTmp = this.ioErrorStartTime; if (ioErrorStartTimeTmp > 0) { if (cacheEnabled && (now - ioErrorStartTimeTmp) > this.ioErrorsTolerationDuration) { - LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + - "ms, disabling cache, please check your IOEngine"); + LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + + "ms, disabling cache, please check your IOEngine"); disableCache(); } } else { @@ -1323,7 +1322,8 @@ private void disableCache() { cacheEnabled = false; ioEngine.shutdown(); this.scheduleThreadPool.shutdown(); - for (int i = 0; i < writerThreads.length; ++i) writerThreads[i].interrupt(); + for (int i = 0; i < writerThreads.length; ++i) + writerThreads[i].interrupt(); this.ramCache.clear(); if (!ioEngine.isPersistent() || persistencePath == null) { // If persistent ioengine and a path, we will serialize out the backingMap. 
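The delete-on-close pattern documented above for the persistence file can also be written as a small named subclass instead of the anonymous FileInputStream used by deleteFileOnClose. The sketch below shows the same idea and is not the actual implementation; used as try (FileInputStream in = new DeleteOnCloseFileInputStream(file)) { ... }, a failed delete surfaces from close() instead of masking an earlier exception in a finally block.

  import java.io.File;
  import java.io.FileInputStream;
  import java.io.IOException;

  // Illustrative delete-on-close stream: the backing file is removed when the stream closes.
  final class DeleteOnCloseFileInputStream extends FileInputStream {
    private final File file;

    DeleteOnCloseFileInputStream(File file) throws IOException {
      super(file);
      this.file = file;
    }

    @Override
    public void close() throws IOException {
      super.close(); // release the descriptor before deleting
      if (file.exists() && !file.delete()) {
        throw new IOException("Failed to delete " + file);
      }
    }
  }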
@@ -1339,8 +1339,8 @@ private void join() throws InterruptedException { @Override public void shutdown() { disableCache(); - LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() - + "; path to write=" + persistencePath); + LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() + "; path to write=" + + persistencePath); if (ioEngine.isPersistent() && persistencePath != null) { try { join(); @@ -1405,19 +1405,17 @@ protected String getAlgorithm() { * Evicts all blocks for a specific HFile. *
<p>
        * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override public int evictBlocksByHfileName(String hfileName) { - Set keySet = blocksByHFile.subSet( - new BlockCacheKey(hfileName, Long.MIN_VALUE), true, - new BlockCacheKey(hfileName, Long.MAX_VALUE), true); + Set keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), + true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); int numEvicted = 0; for (BlockCacheKey key : keySet) { if (evictBlock(key)) { - ++numEvicted; + ++numEvicted; } } @@ -1425,10 +1423,9 @@ public int evictBlocksByHfileName(String hfileName) { } /** - * Used to group bucket entries into priority buckets. There will be a - * BucketEntryGroup for each priority (single, multi, memory). Once bucketed, - * the eviction algorithm takes the appropriate number of elements out of each - * according to configuration parameters and their relative sizes. + * Used to group bucket entries into priority buckets. There will be a BucketEntryGroup for each + * priority (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate + * number of elements out of each according to configuration parameters and their relative sizes. */ private class BucketEntryGroup { @@ -1510,8 +1507,8 @@ private ByteBuffAllocator getByteBuffAllocator() { } public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator alloc, - final LongAdder realCacheSize, Function createRecycler, - ByteBuffer metaBuff) throws IOException { + final LongAdder realCacheSize, Function createRecycler, + ByteBuffer metaBuff) throws IOException { int len = data.getSerializedLength(); // This cacheable thing can't be serialized if (len == 0) { @@ -1522,7 +1519,7 @@ public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator a BucketEntry bucketEntry = null; try { bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory, createRecycler, - getByteBuffAllocator()); + getByteBuffAllocator()); bucketEntry.setDeserializerReference(data.getDeserializer()); if (data instanceof HFileBlock) { // If an instance of HFileBlock, save on some allocations. @@ -1549,8 +1546,7 @@ public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator a } /** - * Only used in test - * @throws InterruptedException + * Only used in test n */ void stopWriterThreads() throws InterruptedException { for (WriterThread writerThread : writerThreads) { @@ -1563,8 +1559,7 @@ void stopWriterThreads() throws InterruptedException { @Override public Iterator iterator() { // Don't bother with ramcache since stuff is in here only a little while. - final Iterator> i = - this.backingMap.entrySet().iterator(); + final Iterator> i = this.backingMap.entrySet().iterator(); return new Iterator() { private final long now = System.nanoTime(); @@ -1589,7 +1584,7 @@ public BlockPriority getBlockPriority() { @Override public BlockType getBlockType() { - // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. + // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. 
return null; } @@ -1621,8 +1616,8 @@ public int compareTo(CachedBlock other) { diff = Long.compare(this.getOffset(), other.getOffset()); if (diff != 0) return diff; if (other.getCachedTime() < 0 || this.getCachedTime() < 0) { - throw new IllegalStateException("" + this.getCachedTime() + ", " + - other.getCachedTime()); + throw new IllegalStateException( + "" + this.getCachedTime() + ", " + other.getCachedTime()); } return Long.compare(other.getCachedTime(), this.getCachedTime()); } @@ -1635,7 +1630,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1695,11 +1690,11 @@ static class RAMCache { /** * Defined the map as {@link ConcurrentHashMap} explicitly here, because in * {@link RAMCache#get(BlockCacheKey)} and - * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to - * guarantee the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). - * Besides, the func method can execute exactly once only when the key is present(or absent) - * and under the lock context. Otherwise, the reference count of block will be messed up. - * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to guarantee + * the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). Besides, the + * func method can execute exactly once only when the key is present(or absent) and under the + * lock context. Otherwise, the reference count of block will be messed up. Notice that the + * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ final ConcurrentHashMap delegate = new ConcurrentHashMap<>(); @@ -1733,7 +1728,8 @@ public RAMQueueEntry putIfAbsent(BlockCacheKey key, RAMQueueEntry entry) { } public boolean remove(BlockCacheKey key) { - return remove(key, re->{}); + return remove(key, re -> { + }); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java index 4a2b0a13590d..73ca011004a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java @@ -1,29 +1,27 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * Class that implements cache metrics for bucket cache. @@ -46,9 +44,8 @@ public class BucketCacheStats extends CacheStats { @Override public String toString() { - return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + - ", ioTimePerHit=" + getIOTimePerHit() + ", allocationFailCount=" + - getAllocationFailCount(); + return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + ", ioTimePerHit=" + + getIOTimePerHit() + ", allocationFailCount=" + getAllocationFailCount(); } public void ioHit(long time) { @@ -79,7 +76,7 @@ public long getAllocationFailCount() { return allocationFailCount.sum(); } - public void allocationFailed () { + public void allocationFailed() { allocationFailCount.increment(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index 222cd804112d..a04a32bfe645 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -26,7 +23,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; import org.apache.hadoop.hbase.io.hfile.BlockPriority; @@ -50,7 +46,7 @@ class BucketEntry implements HBaseReferenceCounted { // access counter comparator, descending order static final Comparator COMPARATOR = - Comparator.comparingLong(BucketEntry::getAccessCounter).reversed(); + Comparator.comparingLong(BucketEntry::getAccessCounter).reversed(); private int offsetBase; private int length; @@ -99,11 +95,11 @@ class BucketEntry implements HBaseReferenceCounted { /** * @param createRecycler used to free this {@link BucketEntry} when {@link BucketEntry#refCnt} - * becoming 0. NOTICE that {@link ByteBuffAllocator#NONE} could only be used for test. + * becoming 0. NOTICE that {@link ByteBuffAllocator#NONE} could only be used + * for test. */ BucketEntry(long offset, int length, long accessCounter, boolean inMemory, - Function createRecycler, - ByteBuffAllocator allocator) { + Function createRecycler, ByteBuffAllocator allocator) { if (createRecycler == null) { throw new IllegalArgumentException("createRecycler could not be null!"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index b2a00f1795e5..ff4e90b88650 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -24,7 +21,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; @@ -32,9 +28,10 @@ import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager; import org.apache.hadoop.hbase.io.hfile.HFileBlock; -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos; @InterfaceAudience.Private @@ -44,39 +41,33 @@ private BucketProtoUtils() { } static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) { - return BucketCacheProtos.BucketCacheEntry.newBuilder() - .setCacheCapacity(cache.getMaxSize()) + return BucketCacheProtos.BucketCacheEntry.newBuilder().setCacheCapacity(cache.getMaxSize()) .setIoClass(cache.ioEngine.getClass().getName()) .setMapClass(cache.backingMap.getClass().getName()) .putAllDeserializers(CacheableDeserializerIdManager.save()) .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)) - .setChecksum(ByteString.copyFrom(((PersistentIOEngine) cache.ioEngine). - calculateChecksum(cache.getAlgorithm()))).build(); + .setChecksum(ByteString + .copyFrom(((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm()))) + .build(); } - private static BucketCacheProtos.BackingMap toPB( - Map backingMap) { + private static BucketCacheProtos.BackingMap toPB(Map backingMap) { BucketCacheProtos.BackingMap.Builder builder = BucketCacheProtos.BackingMap.newBuilder(); for (Map.Entry entry : backingMap.entrySet()) { - builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder() - .setKey(toPB(entry.getKey())) - .setValue(toPB(entry.getValue())) - .build()); + builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder().setKey(toPB(entry.getKey())) + .setValue(toPB(entry.getValue())).build()); } return builder.build(); } private static BucketCacheProtos.BlockCacheKey toPB(BlockCacheKey key) { - return BucketCacheProtos.BlockCacheKey.newBuilder() - .setHfilename(key.getHfileName()) - .setOffset(key.getOffset()) - .setPrimaryReplicaBlock(key.isPrimary()) - .setBlockType(toPB(key.getBlockType())) - .build(); + return BucketCacheProtos.BlockCacheKey.newBuilder().setHfilename(key.getHfileName()) + .setOffset(key.getOffset()).setPrimaryReplicaBlock(key.isPrimary()) + .setBlockType(toPB(key.getBlockType())).build(); } private static BucketCacheProtos.BlockType toPB(BlockType blockType) { - switch(blockType) { + switch (blockType) { case DATA: return BucketCacheProtos.BlockType.data; case META: @@ -107,13 +98,9 @@ private static BucketCacheProtos.BlockType toPB(BlockType blockType) { } private static BucketCacheProtos.BucketEntry toPB(BucketEntry entry) { - return BucketCacheProtos.BucketEntry.newBuilder() - .setOffset(entry.offset()) - .setLength(entry.getLength()) - .setDeserialiserIndex(entry.deserializerIndex) - .setAccessCounter(entry.getAccessCounter()) - .setPriority(toPB(entry.getPriority())) - .build(); + return BucketCacheProtos.BucketEntry.newBuilder().setOffset(entry.offset()) + .setLength(entry.getLength()).setDeserialiserIndex(entry.deserializerIndex) + 
.setAccessCounter(entry.getAccessCounter()).setPriority(toPB(entry.getPriority())).build(); } private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { @@ -129,24 +116,21 @@ private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { } } - static ConcurrentHashMap fromPB( - Map deserializers, BucketCacheProtos.BackingMap backingMap, - Function createRecycler) - throws IOException { + static ConcurrentHashMap fromPB(Map deserializers, + BucketCacheProtos.BackingMap backingMap, Function createRecycler) + throws IOException { ConcurrentHashMap result = new ConcurrentHashMap<>(); for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) { BucketCacheProtos.BlockCacheKey protoKey = entry.getKey(); BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), protoKey.getOffset(), - protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType())); + protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType())); BucketCacheProtos.BucketEntry protoValue = entry.getValue(); // TODO:We use ByteBuffAllocator.HEAP here, because we could not get the ByteBuffAllocator // which created by RpcServer elegantly. - BucketEntry value = new BucketEntry( - protoValue.getOffset(), - protoValue.getLength(), - protoValue.getAccessCounter(), - protoValue.getPriority() == BucketCacheProtos.BlockPriority.memory, createRecycler, - ByteBuffAllocator.HEAP); + BucketEntry value = new BucketEntry(protoValue.getOffset(), protoValue.getLength(), + protoValue.getAccessCounter(), + protoValue.getPriority() == BucketCacheProtos.BlockPriority.memory, createRecycler, + ByteBuffAllocator.HEAP); // This is the deserializer that we stored int oldIndex = protoValue.getDeserialiserIndex(); String deserializerClass = deserializers.get(oldIndex); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java index b0415e3e50ba..78166e88ffdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java @@ -1,31 +1,29 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferAllocator; import org.apache.hadoop.hbase.util.ByteBufferArray; +import org.apache.yetus.audience.InterfaceAudience; /** * IO engine that stores data in memory using an array of ByteBuffers {@link ByteBufferArray}. @@ -66,9 +64,8 @@ public class ByteBufferIOEngine implements IOEngine { private final long capacity; /** - * Construct the ByteBufferIOEngine with the given capacity - * @param capacity - * @throws IOException ideally here no exception to be thrown from the allocator + * Construct the ByteBufferIOEngine with the given capacity n * @throws IOException ideally here + * no exception to be thrown from the allocator */ public ByteBufferIOEngine(long capacity) throws IOException { this.capacity = capacity; @@ -78,14 +75,12 @@ public ByteBufferIOEngine(long capacity) throws IOException { @Override public String toString() { - return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" + - String.format("%,d", this.capacity); + return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" + + String.format("%,d", this.capacity); } /** - * Memory IO engine is always unable to support persistent storage for the - * cache - * @return false + * Memory IO engine is always unable to support persistent storage for the cache n */ @Override public boolean isPersistent() { @@ -111,7 +106,7 @@ public Cacheable read(BucketEntry be) throws IOException { /** * Transfers data from the given {@link ByteBuffer} to the buffer array. Position of source will * be advanced by the {@link ByteBuffer#remaining()}. - * @param src the given byte buffer from which bytes are to be read. + * @param src the given byte buffer from which bytes are to be read. * @param offset The offset in the ByteBufferArray of the first byte to be written * @throws IOException throws IOException if writing to the array throws exception */ @@ -123,7 +118,7 @@ public void write(ByteBuffer src, long offset) throws IOException { /** * Transfers data from the given {@link ByteBuff} to the buffer array. Position of source will be * advanced by the {@link ByteBuffer#remaining()}. - * @param src the given byte buffer from which bytes are to be read. + * @param src the given byte buffer from which bytes are to be read. 
* @param offset The offset in the ByteBufferArray of the first byte to be written * @throws IOException throws IOException if writing to the array throws exception */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java index d2cbdb7b16c6..15c7ee3236cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java @@ -1,30 +1,27 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by {@link BucketAllocator#allocateBlock(int)} when cache is full for - * the requested size + * Thrown by {@link BucketAllocator#allocateBlock(int)} when cache is full for the requested size */ @InterfaceAudience.Private public class CacheFullException extends IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java index b4e77bd2348e..daa25cee1de2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java @@ -1,43 +1,38 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; - import java.util.Comparator; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue; /** - * A memory-bound queue that will grow until an element brings total size larger - * than maxSize. From then on, only entries that are sorted larger than the - * smallest current entry will be inserted/replaced. - * + * A memory-bound queue that will grow until an element brings total size larger than maxSize. From + * then on, only entries that are sorted larger than the smallest current entry will be + * inserted/replaced. *
        - * Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified maxSize - * as possible. Default behavior is to grow just above rather than just below - * specified max. + * Use this when you want to find the largest elements (according to their ordering, not their heap + * size) that consume as close to the specified maxSize as possible. Default behavior is to grow + * just above rather than just below specified max. */ @InterfaceAudience.Private public class CachedEntryQueue { @@ -51,7 +46,7 @@ public class CachedEntryQueue { private long maxSize; /** - * @param maxSize the target size of elements in the queue + * @param maxSize the target size of elements in the queue * @param blockSize expected average size of blocks */ public CachedEntryQueue(long maxSize, long blockSize) { @@ -69,15 +64,15 @@ public CachedEntryQueue(long maxSize, long blockSize) { /** * Attempt to add the specified entry to this queue. *
        - * If the queue is smaller than the max size, or if the specified element is - * ordered after the smallest element in the queue, the element will be added - * to the queue. Otherwise, there is no side effect of this call. + * If the queue is smaller than the max size, or if the specified element is ordered after the + * smallest element in the queue, the element will be added to the queue. Otherwise, there is no + * side effect of this call. * @param entry a bucket entry with key to try to add to the queue */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", - justification = "head can not be null as cacheSize is greater than maxSize," - + " which means we have something in the queue") + value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", + justification = "head can not be null as cacheSize is greater than maxSize," + + " which means we have something in the queue") public void add(Map.Entry entry) { if (cacheSize < maxSize) { queue.add(entry); @@ -98,16 +93,14 @@ public void add(Map.Entry entry) { } /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. + * @return The next element in this queue, or {@code null} if the queue is empty. */ public Map.Entry poll() { return queue.poll(); } /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. + * @return The last element in this queue, or {@code null} if the queue is empty. */ public Map.Entry pollLast() { return queue.pollLast(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java index 3169a66539aa..da5f49596c2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java @@ -1,23 +1,23 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index e4a2c0b1aeaa..511d8afff461 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; @@ -55,7 +54,7 @@ public class FileIOEngine extends PersistentIOEngine { private FileWriteAccessor writeAccessor = new FileWriteAccessor(); public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths) - throws IOException { + throws IOException { super(filePaths); this.sizePerFile = capacity / filePaths.length; this.capacity = this.sizePerFile * filePaths.length; @@ -82,9 +81,8 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... filePa if (totalSpace < sizePerFile) { // The next setting length will throw exception,logging this message // is just used for the detail reason of exception, - String msg = "Only " + StringUtils.byteDesc(totalSpace) - + " total space under " + filePath + ", not enough for requested " - + StringUtils.byteDesc(sizePerFile); + String msg = "Only " + StringUtils.byteDesc(totalSpace) + " total space under " + filePath + + ", not enough for requested " + StringUtils.byteDesc(sizePerFile); LOG.warn(msg); } File file = new File(filePath); @@ -95,8 +93,8 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... 
filePa } fileChannels[i] = rafs[i].getChannel(); channelLocks[i] = new ReentrantLock(); - LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) - + ", on the path:" + filePath); + LOG.info( + "Allocating cache " + StringUtils.byteDesc(sizePerFile) + ", on the path:" + filePath); } catch (IOException fex) { LOG.error("Failed allocating cache on " + filePath, fex); shutdown(); @@ -107,13 +105,12 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... filePa @Override public String toString() { - return "ioengine=" + this.getClass().getSimpleName() + ", paths=" - + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity); + return "ioengine=" + this.getClass().getSimpleName() + ", paths=" + Arrays.asList(filePaths) + + ", capacity=" + String.format("%,d", this.capacity); } /** - * File IO engine is always able to support persistent storage for the cache - * @return true + * File IO engine is always able to support persistent storage for the cache n */ @Override public boolean isPersistent() { @@ -141,7 +138,7 @@ public Cacheable read(BucketEntry be) throws IOException { // ensure that the results are not corrupted before consuming them. if (dstBuff.limit() != length) { throw new IllegalArgumentIOException( - "Only " + dstBuff.limit() + " bytes read, " + length + " expected"); + "Only " + dstBuff.limit() + " bytes read, " + length + " expected"); } } catch (IOException ioe) { dstBuff.release(); @@ -153,7 +150,7 @@ public Cacheable read(BucketEntry be) throws IOException { } void closeFileChannels() { - for (FileChannel fileChannel: fileChannels) { + for (FileChannel fileChannel : fileChannels) { try { fileChannel.close(); } catch (IOException e) { @@ -165,8 +162,7 @@ void closeFileChannels() { /** * Transfers data from the given byte buffer to file * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the file where the first byte to be written - * @throws IOException + * @param offset The offset in the file where the first byte to be written n */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { @@ -174,8 +170,7 @@ public void write(ByteBuffer srcBuffer, long offset) throws IOException { } /** - * Sync the data to file after writing - * @throws IOException + * Sync the data to file after writing n */ @Override public void sync() throws IOException { @@ -218,8 +213,8 @@ public void write(ByteBuff srcBuff, long offset) throws IOException { accessFile(writeAccessor, srcBuff, offset); } - private void accessFile(FileAccessor accessor, ByteBuff buff, - long globalOffset) throws IOException { + private void accessFile(FileAccessor accessor, ByteBuff buff, long globalOffset) + throws IOException { int startFileNum = getFileNum(globalOffset); int remainingAccessDataLen = buff.remaining(); int endFileNum = getFileNum(globalOffset + remainingAccessDataLen - 1); @@ -252,17 +247,15 @@ private void accessFile(FileAccessor accessor, ByteBuff buff, } if (accessFileNum >= fileChannels.length) { throw new IOException("Required data len " + StringUtils.byteDesc(buff.remaining()) - + " exceed the engine's capacity " + StringUtils.byteDesc(capacity) + " where offset=" - + globalOffset); + + " exceed the engine's capacity " + StringUtils.byteDesc(capacity) + " where offset=" + + globalOffset); } } } /** - * Get the absolute offset in given file with the relative global offset. 
- * @param fileNum - * @param globalOffset - * @return the absolute offset + * Get the absolute offset in given file with the relative global offset. nn * @return the + * absolute offset */ private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) { return globalOffset - fileNum * sizePerFile; @@ -274,8 +267,7 @@ private int getFileNum(long offset) { } int fileNum = (int) (offset / sizePerFile); if (fileNum >= fileChannels.length) { - throw new RuntimeException("Not expected offset " + offset - + " where capacity=" + capacity); + throw new RuntimeException("Not expected offset " + offset + " where capacity=" + capacity); } return fileNum; } @@ -298,31 +290,30 @@ void refreshFileConnection(int accessFileNum, IOException ioe) throws IOExceptio fileChannel.close(); } LOG.warn("Caught ClosedChannelException accessing BucketCache, reopening file: " - + filePaths[accessFileNum], ioe); + + filePaths[accessFileNum], ioe); rafs[accessFileNum] = new RandomAccessFile(filePaths[accessFileNum], "rw"); fileChannels[accessFileNum] = rafs[accessFileNum].getChannel(); - } finally{ + } finally { channelLock.unlock(); } } private interface FileAccessor { - int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) - throws IOException; + int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) throws IOException; } private static class FileReadAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuff buff, - long accessOffset) throws IOException { + public int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) + throws IOException { return buff.read(fileChannel, accessOffset); } } private static class FileWriteAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuff buff, - long accessOffset) throws IOException { + public int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) + throws IOException { return buff.write(fileChannel, accessOffset); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java index c0cb22d0b074..b09e0963ca22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferAllocator; @@ -34,8 +33,7 @@ import org.slf4j.LoggerFactory; /** - * IO engine that stores data to a file on the specified file system using memory mapping - * mechanism + * IO engine that stores data to a file on the specified file system using memory mapping mechanism */ @InterfaceAudience.Private public abstract class FileMmapIOEngine extends PersistentIOEngine { @@ -93,12 +91,11 @@ private long roundUp(long n, long to) { @Override public String toString() { return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path + ", size=" - + String.format("%,d", this.size); + + String.format("%,d", this.size); } /** - * File IO engine is always able to support persistent storage for the cache - * @return true + * File IO engine is always able to support persistent storage for the cache n */ @Override public boolean isPersistent() { @@ -112,8 +109,7 @@ public boolean isPersistent() { /** * Transfers data from the given byte buffer to file * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the file where the first byte to be written - * @throws IOException + * @param offset The offset in the file where the first byte to be written n */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { @@ -126,8 +122,7 @@ public void write(ByteBuff srcBuffer, long offset) throws IOException { } /** - * Sync the data to file after writing - * @throws IOException + * Sync the data to file after writing n */ @Override public void sync() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java index 3ffb57ebcf03..62ff19878fc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java @@ -1,33 +1,30 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.yetus.audience.InterfaceAudience; /** - * A class implementing IOEngine interface supports data services for - * {@link BucketCache}. + * A class implementing IOEngine interface supports data services for {@link BucketCache}. */ @InterfaceAudience.Private public interface IOEngine { @@ -49,7 +46,7 @@ default boolean usesSharedMemory() { * Transfers data from IOEngine to a Cacheable object. * @param be maintains an (offset,len,refCnt) inside. * @return Cacheable which will wrap the NIO ByteBuffers from IOEngine. - * @throws IOException when any IO error happen + * @throws IOException when any IO error happen * @throws IllegalArgumentException when the length of the ByteBuff read is less than 'len' */ Cacheable read(BucketEntry be) throws IOException; @@ -57,23 +54,19 @@ default boolean usesSharedMemory() { /** * Transfers data from the given byte buffer to IOEngine * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the IO engine where the first byte to be - * written - * @throws IOException + * @param offset The offset in the IO engine where the first byte to be written n */ void write(ByteBuffer srcBuffer, long offset) throws IOException; /** * Transfers the data from the given MultiByteBuffer to IOEngine * @param srcBuffer the given MultiBytebufffers from which bytes are to be read - * @param offset the offset in the IO engine where the first byte to be written - * @throws IOException + * @param offset the offset in the IO engine where the first byte to be written n */ void write(ByteBuff srcBuffer, long offset) throws IOException; /** - * Sync the data to IOEngine after writing - * @throws IOException + * Sync the data to IOEngine after writing n */ void sync() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java index 4ee7d0ed1bec..495814fdc5fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.Shell; import org.apache.yetus.audience.InterfaceAudience; @@ -29,13 +28,13 @@ import org.slf4j.LoggerFactory; /** - * A class implementing PersistentIOEngine interface supports file integrity verification - * for {@link BucketCache} which use persistent IOEngine + * A class implementing PersistentIOEngine interface supports file integrity verification for + * {@link BucketCache} which use persistent IOEngine */ @InterfaceAudience.Private public abstract class PersistentIOEngine implements IOEngine { private static final Logger LOG = LoggerFactory.getLogger(PersistentIOEngine.class); - private static final DuFileCommand DU = new DuFileCommand(new String[] {"du", ""}); + private static final DuFileCommand DU = new DuFileCommand(new String[] { "du", "" }); protected final String[] filePaths; public PersistentIOEngine(String... filePaths) { @@ -50,22 +49,22 @@ protected void verifyFileIntegrity(byte[] persistentChecksum, String algorithm) throws IOException { byte[] calculateChecksum = calculateChecksum(algorithm); if (!Bytes.equals(persistentChecksum, calculateChecksum)) { - throw new IOException("Mismatch of checksum! The persistent checksum is " + - Bytes.toString(persistentChecksum) + ", but the calculate checksum is " + - Bytes.toString(calculateChecksum)); + throw new IOException( + "Mismatch of checksum! The persistent checksum is " + Bytes.toString(persistentChecksum) + + ", but the calculate checksum is " + Bytes.toString(calculateChecksum)); } } /** * Using an encryption algorithm to calculate a checksum, the default encryption algorithm is MD5 * @return the checksum which is convert to HexString - * @throws IOException something happened like file not exists + * @throws IOException something happened like file not exists * @throws NoSuchAlgorithmException no such algorithm */ protected byte[] calculateChecksum(String algorithm) { try { StringBuilder sb = new StringBuilder(); - for (String filePath : filePaths){ + for (String filePath : filePaths) { File file = new File(filePath); sb.append(filePath); sb.append(getFileSize(filePath)); @@ -113,4 +112,3 @@ public String[] getExecString() { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java index 53690602093a..77c881888fc8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java index b1f298e3772f..d17ee627e3ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,13 @@ import java.lang.management.ManagementFactory; import java.lang.management.MemoryType; import java.lang.management.MemoryUsage; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.MemStoreLAB; -import org.apache.hadoop.hbase.util.Pair; /** * Util class to calculate memory size for memstore, block cache(L1, L2) of RS. @@ -37,15 +36,15 @@ public class MemorySizeUtil { public static final String MEMSTORE_SIZE_KEY = "hbase.regionserver.global.memstore.size"; public static final String MEMSTORE_SIZE_OLD_KEY = - "hbase.regionserver.global.memstore.upperLimit"; + "hbase.regionserver.global.memstore.upperLimit"; public static final String MEMSTORE_SIZE_LOWER_LIMIT_KEY = - "hbase.regionserver.global.memstore.size.lower.limit"; + "hbase.regionserver.global.memstore.size.lower.limit"; public static final String MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY = - "hbase.regionserver.global.memstore.lowerLimit"; + "hbase.regionserver.global.memstore.lowerLimit"; // Max global off heap memory that can be used for all memstores // This should be an absolute value in MBs and not percent. public static final String OFFHEAP_MEMSTORE_SIZE_KEY = - "hbase.regionserver.offheap.global.memstore.size"; + "hbase.regionserver.offheap.global.memstore.size"; public static final float DEFAULT_MEMSTORE_SIZE = 0.4f; // Default lower water mark limit is 95% size of memstore size. @@ -55,15 +54,15 @@ public class MemorySizeUtil { // a constant to convert a fraction to a percentage private static final int CONVERT_TO_PERCENTAGE = 100; - private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " + - "information about the JVM heap. Please submit this log information in a bug report and " + - "include your JVM settings, specifically the GC in use and any -XX options. Consider " + - "restarting the service."; + private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " + + "information about the JVM heap. Please submit this log information in a bug report and " + + "include your JVM settings, specifically the GC in use and any -XX options. Consider " + + "restarting the service."; /** * Return JVM memory statistics while properly handling runtime exceptions from the JVM. - * @return a memory usage object, null if there was a runtime exception. (n.b. 
you - * could also get -1 values back from the JVM) + * @return a memory usage object, null if there was a runtime exception. (n.b. you could also get + * -1 values back from the JVM) * @see MemoryUsage */ public static MemoryUsage safeGetHeapMemoryUsage() { @@ -78,43 +77,41 @@ public static MemoryUsage safeGetHeapMemoryUsage() { /** * Checks whether we have enough heap memory left out after portion for Memstore and Block cache. - * We need atleast 20% of heap left out for other RS functions. - * @param conf + * We need atleast 20% of heap left out for other RS functions. n */ public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) { if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) { LOG.warn(MEMSTORE_SIZE_OLD_KEY + " is deprecated by " + MEMSTORE_SIZE_KEY); } float globalMemstoreSize = getGlobalMemStoreHeapPercent(conf, false); - int gml = (int)(globalMemstoreSize * CONVERT_TO_PERCENTAGE); + int gml = (int) (globalMemstoreSize * CONVERT_TO_PERCENTAGE); float blockCacheUpperLimit = getBlockCacheHeapPercent(conf); - int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); - if (CONVERT_TO_PERCENTAGE - (gml + bcul) - < (int)(CONVERT_TO_PERCENTAGE * - HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) { + int bcul = (int) (blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); + if ( + CONVERT_TO_PERCENTAGE - (gml + bcul) + < (int) (CONVERT_TO_PERCENTAGE * HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD) + ) { throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds " - + "the threshold required for successful cluster operation. " - + "The combined value cannot exceed 0.8. Please check " - + "the settings for hbase.regionserver.global.memstore.size and " - + "hfile.block.cache.size in your configuration. " - + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize - + " hfile.block.cache.size is " + blockCacheUpperLimit); + + "the threshold required for successful cluster operation. " + + "The combined value cannot exceed 0.8. Please check " + + "the settings for hbase.regionserver.global.memstore.size and " + + "hfile.block.cache.size in your configuration. " + + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize + + " hfile.block.cache.size is " + blockCacheUpperLimit); } } /** - * Retrieve global memstore configured size as percentage of total heap. - * @param c - * @param logInvalid + * Retrieve global memstore configured size as percentage of total heap. nn */ public static float getGlobalMemStoreHeapPercent(final Configuration c, - final boolean logInvalid) { - float limit = c.getFloat(MEMSTORE_SIZE_KEY, - c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); + final boolean logInvalid) { + float limit = + c.getFloat(MEMSTORE_SIZE_KEY, c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); if (limit > 0.8f || limit <= 0.0f) { if (logInvalid) { LOG.warn("Setting global memstore limit to default of " + DEFAULT_MEMSTORE_SIZE - + " because supplied value outside allowed range of (0 -> 0.8]"); + + " because supplied value outside allowed range of (0 -> 0.8]"); } limit = DEFAULT_MEMSTORE_SIZE; } @@ -126,13 +123,13 @@ public static float getGlobalMemStoreHeapPercent(final Configuration c, * size. 
*/ public static float getGlobalMemStoreHeapLowerMark(final Configuration conf, - boolean honorOldConfig) { + boolean honorOldConfig) { String lowMarkPercentStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_KEY); if (lowMarkPercentStr != null) { float lowMarkPercent = Float.parseFloat(lowMarkPercentStr); if (lowMarkPercent > 1.0f) { LOG.error("Bad configuration value for " + MEMSTORE_SIZE_LOWER_LIMIT_KEY + ": " - + lowMarkPercent + ". Using 1.0f instead."); + + lowMarkPercent + ". Using 1.0f instead."); lowMarkPercent = 1.0f; } return lowMarkPercent; @@ -141,15 +138,15 @@ public static float getGlobalMemStoreHeapLowerMark(final Configuration conf, String lowerWaterMarkOldValStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY); if (lowerWaterMarkOldValStr != null) { LOG.warn(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " is deprecated. Instead use " - + MEMSTORE_SIZE_LOWER_LIMIT_KEY); + + MEMSTORE_SIZE_LOWER_LIMIT_KEY); float lowerWaterMarkOldVal = Float.parseFloat(lowerWaterMarkOldValStr); float upperMarkPercent = getGlobalMemStoreHeapPercent(conf, false); if (lowerWaterMarkOldVal > upperMarkPercent) { lowerWaterMarkOldVal = upperMarkPercent; LOG.error("Value of " + MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " (" + lowerWaterMarkOldVal - + ") is greater than global memstore limit (" + upperMarkPercent + ") set by " - + MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit " - + "to " + upperMarkPercent); + + ") is greater than global memstore limit (" + upperMarkPercent + ") set by " + + MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit " + + "to " + upperMarkPercent); } return lowerWaterMarkOldVal / upperMarkPercent; } @@ -174,8 +171,8 @@ public static Pair getGlobalMemStoreSize(Configuration conf) { // Off heap max memstore size is configured with turning off MSLAB. It makes no sense. Do a // warn log and go with on heap memstore percentage. By default it will be 40% of Xmx LOG.warn("There is no relevance of configuring '" + OFFHEAP_MEMSTORE_SIZE_KEY + "' when '" - + MemStoreLAB.USEMSLAB_KEY + "' is turned off." - + " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')"); + + MemStoreLAB.USEMSLAB_KEY + "' is turned off." + + " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')"); } } return new Pair<>(getOnheapGlobalMemStoreSize(conf), MemoryType.HEAP); @@ -183,9 +180,7 @@ public static Pair getGlobalMemStoreSize(Configuration conf) { /** * Returns the onheap global memstore limit based on the config - * 'hbase.regionserver.global.memstore.size'. - * @param conf - * @return the onheap global memstore limt + * 'hbase.regionserver.global.memstore.size'. n * @return the onheap global memstore limt */ public static long getOnheapGlobalMemStoreSize(Configuration conf) { long max = -1L; @@ -198,13 +193,12 @@ public static long getOnheapGlobalMemStoreSize(Configuration conf) { } /** - * Retrieve configured size for on heap block cache as percentage of total heap. - * @param conf + * Retrieve configured size for on heap block cache as percentage of total heap. 
n */ public static float getBlockCacheHeapPercent(final Configuration conf) { // L1 block cache is always on heap float l1CachePercent = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, - HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); return l1CachePercent; } @@ -220,25 +214,25 @@ public static long getOnHeapCacheSize(final Configuration conf) { return -1; } if (cachePercentage > 1.0) { - throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + - " must be between 0.0 and 1.0, and not > 1.0"); + throw new IllegalArgumentException( + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " must be between 0.0 and 1.0, and not > 1.0"); } long max = -1L; final MemoryUsage usage = safeGetHeapMemoryUsage(); if (usage != null) { max = usage.getMax(); } - float onHeapCacheFixedSize = (float) conf - .getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, + float onHeapCacheFixedSize = + (float) conf.getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT) / max; // Calculate the amount of heap to give the heap. - return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) ? - (long) (max * onHeapCacheFixedSize) : - (long) (max * cachePercentage); + return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) + ? (long) (max * onHeapCacheFixedSize) + : (long) (max * cachePercentage); } /** - * @param conf used to read config for bucket cache size. + * @param conf used to read config for bucket cache size. * @return the number of bytes to use for bucket cache, negative if disabled. */ public static long getBucketCacheSize(final Configuration conf) { @@ -246,7 +240,7 @@ public static long getBucketCacheSize(final Configuration conf) { float bucketCacheSize = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F); if (bucketCacheSize < 1) { throw new IllegalArgumentException("Bucket Cache should be minimum 1 MB in size." - + "Configure 'hbase.bucketcache.size' with > 1 value"); + + "Configure 'hbase.bucketcache.size' with > 1 value"); } return (long) (bucketCacheSize * 1024 * 1024); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java index b5b79670d930..dee517c2e0af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,22 +25,17 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** - * Adaptive LIFO blocking queue utilizing CoDel algorithm to prevent queue overloading. - * - * Implementing {@link BlockingQueue} interface to be compatible with {@link RpcExecutor}. - * - * Currently uses milliseconds internally, need to look into whether we should use - * nanoseconds for timeInterval and minDelay. - * + * Adaptive LIFO blocking queue utilizing CoDel algorithm to prevent queue overloading. 
Implementing + * {@link BlockingQueue} interface to be compatible with {@link RpcExecutor}. Currently uses + * milliseconds internally, need to look into whether we should use nanoseconds for timeInterval and + * minDelay. * @see Fail at Scale paper - * - * @see - * CoDel version for generic job queues in Wangle library + * @see CoDel + * version for generic job queues in Wangle library */ @InterfaceAudience.Private public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { @@ -76,7 +71,7 @@ public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { private AtomicBoolean isOverloaded = new AtomicBoolean(false); public AdaptiveLifoCoDelCallQueue(int capacity, int targetDelay, int interval, - double lifoThreshold, LongAdder numGeneralCallsDropped, LongAdder numLifoModeSwitches) { + double lifoThreshold, LongAdder numGeneralCallsDropped, LongAdder numLifoModeSwitches) { this.maxCapacity = capacity; this.queue = new LinkedBlockingDeque<>(capacity); this.codelTargetDelay = targetDelay; @@ -88,29 +83,27 @@ public AdaptiveLifoCoDelCallQueue(int capacity, int targetDelay, int interval, /** * Update tunables. - * * @param newCodelTargetDelay new CoDel target delay - * @param newCodelInterval new CoDel interval - * @param newLifoThreshold new Adaptive Lifo threshold + * @param newCodelInterval new CoDel interval + * @param newLifoThreshold new Adaptive Lifo threshold */ public void updateTunables(int newCodelTargetDelay, int newCodelInterval, - double newLifoThreshold) { + double newLifoThreshold) { this.codelTargetDelay = newCodelTargetDelay; this.codelInterval = newCodelInterval; this.lifoThreshold = newLifoThreshold; } /** - * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently - * skip all calls which it thinks should be dropped. - * + * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently skip all calls which it + * thinks should be dropped. * @return the head of this queue * @throws InterruptedException if interrupted while waiting */ @Override public CallRunner take() throws InterruptedException { CallRunner cr; - while(true) { + while (true) { if (((double) queue.size() / this.maxCapacity) > lifoThreshold) { numLifoModeSwitches.increment(); cr = queue.takeLast(); @@ -130,7 +123,7 @@ public CallRunner take() throws InterruptedException { public CallRunner poll() { CallRunner cr; boolean switched = false; - while(true) { + while (true) { if (((double) queue.size() / this.maxCapacity) > lifoThreshold) { // Only count once per switch. if (!switched) { @@ -156,8 +149,8 @@ public CallRunner poll() { /** * @param callRunner to validate - * @return true if this call needs to be skipped based on call timestamp - * and internal queue state (deemed overloaded). + * @return true if this call needs to be skipped based on call timestamp and internal queue state + * (deemed overloaded). 
*/ private boolean needToDrop(CallRunner callRunner) { long now = EnvironmentEdgeManager.currentTime(); @@ -167,9 +160,7 @@ private boolean needToDrop(CallRunner callRunner) { // Try and determine if we should reset // the delay time and determine overload - if (now > intervalTime && - !resetDelay.get() && - !resetDelay.getAndSet(true)) { + if (now > intervalTime && !resetDelay.get() && !resetDelay.getAndSet(true)) { intervalTime = now + codelInterval; isOverloaded.set(localMinDelay > codelTargetDelay); @@ -209,129 +200,128 @@ public String toString() { @Override public CallRunner poll(long timeout, TimeUnit unit) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } - @Override public CallRunner peek() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean remove(Object o) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean contains(Object o) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public Object[] toArray() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public T[] toArray(T[] a) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public void clear() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int drainTo(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int drainTo(Collection c, int maxElements) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public Iterator iterator() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean add(CallRunner callRunner) { - throw new 
UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public CallRunner remove() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public CallRunner element() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean addAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean isEmpty() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean containsAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean removeAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean retainAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int remainingCapacity() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public void put(CallRunner callRunner) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean offer(CallRunner callRunner, long timeout, TimeUnit unit) - throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throws InterruptedException { + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java index a2d0169010eb..e4aea3799396 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java index 8e5467478caf..24bda5a6e123 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java @@ -37,15 +37,15 @@ public class BalancedQueueRpcExecutor extends RpcExecutor { private final QueueBalancer balancer; public BalancedQueueRpcExecutor(final String name, final int handlerCount, - final int maxQueueLength, final PriorityFunction priority, final Configuration conf, - final Abortable abortable) { + final int maxQueueLength, final PriorityFunction priority, final Configuration conf, + final Abortable abortable) { this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, CALL_QUEUE_TYPE_CONF_DEFAULT), - maxQueueLength, priority, conf, abortable); + maxQueueLength, priority, conf, abortable); } public BalancedQueueRpcExecutor(final String name, final int handlerCount, - final String callQueueType, final int maxQueueLength, final PriorityFunction priority, - final Configuration conf, final Abortable abortable) { + final String callQueueType, final int maxQueueLength, final PriorityFunction priority, + final Configuration conf, final Abortable abortable) { super(name, handlerCount, callQueueType, maxQueueLength, priority, conf, abortable); initializeQueues(this.numCallQueues); this.balancer = getBalancer(name, conf, getQueues()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java index 915b82df4261..534a467eda16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +20,10 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; - import org.apache.yetus.audience.InterfaceAudience; /** - * Chain of ByteBuffers. - * Used writing out an array of byte buffers. Writes in chunks. + * Chain of ByteBuffers. Used writing out an array of byte buffers. Writes in chunks. */ @InterfaceAudience.Private class BufferChain { @@ -43,15 +41,15 @@ class BufferChain { } /** - * Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This - * call drains this instance; it cannot be used subsequent to the call. + * Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This call + * drains this instance; it cannot be used subsequent to the call. * @return A new byte buffer with the content of all contained ByteBuffers. 
*/ - byte [] getBytes() { + byte[] getBytes() { if (!hasRemaining()) throw new IllegalAccessError(); - byte [] bytes = new byte [this.remaining]; + byte[] bytes = new byte[this.remaining]; int offset = 0; - for (ByteBuffer bb: this.buffers) { + for (ByteBuffer bb : this.buffers) { int length = bb.remaining(); bb.get(bytes, offset, length); offset += length; @@ -65,10 +63,9 @@ boolean hasRemaining() { /** * Write out our chain of buffers in chunks - * @param channel Where to write + * @param channel Where to write * @param chunkSize Size of chunks to write. - * @return Amount written. - * @throws IOException + * @return Amount written. n */ long write(GatheringByteChannel channel, int chunkSize) throws IOException { int chunkRemaining = chunkSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java index 19a75eae1101..f0bd9bb2562f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,10 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.HashMap; import java.util.Map; import java.util.Set; - +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class CallQueueInfo { @@ -45,7 +43,8 @@ public Set getCalledMethodNames(String callQueueName) { public long getCallMethodCount(String callQueueName, String methodName) { long methodCount; - Map methodCountMap = callQueueMethodCountsSummary.getOrDefault(callQueueName, null); + Map methodCountMap = + callQueueMethodCountsSummary.getOrDefault(callQueueName, null); if (null != methodCountMap) { methodCount = methodCountMap.getOrDefault(methodName, 0L); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 1f85346908ff..0134e11d8914 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -35,19 +35,18 @@ import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The request processing logic, which is usually executed in thread pools provided by an - * {@link RpcScheduler}. Call {@link #run()} to actually execute the contained - * RpcServer.Call + * {@link RpcScheduler}. 
Call {@link #run()} to actually execute the contained RpcServer.Call */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class CallRunner { - private static final CallDroppedException CALL_DROPPED_EXCEPTION - = new CallDroppedException(); + private static final CallDroppedException CALL_DROPPED_EXCEPTION = new CallDroppedException(); private RpcCall call; private RpcServerInterface rpcServer; @@ -57,7 +56,7 @@ public class CallRunner { /** * On construction, adds the size of this call to the running count of outstanding call sizes. - * Presumption is that we are put on a queue while we wait on an executor to run us. During this + * Presumption is that we are put on a queue while we wait on an executor to run us. During this * time we occupy heap. */ // The constructor is shutdown so only RpcServer in this class can make one of these. @@ -118,8 +117,8 @@ public void run() { try (Scope ignored1 = ipcServerSpan.makeCurrent()) { if (!this.rpcServer.isStarted()) { InetSocketAddress address = rpcServer.getListenerAddress(); - throw new ServerNotRunningYetException("Server " + - (address != null ? address : "(channel closed)") + " is not running yet"); + throw new ServerNotRunningYetException( + "Server " + (address != null ? address : "(channel closed)") + " is not running yet"); } // make the call resultPair = this.rpcServer.call(call, this.status); @@ -141,7 +140,7 @@ public void run() { errorThrowable = e; error = StringUtils.stringifyException(e); if (e instanceof Error) { - throw (Error)e; + throw (Error) e; } } finally { RpcServer.CurCall.set(null); @@ -163,8 +162,9 @@ public void run() { // don't touch `span` here because its status and `end()` are managed in `call#setResponse()` } catch (OutOfMemoryError e) { TraceUtil.setError(span, e); - if (this.rpcServer.getErrorHandler() != null - && this.rpcServer.getErrorHandler().checkOOME(e)) { + if ( + this.rpcServer.getErrorHandler() != null && this.rpcServer.getErrorHandler().checkOOME(e) + ) { RpcServer.LOG.info("{}: exiting on OutOfMemoryError", Thread.currentThread().getName()); // exception intentionally swallowed } else { @@ -173,9 +173,10 @@ public void run() { } } catch (ClosedChannelException cce) { InetSocketAddress address = rpcServer.getListenerAddress(); - RpcServer.LOG.warn("{}: caught a ClosedChannelException, " + - "this means that the server " + (address != null ? address : "(channel closed)") + - " was processing a request but the client went away. The error message was: {}", + RpcServer.LOG.warn( + "{}: caught a ClosedChannelException, " + "this means that the server " + + (address != null ? address : "(channel closed)") + + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce); } catch (Exception e) { @@ -217,9 +218,10 @@ public void drop() { this.rpcServer.getMetrics().exception(CALL_DROPPED_EXCEPTION); } catch (ClosedChannelException cce) { InetSocketAddress address = rpcServer.getListenerAddress(); - RpcServer.LOG.warn("{}: caught a ClosedChannelException, " + - "this means that the server " + (address != null ? address : "(channel closed)") + - " was processing a request but the client went away. 
The error message was: {}", + RpcServer.LOG.warn( + "{}: caught a ClosedChannelException, " + "this means that the server " + + (address != null ? address : "(channel closed)") + + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce); } catch (Exception e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java index 9ca292751d4f..798c3bed959a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,4 +21,5 @@ @InterfaceAudience.Private @SuppressWarnings("serial") -public class EmptyServiceNameException extends FatalConnectionException {} +public class EmptyServiceNameException extends FatalConnectionException { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java index 9e6a0bb103a1..7dcf5c1361af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java @@ -26,10 +26,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Balanced queue executor with a fastpath. Because this is FIFO, it has no respect for - * ordering so a fast path skipping the queuing of Calls if an Handler is available, is possible. - * Just pass the Call direct to waiting Handler thread. Try to keep the hot Handlers bubbling - * rather than let them go cold and lose context. Idea taken from Apace Kudu (incubating). See + * Balanced queue executor with a fastpath. Because this is FIFO, it has no respect for ordering so + * a fast path skipping the queuing of Calls if an Handler is available, is possible. Just pass the + * Call direct to waiting Handler thread. Try to keep the hot Handlers bubbling rather than let them + * go cold and lose context. Idea taken from Apace Kudu (incubating). 
See * https://gerrit.cloudera.org/#/c/2938/7/src/kudu/rpc/service_queue.h */ @InterfaceAudience.Private @@ -42,35 +42,35 @@ public class FastPathBalancedQueueRpcExecutor extends BalancedQueueRpcExecutor { private final Deque fastPathHandlerStack = new ConcurrentLinkedDeque<>(); public FastPathBalancedQueueRpcExecutor(final String name, final int handlerCount, - final int maxQueueLength, final PriorityFunction priority, final Configuration conf, - final Abortable abortable) { + final int maxQueueLength, final PriorityFunction priority, final Configuration conf, + final Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); } public FastPathBalancedQueueRpcExecutor(final String name, final int handlerCount, - final String callQueueType, final int maxQueueLength, final PriorityFunction priority, - final Configuration conf, final Abortable abortable) { + final String callQueueType, final int maxQueueLength, final PriorityFunction priority, + final Configuration conf, final Abortable abortable) { super(name, handlerCount, callQueueType, maxQueueLength, priority, conf, abortable); } @Override protected RpcHandler getHandler(final String name, final double handlerFailureThreshhold, - final int handlerCount, final BlockingQueue q, - final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, - final Abortable abortable) { + final int handlerCount, final BlockingQueue q, + final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, + final Abortable abortable) { return new FastPathRpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable, fastPathHandlerStack); } @Override public boolean dispatch(CallRunner callTask) { - //FastPathHandlers don't check queue limits, so if we're completely shut down - //we have to prevent ourselves from using the handler in the first place - if (currentQueueLimit == 0){ + // FastPathHandlers don't check queue limits, so if we're completely shut down + // we have to prevent ourselves from using the handler in the first place + if (currentQueueLimit == 0) { return false; } FastPathRpcHandler handler = popReadyHandler(); - return handler != null? handler.loadCallRunner(callTask): super.dispatch(callTask); + return handler != null ? handler.loadCallRunner(callTask) : super.dispatch(callTask); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java index b07f44900fbb..63436e1dd4ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java @@ -31,7 +31,7 @@ * RPC Executor that extends {@link RWQueueRpcExecutor} with fast-path feature, used in * {@link FastPathBalancedQueueRpcExecutor}. 
*/ -@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class FastPathRWQueueRpcExecutor extends RWQueueRpcExecutor { @@ -40,17 +40,18 @@ public class FastPathRWQueueRpcExecutor extends RWQueueRpcExecutor { private final Deque scanHandlerStack = new ConcurrentLinkedDeque<>(); public FastPathRWQueueRpcExecutor(String name, int handlerCount, int maxQueueLength, - PriorityFunction priority, Configuration conf, Abortable abortable) { + PriorityFunction priority, Configuration conf, Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); } @Override protected RpcHandler getHandler(final String name, final double handlerFailureThreshhold, - final int handlerCount, final BlockingQueue q, - final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, - final Abortable abortable) { - Deque handlerStack = name.contains("read") ? readHandlerStack : - name.contains("write") ? writeHandlerStack : scanHandlerStack; + final int handlerCount, final BlockingQueue q, + final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, + final Abortable abortable) { + Deque handlerStack = name.contains("read") ? readHandlerStack + : name.contains("write") ? writeHandlerStack + : scanHandlerStack; return new FastPathRpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable, handlerStack); } @@ -60,9 +61,11 @@ public boolean dispatch(final CallRunner callTask) { RpcCall call = callTask.getRpcCall(); boolean shouldDispatchToWriteQueue = isWriteRequest(call.getHeader(), call.getParam()); boolean shouldDispatchToScanQueue = shouldDispatchToScanQueue(callTask); - FastPathRpcHandler handler = shouldDispatchToWriteQueue ? writeHandlerStack.poll() : - shouldDispatchToScanQueue ? scanHandlerStack.poll() : readHandlerStack.poll(); - return handler != null ? handler.loadCallRunner(callTask) : - dispatchTo(shouldDispatchToWriteQueue, shouldDispatchToScanQueue, callTask); + FastPathRpcHandler handler = shouldDispatchToWriteQueue ? writeHandlerStack.poll() + : shouldDispatchToScanQueue ? scanHandlerStack.poll() + : readHandlerStack.poll(); + return handler != null + ? handler.loadCallRunner(callTask) + : dispatchTo(shouldDispatchToWriteQueue, shouldDispatchToScanQueue, callTask); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java index 3064c7aa324d..991a8019d0f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,9 +36,8 @@ public class FastPathRpcHandler extends RpcHandler { private CallRunner loadedCallRunner; FastPathRpcHandler(String name, double handlerFailureThreshhold, int handlerCount, - BlockingQueue q, AtomicInteger activeHandlerCount, - AtomicInteger failedHandlerCount, final Abortable abortable, - final Deque fastPathHandlerStack) { + BlockingQueue q, AtomicInteger activeHandlerCount, AtomicInteger failedHandlerCount, + final Abortable abortable, final Deque fastPathHandlerStack) { super(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable); this.fastPathHandlerStack = fastPathHandlerStack; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index cfd085ebc771..b1b2193d5b9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -28,13 +28,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil; /** - * A very simple {@code }RpcScheduler} that serves incoming requests in order. - * - * This can be used for HMaster, where no prioritization is needed. + * A very simple {@code }RpcScheduler} that serves incoming requests in order. This can be used for + * HMaster, where no prioritization is needed. */ @InterfaceAudience.Private public class FifoRpcScheduler extends RpcScheduler { @@ -47,7 +47,7 @@ public class FifoRpcScheduler extends RpcScheduler { public FifoRpcScheduler(Configuration conf, int handlerCount) { this.handlerCount = handlerCount; this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, - handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); } @Override @@ -95,7 +95,7 @@ public boolean dispatch(final CallRunner task) { } protected boolean executeRpcCall(final ThreadPoolExecutor executor, final AtomicInteger queueSize, - final CallRunner task) { + final CallRunner task) { // Executors provide no offer, so make our own. 
int queued = queueSize.getAndIncrement(); if (maxQueueLength > 0 && queued >= maxQueueLength) { @@ -103,7 +103,7 @@ protected boolean executeRpcCall(final ThreadPoolExecutor executor, final Atomic return false; } - executor.execute(new FifoCallRunner(task){ + executor.execute(new FifoCallRunner(task) { @Override public void run() { task.setStatus(RpcServer.getStatus()); @@ -217,7 +217,7 @@ public CallQueueInfo getCallQueueInfo() { } protected void updateMethodCountAndSizeByQueue(BlockingQueue queue, - HashMap methodCount, HashMap methodSize) { + HashMap methodCount, HashMap methodSize) { for (Runnable r : queue) { FifoCallRunner mcr = (FifoCallRunner) r; RpcCall rpcCall = mcr.getCallRunner().getRpcCall(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java index f8ba186fb3d3..3d1a7aa8ade7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -31,5 +29,5 @@ public interface HBaseRPCErrorHandler { * @param e the throwable * @return if the server should be shut down */ - boolean checkOOME(final Throwable e) ; + boolean checkOOME(final Throwable e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java index a52aa7e759a9..f139ab3e563d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java @@ -28,6 +28,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -47,7 +48,7 @@ public class MasterFifoRpcScheduler extends FifoRpcScheduler { * is "hbase.regionserver.handler.count" value minus RSReport handlers count, but at least 1 too. 
*/ public static final String MASTER_SERVER_REPORT_HANDLER_COUNT = - "hbase.master.server.report.handler.count"; + "hbase.master.server.report.handler.count"; private static final String REGION_SERVER_REPORT = "RegionServerReport"; private final int rsReportHandlerCount; private final int rsRsreportMaxQueueLength; @@ -55,7 +56,7 @@ public class MasterFifoRpcScheduler extends FifoRpcScheduler { private ThreadPoolExecutor rsReportExecutor; public MasterFifoRpcScheduler(Configuration conf, int callHandlerCount, - int rsReportHandlerCount) { + int rsReportHandlerCount) { super(conf, callHandlerCount); this.rsReportHandlerCount = rsReportHandlerCount; this.rsRsreportMaxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, @@ -66,7 +67,7 @@ public MasterFifoRpcScheduler(Configuration conf, int callHandlerCount, public void start() { LOG.info( "Using {} as call queue; handlerCount={}; maxQueueLength={}; rsReportHandlerCount={}; " - + "rsReportMaxQueueLength={}", + + "rsReportMaxQueueLength={}", this.getClass().getSimpleName(), handlerCount, maxQueueLength, rsReportHandlerCount, rsRsreportMaxQueueLength); this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java index c9e4270d918c..a86e6554b1cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.conf.Configuration; @@ -30,13 +29,13 @@ @InterfaceStability.Evolving public class MetaRWQueueRpcExecutor extends RWQueueRpcExecutor { public static final String META_CALL_QUEUE_READ_SHARE_CONF_KEY = - "hbase.ipc.server.metacallqueue.read.ratio"; + "hbase.ipc.server.metacallqueue.read.ratio"; public static final String META_CALL_QUEUE_SCAN_SHARE_CONF_KEY = - "hbase.ipc.server.metacallqueue.scan.ratio"; + "hbase.ipc.server.metacallqueue.scan.ratio"; public static final float DEFAULT_META_CALL_QUEUE_READ_SHARE = 0.9f; public MetaRWQueueRpcExecutor(final String name, final int handlerCount, final int maxQueueLength, - final PriorityFunction priority, final Configuration conf, final Abortable abortable) { + final PriorityFunction priority, final Configuration conf, final Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index 2e78ef374414..53471e684d6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CallQueueTooBigException; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.exceptions.RequestTooBigException; -import org.apache.hadoop.hbase.quotas.QuotaExceededException; -import org.apache.hadoop.hbase.quotas.RpcThrottlingException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.quotas.QuotaExceededException; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +45,7 @@ public class MetricsHBaseServer { public MetricsHBaseServer(String serverName, MetricsHBaseServerWrapper wrapper) { serverWrapper = wrapper; source = CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class) - .create(serverName, wrapper); + .create(serverName, wrapper); } void authorizationSuccess() { @@ -78,9 +76,13 @@ void receivedBytes(int count) { source.receivedBytes(count); } - void sentResponse(long count) { source.sentResponse(count); } + void sentResponse(long count) { + source.sentResponse(count); + } - void receivedRequest(long count) { source.receivedRequest(count); } + void receivedRequest(long count) { + source.receivedRequest(count); + } void dequeuedCall(int qTime) { source.dequeuedCall(qTime); @@ -98,12 +100,9 @@ public void exception(Throwable throwable) { source.exception(); /** - * Keep some metrics for commonly seen exceptions - * - * Try and put the most common types first. - * Place child types before the parent type that they extend. - * - * If this gets much larger we might have to go to a hashmap + * Keep some metrics for commonly seen exceptions Try and put the most common types first. Place + * child types before the parent type that they extend. If this gets much larger we might have + * to go to a hashmap */ if (throwable != null) { if (throwable instanceof OutOfOrderScannerNextException) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java index 7df63586ab8c..0b00bba04fb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.util.DirectMemoryUtils; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java index 01cf9b59d06a..0a5dd0ecf502 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *
        + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -33,10 +33,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; - /** * Decoder for extracting frame - * * @since 2.0.0 */ @InterfaceAudience.Private @@ -59,8 +57,7 @@ void setConnection(NettyServerRpcConnection connection) { } @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) - throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { if (requestTooBig) { handleTooBigRequest(in); return; @@ -78,11 +75,10 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) if (frameLength > maxFrameLength) { requestTooBig = true; - requestTooBigMessage = - "RPC data length of " + frameLength + " received from " + connection.getHostAddress() - + " is greater than max allowed " + connection.rpcServer.maxRequestSize + ". Set \"" - + SimpleRpcServer.MAX_REQUEST_SIZE - + "\" on server to override this limit (not recommended)"; + requestTooBigMessage = "RPC data length of " + frameLength + " received from " + + connection.getHostAddress() + " is greater than max allowed " + + connection.rpcServer.maxRequestSize + ". Set \"" + SimpleRpcServer.MAX_REQUEST_SIZE + + "\" on server to override this limit (not recommended)"; NettyRpcServer.LOG.warn(requestTooBigMessage); @@ -135,8 +131,10 @@ private void handleTooBigRequest(ByteBuf in) throws IOException { // Make sure the client recognizes the underlying exception // Otherwise, throw a DoNotRetryIOException. - if (VersionInfoUtil.hasMinimumVersion(connection.connectionHeader.getVersionInfo(), - RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) { + if ( + VersionInfoUtil.hasMinimumVersion(connection.connectionHeader.getVersionInfo(), + RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION) + ) { reqTooBig.setResponse(null, null, reqTooBigEx, requestTooBigMessage); } else { reqTooBig.setResponse(null, null, new DoNotRetryIOException(requestTooBigMessage), @@ -174,10 +172,8 @@ private RPCProtos.RequestHeader getHeader(ByteBuf in, int headerSize) throws IOE } /** - * Reads variable length 32bit int from buffer - * This method is from ProtobufVarint32FrameDecoder in Netty and modified a little bit - * to pass the cyeckstyle rule. - * + * Reads variable length 32bit int from buffer This method is from ProtobufVarint32FrameDecoder in + * Netty and modified a little bit to pass the cyeckstyle rule. * @return decoded int if buffers readerIndex has been forwarded else nonsense value */ private static int readRawVarint32(ByteBuf buffer) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index a3ee71fc6fb2..9c8319944e78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -53,13 +53,13 @@ * An RPC server with Netty4 implementation. 
* @since 2.0.0 */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.CONFIG }) public class NettyRpcServer extends RpcServer { public static final Logger LOG = LoggerFactory.getLogger(NettyRpcServer.class); /** - * Name of property to change netty rpc server eventloop thread count. Default is 0. - * Tests may set this down from unlimited. + * Name of property to change netty rpc server eventloop thread count. Default is 0. Tests may set + * this down from unlimited. */ public static final String HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY = "hbase.netty.eventloop.rpcserver.thread.count"; @@ -73,8 +73,8 @@ public class NettyRpcServer extends RpcServer { new DefaultChannelGroup(GlobalEventExecutor.INSTANCE, true); public NettyRpcServer(Server server, String name, List services, - InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler, - boolean reservoirEnabled) throws IOException { + InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler, + boolean reservoirEnabled) throws IOException { super(server, name, services, bindAddress, conf, scheduler, reservoirEnabled); this.bindAddress = bindAddress; EventLoopGroup eventLoopGroup; @@ -84,31 +84,32 @@ public NettyRpcServer(Server server, String name, List() { + .childOption(ChannelOption.TCP_NODELAY, tcpNoDelay) + .childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive) + .childOption(ChannelOption.SO_REUSEADDR, true) + .childHandler(new ChannelInitializer() { - @Override - protected void initChannel(Channel ch) throws Exception { - ChannelPipeline pipeline = ch.pipeline(); - FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); - preambleDecoder.setSingleDecode(true); - pipeline.addLast("preambleDecoder", preambleDecoder); - pipeline.addLast("preambleHandler", createNettyRpcServerPreambleHandler()); - pipeline.addLast("frameDecoder", new NettyRpcFrameDecoder(maxRequestSize)); - pipeline.addLast("decoder", new NettyRpcServerRequestDecoder(allChannels, metrics)); - pipeline.addLast("encoder", new NettyRpcServerResponseEncoder(metrics)); - } - }); + @Override + protected void initChannel(Channel ch) throws Exception { + ChannelPipeline pipeline = ch.pipeline(); + FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); + preambleDecoder.setSingleDecode(true); + pipeline.addLast("preambleDecoder", preambleDecoder); + pipeline.addLast("preambleHandler", createNettyRpcServerPreambleHandler()); + pipeline.addLast("frameDecoder", new NettyRpcFrameDecoder(maxRequestSize)); + pipeline.addLast("decoder", new NettyRpcServerRequestDecoder(allChannels, metrics)); + pipeline.addLast("encoder", new NettyRpcServerResponseEncoder(metrics)); + } + }); try { serverChannel = bootstrap.bind(this.bindAddress).sync().channel(); LOG.info("Bind to {}", serverChannel.localAddress()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index 855cf2fda4d6..cf2551e1c087 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.ipc; +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; - /** * Handle connection preamble. * @since 2.0.0` diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java index 40f59ad1259d..cc8b07702b45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter; import org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup; -import org.apache.yetus.audience.InterfaceAudience; - /** * Decoder for rpc request. 
* @since 2.0.0 @@ -50,7 +50,7 @@ void setConnection(NettyServerRpcConnection connection) { public void channelActive(ChannelHandlerContext ctx) throws Exception { allChannels.add(ctx.channel()); NettyRpcServer.LOG.trace("Connection {}; # active connections={}", - ctx.channel().remoteAddress(), (allChannels.size() - 1)); + ctx.channel().remoteAddress(), (allChannels.size() - 1)); super.channelActive(ctx); } @@ -66,7 +66,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception public void channelInactive(ChannelHandlerContext ctx) throws Exception { allChannels.remove(ctx.channel()); NettyRpcServer.LOG.trace("Disconnection {}; # active connections={}", - ctx.channel().remoteAddress(), (allChannels.size() - 1)); + ctx.channel().remoteAddress(), (allChannels.size() - 1)); super.channelInactive(ctx); } @@ -74,7 +74,7 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) { allChannels.remove(ctx.channel()); NettyRpcServer.LOG.trace("Connection {}; caught unexpected downstream exception.", - ctx.channel().remoteAddress(), e); + ctx.channel().remoteAddress(), e); ctx.channel().close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java index 09589da16354..30f8dba236a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; -import org.apache.yetus.audience.InterfaceAudience; - /** * Encoder for {@link RpcResponse}. * @since 2.0.0 @@ -39,7 +39,7 @@ class NettyRpcServerResponseEncoder extends ChannelOutboundHandlerAdapter { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + throws Exception { if (msg instanceof RpcResponse) { RpcResponse resp = (RpcResponse) msg; BufferChain buf = resp.getResponse(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java index 8dc08c97bd9f..fd0c6d75d888 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,15 @@ import java.io.IOException; import java.net.InetAddress; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -38,11 +39,11 @@ class NettyServerCall extends ServerCall { NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size, - InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator bbAllocator, - CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { + Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size, + InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator bbAllocator, + CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, - timeout, bbAllocator, cellBlockBuilder, reqCleanup); + timeout, bbAllocator, cellBlockBuilder, reqCleanup); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java index deed9875670a..91468fdd0398 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,23 +17,23 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.Channel; - import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.CellScanner; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.Channel; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -113,12 +113,11 @@ public boolean isConnectionOpen() { @Override public NettyServerCall createCall(int id, final BlockingService service, - final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, - long size, final InetAddress remoteAddress, int timeout, - CallCleanup reqCleanup) { + final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, + long size, final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, - remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, - this.rpcServer.cellBlockBuilder, reqCleanup); + remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, + this.rpcServer.cellBlockBuilder, reqCleanup); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java index 0b88b6ccaa75..8f9f3ed72362 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PluggableBlockingQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,21 +23,15 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Abstract class template for defining a pluggable blocking queue implementation to be used - * by the 'pluggable' call queue type in the RpcExecutor. - * - * The intention is that the constructor shape helps re-inforce the expected parameters needed - * to match up to how the RpcExecutor will instantiate instances of the queue. - * - * If the implementation class implements the - * {@link org.apache.hadoop.hbase.conf.ConfigurationObserver} interface, it will also be wired - * into configuration changes. - * - * Instantiation requires a constructor with {@code + * Abstract class template for defining a pluggable blocking queue implementation to be used by the + * 'pluggable' call queue type in the RpcExecutor. 
The intention is that the constructor shape helps + * re-inforce the expected parameters needed to match up to how the RpcExecutor will instantiate + * instances of the queue. If the implementation class implements the + * {@link org.apache.hadoop.hbase.conf.ConfigurationObserver} interface, it will also be wired into + * configuration changes. Instantiation requires a constructor with {@code * final int maxQueueLength, * final PriorityFunction priority, - * final Configuration conf)} - * as the arguments. + * final Configuration conf)} as the arguments. */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -46,8 +40,8 @@ public abstract class PluggableBlockingQueue implements BlockingQueue} */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java index b294db3aa453..ef19dea2dfbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,35 +17,30 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.security.User; /** * Function to figure priority of incoming request. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public interface PriorityFunction { /** - * Returns the 'priority type' of the specified request. - * The returned value is mainly used to select the dispatch queue. - * @param header - * @param param - * @param user - * @return Priority of this request. + * Returns the 'priority type' of the specified request. The returned value is mainly used to + * select the dispatch queue. nnn * @return Priority of this request. */ int getPriority(RequestHeader header, Message param, User user); /** - * Returns the deadline of the specified request. - * The returned value is used to sort the dispatch queue. - * @param header - * @param param - * @return Deadline of this request. 0 now, otherwise msec of 'delay' + * Returns the deadline of the specified request. The returned value is used to sort the dispatch + * queue. nn * @return Deadline of this request. 
0 now, otherwise msec of 'delay' */ long getDeadline(RequestHeader header, Message param); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java index ca1546cd83ae..dc496de6b737 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java index d1141d093edb..a13f5d858235 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java index a5ed6fe0eae7..2005cab83fcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos; /** @@ -30,21 +31,18 @@ */ @InterfaceAudience.Private final class RPCTInfoGetter implements TextMapGetter { - RPCTInfoGetter() { } + RPCTInfoGetter() { + } @Override public Iterable keys(TracingProtos.RPCTInfo carrier) { - return Optional.ofNullable(carrier) - .map(TracingProtos.RPCTInfo::getHeadersMap) - .map(Map::keySet) + return Optional.ofNullable(carrier).map(TracingProtos.RPCTInfo::getHeadersMap).map(Map::keySet) .orElse(Collections.emptySet()); } @Override public String get(TracingProtos.RPCTInfo carrier, String key) { - return Optional.ofNullable(carrier) - .map(TracingProtos.RPCTInfo::getHeadersMap) - .map(map -> map.get(key)) - .orElse(null); + return Optional.ofNullable(carrier).map(TracingProtos.RPCTInfo::getHeadersMap) + .map(map -> map.get(key)).orElse(null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java index 835966847a32..4030304a11e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
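As a rough illustration of the PriorityFunction contract above: getPriority() picks which dispatch queue a request lands in, and getDeadline() orders calls inside a deadline-sorted queue. A minimal sketch of an implementing class follows; the class name FlatPriorityFunction is hypothetical and not part of this patch.

import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;

// Illustrative only: every request is treated as normal priority with no delay.
public class FlatPriorityFunction implements PriorityFunction {
  @Override
  public int getPriority(RequestHeader header, Message param, User user) {
    return 0; // normal priority; higher values would route to priority queues
  }

  @Override
  public long getDeadline(RequestHeader header, Message param) {
    return 0; // 0 means "now": no artificial delay applied to any call
  }
}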
*/ - package org.apache.hadoop.hbase.ipc; import java.util.Queue; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -30,7 +27,9 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; @@ -40,19 +39,18 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; /** - * RPC Executor that uses different queues for reads and writes. - * With the options to use different queues/executors for gets and scans. - * Each handler has its own queue and there is no stealing. + * RPC Executor that uses different queues for reads and writes. With the options to use different + * queues/executors for gets and scans. Each handler has its own queue and there is no stealing. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class RWQueueRpcExecutor extends RpcExecutor { private static final Logger LOG = LoggerFactory.getLogger(RWQueueRpcExecutor.class); public static final String CALL_QUEUE_READ_SHARE_CONF_KEY = - "hbase.ipc.server.callqueue.read.ratio"; + "hbase.ipc.server.callqueue.read.ratio"; public static final String CALL_QUEUE_SCAN_SHARE_CONF_KEY = - "hbase.ipc.server.callqueue.scan.ratio"; + "hbase.ipc.server.callqueue.scan.ratio"; private final QueueBalancer writeBalancer; private final QueueBalancer readBalancer; @@ -69,7 +67,7 @@ public class RWQueueRpcExecutor extends RpcExecutor { private final AtomicInteger activeScanHandlerCount = new AtomicInteger(0); public RWQueueRpcExecutor(final String name, final int handlerCount, final int maxQueueLength, - final PriorityFunction priority, final Configuration conf, final Abortable abortable) { + final PriorityFunction priority, final Configuration conf, final Abortable abortable) { super(name, handlerCount, maxQueueLength, priority, conf, abortable); float callqReadShare = getReadShare(conf); @@ -81,8 +79,8 @@ public RWQueueRpcExecutor(final String name, final int handlerCount, final int m int readQueues = calcNumReaders(this.numCallQueues, callqReadShare); int readHandlers = Math.max(readQueues, calcNumReaders(handlerCount, callqReadShare)); - int scanQueues = Math.max(0, (int)Math.floor(readQueues * callqScanShare)); - int scanHandlers = Math.max(0, (int)Math.floor(readHandlers * callqScanShare)); + int scanQueues = Math.max(0, (int) Math.floor(readQueues * callqScanShare)); + int scanHandlers = Math.max(0, (int) Math.floor(readHandlers * callqScanShare)); if ((readQueues - scanQueues) > 0) { readQueues -= scanQueues; @@ -102,11 +100,13 @@ public RWQueueRpcExecutor(final String name, final int handlerCount, final int m initializeQueues(numScanQueues); this.writeBalancer = getBalancer(name, conf, queues.subList(0, numWriteQueues)); - this.readBalancer = getBalancer(name, conf, queues.subList(numWriteQueues, numWriteQueues + numReadQueues)); - this.scanBalancer = numScanQueues > 0 ? 
- getBalancer(name, conf, queues.subList(numWriteQueues + numReadQueues, - numWriteQueues + numReadQueues + numScanQueues)) : - null; + this.readBalancer = + getBalancer(name, conf, queues.subList(numWriteQueues, numWriteQueues + numReadQueues)); + this.scanBalancer = numScanQueues > 0 + ? getBalancer(name, conf, + queues.subList(numWriteQueues + numReadQueues, + numWriteQueues + numReadQueues + numScanQueues)) + : null; LOG.info(getName() + " writeQueues=" + numWriteQueues + " writeHandlers=" + writeHandlersCount + " readQueues=" + numReadQueues + " readHandlers=" + readHandlersCount + " scanQueues=" @@ -176,8 +176,8 @@ public int getReadQueueLength() { @Override public int getScanQueueLength() { int length = 0; - for (int i = numWriteQueues + numReadQueues; - i < (numWriteQueues + numReadQueues + numScanQueues); i++) { + for (int i = numWriteQueues + numReadQueues; i + < (numWriteQueues + numReadQueues + numScanQueues); i++) { length += queues.get(i).size(); } return length; @@ -186,7 +186,7 @@ public int getScanQueueLength() { @Override public int getActiveHandlerCount() { return activeWriteHandlerCount.get() + activeReadHandlerCount.get() - + activeScanHandlerCount.get(); + + activeScanHandlerCount.get(); } @Override @@ -207,9 +207,9 @@ public int getActiveScanHandlerCount() { protected boolean isWriteRequest(final RequestHeader header, final Message param) { // TODO: Is there a better way to do this? if (param instanceof MultiRequest) { - MultiRequest multi = (MultiRequest)param; + MultiRequest multi = (MultiRequest) param; for (RegionAction regionAction : multi.getRegionActionList()) { - for (Action action: regionAction.getActionList()) { + for (Action action : regionAction.getActionList()) { if (action.hasMutation()) { return true; } @@ -267,16 +267,16 @@ protected float getScanShare(final Configuration conf) { } /* - * Calculate the number of writers based on the "total count" and the read share. - * You'll get at least one writer. + * Calculate the number of writers based on the "total count" and the read share. You'll get at + * least one writer. */ private static int calcNumWriters(final int count, final float readShare) { - return Math.max(1, count - Math.max(1, (int)Math.round(count * readShare))); + return Math.max(1, count - Math.max(1, (int) Math.round(count * readShare))); } /* - * Calculate the number of readers based on the "total count" and the read share. - * You'll get at least one reader. + * Calculate the number of readers based on the "total count" and the read share. You'll get at + * least one reader. */ private static int calcNumReaders(final int count, final float readShare) { return count - calcNumWriters(count, readShare); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java index 528affc48049..22699cc03ee3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
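To make the read/write/scan split above concrete, here is a small worked example based on the two ratio keys declared in this file (hbase.ipc.server.callqueue.read.ratio and hbase.ipc.server.callqueue.scan.ratio) and on the calcNumWriters/calcNumReaders arithmetic shown in this hunk. The figures (10 call queues, read ratio 0.5, scan ratio 0.2) are made up for illustration.

public class RwQueueSplitExample {
  // Same formula as calcNumWriters in RWQueueRpcExecutor: you always get at least one writer.
  static int calcNumWriters(int count, float readShare) {
    return Math.max(1, count - Math.max(1, Math.round(count * readShare)));
  }

  public static void main(String[] args) {
    int numCallQueues = 10;
    float readShare = 0.5f;
    float scanShare = 0.2f;
    int writeQueues = calcNumWriters(numCallQueues, readShare);             // 5
    int readQueues = numCallQueues - writeQueues;                           // 5
    int scanQueues = Math.max(0, (int) Math.floor(readQueues * scanShare)); // 1
    if (readQueues - scanQueues > 0) {
      readQueues -= scanQueues;                                             // 4
    }
    System.out.println("write=" + writeQueues + " read=" + readQueues + " scan=" + scanQueues);
  }
}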
*/ - package org.apache.hadoop.hbase.ipc; import java.util.List; @@ -35,7 +33,8 @@ public class RandomQueueBalancer implements QueueBalancer { private final int queueSize; private final List> queues; - public RandomQueueBalancer(Configuration conf, String executorName, List> queues) { + public RandomQueueBalancer(Configuration conf, String executorName, + List> queues) { this.queueSize = queues.size(); this.queues = queues; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 7571ac1539c2..e12bcf6964f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,25 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; - import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** * Interface of all necessary to carry out a RPC method invocation on the server. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public interface RpcCall extends RpcCallContext { @@ -84,15 +82,15 @@ public interface RpcCall extends RpcCallContext { int getPriority(); /** - * Return the deadline of this call. If we can not complete this call in time, - * we can throw a TimeoutIOException and RPCServer will drop it. + * Return the deadline of this call. If we can not complete this call in time, we can throw a + * TimeoutIOException and RPCServer will drop it. * @return The system timestamp of deadline. */ long getDeadline(); /** - * Used to calculate the request call queue size. - * If the total request call size exceeds a limit, the call will be rejected. + * Used to calculate the request call queue size. If the total request call size exceeds a limit, + * the call will be rejected. * @return The raw size of this call. */ long getSize(); @@ -109,17 +107,16 @@ public interface RpcCall extends RpcCallContext { /** * Set the response resulting from this RPC call. - * @param param The result message as response. - * @param cells The CellScanner that possibly carries the payload. 
+ * @param param The result message as response. + * @param cells The CellScanner that possibly carries the payload. * @param errorThrowable The error Throwable resulting from the call. - * @param error Extra error message. + * @param error Extra error message. */ void setResponse(Message param, CellScanner cells, Throwable errorThrowable, String error); /** - * Send the response of this RPC call. - * Implementation provides the underlying facility (connection, etc) to send. - * @throws IOException + * Send the response of this RPC call. Implementation provides the underlying facility + * (connection, etc) to send. n */ void sendResponseIfReady() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 6a4d3a29a52d..6f045731ecb5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,10 @@ import java.net.InetAddress; import java.util.Optional; - +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; -import org.apache.hadoop.hbase.security.User; /** * Interface of all necessary to carry out a RPC service invocation on the server. This interface @@ -31,19 +31,19 @@ @InterfaceAudience.Private public interface RpcCallContext { /** - * Check if the caller who made this IPC call has disconnected. - * If called from outside the context of IPC, this does nothing. - * @return < 0 if the caller is still connected. The time in ms - * since the disconnection otherwise + * Check if the caller who made this IPC call has disconnected. If called from outside the context + * of IPC, this does nothing. + * @return < 0 if the caller is still connected. The time in ms since the disconnection + * otherwise */ long disconnectSince(); /** * If the client connected and specified a codec to use, then we will use this codec making - * cellblocks to return. If the client did not specify a codec, we assume it does not support - * cellblocks and will return all content protobuf'd (though it makes our serving slower). - * We need to ask this question per call because a server could be hosting both clients that - * support cellblocks while fielding requests from clients that do not. + * cellblocks to return. If the client did not specify a codec, we assume it does not support + * cellblocks and will return all content protobuf'd (though it makes our serving slower). We need + * to ask this question per call because a server could be hosting both clients that support + * cellblocks while fielding requests from clients that do not. * @return True if the client supports cellblocks, else return all content in pb */ boolean isClientCellBlockSupported(); @@ -74,27 +74,22 @@ default Optional getRequestUserName() { /** * Sets a callback which has to be executed at the end of this RPC call. Such a callback is an - * optional one for any Rpc call. - * - * @param callback + * optional one for any Rpc call. 
*/ void setCallBack(RpcCallback callback); boolean isRetryImmediatelySupported(); /** - * The size of response cells that have been accumulated so far. - * This along with the corresponding increment call is used to ensure that multi's or - * scans dont get too excessively large + * The size of response cells that have been accumulated so far. This along with the corresponding + * increment call is used to ensure that multi's or scans dont get too excessively large */ long getResponseCellSize(); /** - * Add on the given amount to the retained cell size. - * - * This is not thread safe and not synchronized at all. If this is used by more than one thread - * then everything will break. Since this is called for every row synchronization would be too - * onerous. + * Add on the given amount to the retained cell size. This is not thread safe and not synchronized + * at all. If this is used by more than one thread then everything will break. Since this is + * called for every row synchronization would be too onerous. */ void incrementResponseCellSize(long cellSize); @@ -103,5 +98,6 @@ default Optional getRequestUserName() { void incrementResponseBlockSize(long blockSize); long getResponseExceptionSize(); + void incrementResponseExceptionSize(long exceptionSize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java index f0074b54437c..a8bf2d762255 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** * Denotes a callback action that has to be executed at the end of an Rpc Call. - * * @see RpcCallContext#setCallBack(RpcCallback) */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 40ed856be427..e00ca6a991c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.ipc; import java.util.ArrayList; @@ -43,6 +42,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; @@ -80,7 +80,6 @@ public abstract class RpcExecutor { "hbase.ipc.server.callqueue.balancer.class"; public static final Class CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT = RandomQueueBalancer.class; - // These 3 are only used by Codel executor public static final String CALL_QUEUE_CODEL_TARGET_DELAY = "hbase.ipc.server.callqueue.codel.target.delay"; @@ -119,23 +118,25 @@ public abstract class RpcExecutor { private final Abortable abortable; public RpcExecutor(final String name, final int handlerCount, final int maxQueueLength, - final PriorityFunction priority, final Configuration conf, final Abortable abortable) { - this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, - CALL_QUEUE_TYPE_CONF_DEFAULT), maxQueueLength, priority, conf, abortable); + final PriorityFunction priority, final Configuration conf, final Abortable abortable) { + this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, CALL_QUEUE_TYPE_CONF_DEFAULT), + maxQueueLength, priority, conf, abortable); } public RpcExecutor(final String name, final int handlerCount, final String callQueueType, - final int maxQueueLength, final PriorityFunction priority, final Configuration conf, - final Abortable abortable) { + final int maxQueueLength, final PriorityFunction priority, final Configuration conf, + final Abortable abortable) { this.name = Strings.nullToEmpty(name); this.conf = conf; this.abortable = abortable; float callQueuesHandlersFactor = this.conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); - if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0 || - Float.compare(0.0f, callQueuesHandlersFactor) > 0) { - LOG.warn(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + - " is *ILLEGAL*, it should be in range [0.0, 1.0]"); + if ( + Float.compare(callQueuesHandlersFactor, 1.0f) > 0 + || Float.compare(0.0f, callQueuesHandlersFactor) > 0 + ) { + LOG.warn( + CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + " is *ILLEGAL*, it should be in range [0.0, 1.0]"); // For callQueuesHandlersFactor > 1.0, we just set it 1.0f. 
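For context on the factor being range-checked here: it scales the handler count down to a number of call queues. The sketch below assumes CALL_QUEUE_HANDLER_FACTOR_CONF_KEY on RpcExecutor is publicly visible (only its use appears in this hunk) and uses one plausible handlers-to-queues mapping; the real computeNumCallQueues body sits outside this hunk, so the final formula is illustrative rather than authoritative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ipc.RpcExecutor;

public class CallQueueFactorExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat(RpcExecutor.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5f);
    float factor = conf.getFloat(RpcExecutor.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f);
    // Mirror the clamping above: values outside [0.0, 1.0] are pulled back into range.
    if (Float.compare(factor, 1.0f) > 0) {
      factor = 1.0f;
    } else if (Float.compare(0.0f, factor) > 0) {
      factor = 0.0f;
    }
    int handlerCount = 30;
    // One plausible mapping from handlers to queues (assumed, not taken from this hunk).
    int numCallQueues = Math.max(1, Math.round(handlerCount * factor)); // 15 for this config
    System.out.println("numCallQueues=" + numCallQueues);
  }
}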
if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0) { LOG.warn("Set " + CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + " 1.0f"); @@ -154,16 +155,16 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ if (isDeadlineQueueType(callQueueType)) { this.name += ".Deadline"; - this.queueInitArgs = new Object[] { maxQueueLength, - new CallPriorityComparator(conf, priority) }; + this.queueInitArgs = + new Object[] { maxQueueLength, new CallPriorityComparator(conf, priority) }; this.queueClass = BoundedPriorityBlockingQueue.class; } else if (isCodelQueueType(callQueueType)) { this.name += ".Codel"; - int codelTargetDelay = conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, - CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); + int codelTargetDelay = + conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); int codelInterval = conf.getInt(CALL_QUEUE_CODEL_INTERVAL, CALL_QUEUE_CODEL_DEFAULT_INTERVAL); - double codelLifoThreshold = conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, - CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); + double codelLifoThreshold = + conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); this.queueInitArgs = new Object[] { maxQueueLength, codelTargetDelay, codelInterval, codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches }; this.queueClass = AdaptiveLifoCoDelCallQueue.class; @@ -172,8 +173,8 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ getPluggableQueueClass(); if (!pluggableQueueClass.isPresent()) { - throw new PluggableRpcQueueNotFound("Pluggable call queue failed to load and selected call" - + " queue type required"); + throw new PluggableRpcQueueNotFound( + "Pluggable call queue failed to load and selected call" + " queue type required"); } else { this.queueInitArgs = new Object[] { maxQueueLength, priority, conf }; this.queueClass = pluggableQueueClass.get(); @@ -184,9 +185,10 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ this.queueClass = LinkedBlockingQueue.class; } - LOG.info("Instantiated {} with queueClass={}; " + - "numCallQueues={}, maxQueueLength={}, handlerCount={}", - this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount); + LOG.info( + "Instantiated {} with queueClass={}; " + + "numCallQueues={}, maxQueueLength={}, handlerCount={}", + this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount); } protected int computeNumCallQueues(final int handlerCount, final float callQueuesHandlersFactor) { @@ -197,33 +199,25 @@ protected int computeNumCallQueues(final int handlerCount, final float callQueue * Return the {@link Descriptors.MethodDescriptor#getName()} from {@code callRunner} or "Unknown". */ private static String getMethodName(final CallRunner callRunner) { - return Optional.ofNullable(callRunner) - .map(CallRunner::getRpcCall) - .map(RpcCall::getMethod) - .map(Descriptors.MethodDescriptor::getName) - .orElse("Unknown"); + return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getMethod) + .map(Descriptors.MethodDescriptor::getName).orElse("Unknown"); } /** * Return the {@link RpcCall#getSize()} from {@code callRunner} or 0L. 
*/ private static long getRpcCallSize(final CallRunner callRunner) { - return Optional.ofNullable(callRunner) - .map(CallRunner::getRpcCall) - .map(RpcCall::getSize) + return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getSize) .orElse(0L); } public Map getCallQueueCountsSummary() { - return queues.stream() - .flatMap(Collection::stream) - .map(RpcExecutor::getMethodName) + return queues.stream().flatMap(Collection::stream).map(RpcExecutor::getMethodName) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); } public Map getCallQueueSizeSummary() { - return queues.stream() - .flatMap(Collection::stream) + return queues.stream().flatMap(Collection::stream) .map(callRunner -> new Pair<>(getMethodName(callRunner), getRpcCallSize(callRunner))) .collect(Collectors.groupingBy(Pair::getFirst, Collectors.summingLong(Pair::getSecond))); } @@ -266,9 +260,9 @@ protected void startHandlers(final int port) { * Override if providing alternate Handler implementation. */ protected RpcHandler getHandler(final String name, final double handlerFailureThreshhold, - final int handlerCount, final BlockingQueue q, - final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, - final Abortable abortable) { + final int handlerCount, final BlockingQueue q, + final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, + final Abortable abortable) { return new RpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable); } @@ -277,23 +271,24 @@ protected RpcHandler getHandler(final String name, final double handlerFailureTh * Start up our handlers. */ protected void startHandlers(final String nameSuffix, final int numHandlers, - final List> callQueues, final int qindex, final int qsize, - final int port, final AtomicInteger activeHandlerCount) { + final List> callQueues, final int qindex, final int qsize, + final int port, final AtomicInteger activeHandlerCount) { final String threadPrefix = name + Strings.nullToEmpty(nameSuffix); - double handlerFailureThreshhold = conf == null ? 1.0 : conf.getDouble( - HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); + double handlerFailureThreshhold = conf == null + ? 1.0 + : conf.getDouble(HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); for (int i = 0; i < numHandlers; i++) { final int index = qindex + (i % qsize); String name = "RpcServer." 
+ threadPrefix + ".handler=" + handlers.size() + ",queue=" + index - + ",port=" + port; + + ",port=" + port; RpcHandler handler = getHandler(name, handlerFailureThreshhold, handlerCount, callQueues.get(index), activeHandlerCount, failedHandlerCount, abortable); handler.start(); handlers.add(handler); } LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}", - handlers.size(), threadPrefix, qsize, port); + handlers.size(), threadPrefix, qsize, port); } /** @@ -301,17 +296,14 @@ protected void startHandlers(final String nameSuffix, final int numHandlers, */ private static final QueueBalancer ONE_QUEUE = val -> 0; - public static QueueBalancer getBalancer( - final String executorName, - final Configuration conf, - final List> queues - ) { + public static QueueBalancer getBalancer(final String executorName, final Configuration conf, + final List> queues) { Preconditions.checkArgument(queues.size() > 0, "Queue size is <= 0, must be at least 1"); if (queues.size() == 1) { return ONE_QUEUE; } else { - Class balancerClass = conf.getClass( - CALL_QUEUE_QUEUE_BALANCER_CLASS, CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT); + Class balancerClass = + conf.getClass(CALL_QUEUE_QUEUE_BALANCER_CLASS, CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT); return (QueueBalancer) ReflectionUtils.newInstance(balancerClass, conf, executorName, queues); } } @@ -362,16 +354,16 @@ public static boolean isPluggableQueueType(String callQueueType) { } public static boolean isPluggableQueueWithFastPath(String callQueueType, Configuration conf) { - return isPluggableQueueType(callQueueType) && - conf.getBoolean(PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, false); + return isPluggableQueueType(callQueueType) + && conf.getBoolean(PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, false); } private Optional>> getPluggableQueueClass() { String queueClassName = conf.get(PLUGGABLE_CALL_QUEUE_CLASS_NAME); if (queueClassName == null) { - LOG.error("Pluggable queue class config at " + PLUGGABLE_CALL_QUEUE_CLASS_NAME + - " was not found"); + LOG.error( + "Pluggable queue class config at " + PLUGGABLE_CALL_QUEUE_CLASS_NAME + " was not found"); return Optional.empty(); } @@ -381,8 +373,8 @@ private Optional>> getPluggableQueueCl if (BlockingQueue.class.isAssignableFrom(clazz)) { return Optional.of((Class>) clazz); } else { - LOG.error("Pluggable Queue class " + queueClassName + - " does not extend BlockingQueue"); + LOG.error( + "Pluggable Queue class " + queueClassName + " does not extend BlockingQueue"); return Optional.empty(); } } catch (ClassNotFoundException exception) { @@ -418,7 +410,7 @@ public int getActiveScanHandlerCount() { /** Returns the length of the pending queue */ public int getQueueLength() { int length = 0; - for (final BlockingQueue queue: queues) { + for (final BlockingQueue queue : queues) { length += queue.size(); } return length; @@ -459,18 +451,18 @@ public void resizeQueues(Configuration conf) { public void onConfigurationChange(Configuration conf) { // update CoDel Scheduler tunables - int codelTargetDelay = conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, - CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); + int codelTargetDelay = + conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); int codelInterval = conf.getInt(CALL_QUEUE_CODEL_INTERVAL, CALL_QUEUE_CODEL_DEFAULT_INTERVAL); - double codelLifoThreshold = conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, - CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); + double codelLifoThreshold = + conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, 
CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); for (BlockingQueue queue : queues) { if (queue instanceof AdaptiveLifoCoDelCallQueue) { ((AdaptiveLifoCoDelCallQueue) queue).updateTunables(codelTargetDelay, codelInterval, codelLifoThreshold); } else if (queue instanceof ConfigurationObserver) { - ((ConfigurationObserver)queue).onConfigurationChange(conf); + ((ConfigurationObserver) queue).onConfigurationChange(conf); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java index f46dcfcc08eb..8c762b6873a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,7 @@ import org.slf4j.LoggerFactory; /** - * Thread to handle rpc call. - * Should only be used in {@link RpcExecutor} and its sub-classes. + * Thread to handle rpc call. Should only be used in {@link RpcExecutor} and its sub-classes. */ @InterfaceAudience.Private public class RpcHandler extends Thread { @@ -52,8 +51,8 @@ public class RpcHandler extends Thread { private boolean running; RpcHandler(final String name, final double handlerFailureThreshhold, final int handlerCount, - final BlockingQueue q, final AtomicInteger activeHandlerCount, - final AtomicInteger failedHandlerCount, final Abortable abortable) { + final BlockingQueue q, final AtomicInteger activeHandlerCount, + final AtomicInteger failedHandlerCount, final Abortable abortable) { super(name); setDaemon(true); this.q = q; @@ -65,8 +64,7 @@ public class RpcHandler extends Thread { } /** - * @return A {@link CallRunner} - * @throws InterruptedException + * @return A {@link CallRunner} n */ protected CallRunner getCallRunner() throws InterruptedException { return this.q.take(); @@ -107,8 +105,10 @@ private void run(CallRunner cr) { } catch (Throwable e) { if (e instanceof Error) { int failedCount = failedHandlerCount.incrementAndGet(); - if (this.handlerFailureThreshhold >= 0 - && failedCount > handlerCount * this.handlerFailureThreshhold) { + if ( + this.handlerFailureThreshhold >= 0 + && failedCount > handlerCount * this.handlerFailureThreshhold + ) { String message = "Number of failed RpcServer handler runs exceeded threshhold " + this.handlerFailureThreshhold + "; reason: " + StringUtils.stringifyException(e); if (abortable != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java index 7174a409c932..7840228621ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java index 0f935f6a76dc..0d0a1f8659c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java @@ -25,16 +25,16 @@ /** * An interface for RPC request scheduling algorithm. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public abstract class RpcScheduler { public static final String IPC_SERVER_MAX_CALLQUEUE_LENGTH = - "hbase.ipc.server.max.callqueue.length"; + "hbase.ipc.server.max.callqueue.length"; public static final String IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH = - "hbase.ipc.server.priority.max.callqueue.length"; + "hbase.ipc.server.priority.max.callqueue.length"; public static final String IPC_SERVER_REPLICATION_MAX_CALLQUEUE_LENGTH = - "hbase.ipc.server.replication.max.callqueue.length"; + "hbase.ipc.server.replication.max.callqueue.length"; /** Exposes runtime information of a {@code RpcServer} that a {@code RpcScheduler} may need. */ public static abstract class Context { @@ -42,9 +42,8 @@ public static abstract class Context { } /** - * Does some quick initialization. Heavy tasks (e.g. starting threads) should be - * done in {@link #start()}. This method is called before {@code start}. - * + * Does some quick initialization. Heavy tasks (e.g. starting threads) should be done in + * {@link #start()}. This method is called before {@code start}. * @param context provides methods to retrieve runtime information from */ public abstract void init(Context context); @@ -60,7 +59,6 @@ public static abstract class Context { /** * Dispatches an RPC request asynchronously. An implementation is free to choose to process the * request immediately or delay it for later processing. - * * @param task the request to be dispatched */ public abstract boolean dispatch(CallRunner task); @@ -96,15 +94,15 @@ public static abstract class Context { public abstract int getActiveReplicationRpcHandlerCount(); /** - * If CoDel-based RPC executors are used, retrieves the number of Calls that were dropped - * from general queue because RPC executor is under high load; returns 0 otherwise. + * If CoDel-based RPC executors are used, retrieves the number of Calls that were dropped from + * general queue because RPC executor is under high load; returns 0 otherwise. */ public abstract long getNumGeneralCallsDropped(); /** - * If CoDel-based RPC executors are used, retrieves the number of Calls that were - * picked from the tail of the queue (indicating adaptive LIFO mode, when - * in the period of overloade we serve last requests first); returns 0 otherwise. + * If CoDel-based RPC executors are used, retrieves the number of Calls that were picked from the + * tail of the queue (indicating adaptive LIFO mode, when in the period of overloade we serve last + * requests first); returns 0 otherwise. 
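The three queue-length keys declared above cap how many pending calls the general, priority and replication executors will hold before new work is rejected (typically surfacing to clients as CallQueueTooBigException, which appears further down in RpcServer). A hypothetical tuning snippet; the value 300 is purely illustrative and not an HBase default.

import org.apache.hadoop.conf.Configuration;

public class CallQueueLengthTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("hbase.ipc.server.max.callqueue.length", 300);
    conf.setInt("hbase.ipc.server.priority.max.callqueue.length", 300);
    conf.setInt("hbase.ipc.server.replication.max.callqueue.length", 300);
    System.out.println(conf.getInt("hbase.ipc.server.max.callqueue.length", -1));
  }
}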
*/ public abstract long getNumLifoModeSwitches(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java index 12da141290f6..bab3e80d322d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -26,7 +25,7 @@ class RpcSchedulerContext extends RpcScheduler.Context { private final RpcServer rpcServer; /** - * @param rpcServer + * n */ RpcSchedulerContext(final RpcServer rpcServer) { this.rpcServer = rpcServer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 9a7ba922cbce..58d55f73b53f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; @@ -80,15 +79,13 @@ /** * An RPC server that hosts protobuf described Services. - * */ @InterfaceAudience.Private -public abstract class RpcServer implements RpcServerInterface, - ConfigurationObserver { +public abstract class RpcServer implements RpcServerInterface, ConfigurationObserver { // LOG is being used in CallRunner and the log level is being changed in tests public static final Logger LOG = LoggerFactory.getLogger(RpcServer.class); - protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION - = new CallQueueTooBigException(); + protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION = + new CallQueueTooBigException(); private static final String MULTI_GETS = "multi.gets"; private static final String MULTI_MUTATIONS = "multi.mutations"; @@ -104,7 +101,7 @@ public abstract class RpcServer implements RpcServerInterface, * Whether we allow a fallback to SIMPLE auth for insecure clients when security is enabled. */ public static final String FALLBACK_TO_INSECURE_CLIENT_AUTH = - "hbase.ipc.server.fallback-to-simple-auth-allowed"; + "hbase.ipc.server.fallback-to-simple-auth-allowed"; /** * How many calls/handler are allowed in the queue. @@ -115,15 +112,15 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String AUTH_FAILED_FOR = "Auth failed for "; protected static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; - protected static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger." - + Server.class.getName()); + protected static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." + Server.class.getName()); protected SecretManager secretManager; protected final Map saslProps; protected ServiceAuthorizationManager authManager; - /** This is set to Call object before Handler invokes an RPC and ybdie - * after the call returns. 
+ /** + * This is set to Call object before Handler invokes an RPC and ybdie after the call returns. */ protected static final ThreadLocal CurCall = new ThreadLocal<>(); @@ -159,9 +156,9 @@ public abstract class RpcServer implements RpcServerInterface, protected final boolean tcpKeepAlive; // if T then use keepalives /** - * This flag is used to indicate to sub threads when they should go down. When we call - * {@link #start()}, all threads started will consult this flag on whether they should - * keep going. It is set to false when {@link #stop()} is called. + * This flag is used to indicate to sub threads when they should go down. When we call + * {@link #start()}, all threads started will consult this flag on whether they should keep going. + * It is set to false when {@link #stop()} is called. */ volatile boolean running = true; @@ -181,8 +178,8 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String WARN_RESPONSE_SIZE = "hbase.ipc.warn.response.size"; /** - * Minimum allowable timeout (in milliseconds) in rpc request's header. This - * configuration exists to prevent the rpc service regarding this request as timeout immediately. + * Minimum allowable timeout (in milliseconds) in rpc request's header. This configuration exists + * to prevent the rpc service regarding this request as timeout immediately. */ protected static final String MIN_CLIENT_REQUEST_TIMEOUT = "hbase.ipc.min.client.request.timeout"; protected static final int DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT = 20; @@ -221,7 +218,6 @@ public abstract class RpcServer implements RpcServerInterface, */ private RSRpcServices rsRpcServices; - /** * Use to add online slowlog responses */ @@ -233,22 +229,25 @@ protected interface CallCleanup { } /** - * Datastructure for passing a {@link BlockingService} and its associated class of - * protobuf service interface. For example, a server that fielded what is defined - * in the client protobuf service would pass in an implementation of the client blocking service - * and then its ClientService.BlockingInterface.class. Used checking connection setup. + * Datastructure for passing a {@link BlockingService} and its associated class of protobuf + * service interface. For example, a server that fielded what is defined in the client protobuf + * service would pass in an implementation of the client blocking service and then its + * ClientService.BlockingInterface.class. Used checking connection setup. */ public static class BlockingServiceAndInterface { private final BlockingService service; private final Class serviceInterface; + public BlockingServiceAndInterface(final BlockingService service, - final Class serviceInterface) { + final Class serviceInterface) { this.service = service; this.serviceInterface = serviceInterface; } + public Class getServiceInterface() { return this.serviceInterface; } + public BlockingService getBlockingService() { return this.service; } @@ -256,19 +255,15 @@ public BlockingService getBlockingService() { /** * Constructs a server listening on the named port and address. - * @param server hosting instance of {@link Server}. We will do authentications if an - * instance else pass null for no authentication check. - * @param name Used keying this rpc servers' metrics and for naming the Listener thread. - * @param services A list of services. - * @param bindAddress Where to listen - * @param conf - * @param scheduler - * @param reservoirEnabled Enable ByteBufferPool or not. + * @param server hosting instance of {@link Server}. 
We will do authentications if an + * instance else pass null for no authentication check. + * @param name Used keying this rpc servers' metrics and for naming the Listener thread. + * @param services A list of services. + * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not. */ public RpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { this.bbAllocator = ByteBuffAllocator.create(conf, reservoirEnabled); this.server = server; this.services = services; @@ -280,8 +275,8 @@ public RpcServer(final Server server, final String name, this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME); this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE); - this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT, - DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT); + this.minClientRequestTimeout = + conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT, DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT); this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE); this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this)); @@ -354,12 +349,10 @@ protected AuthenticationTokenSecretManager createSecretManager() { if (!isSecurityEnabled) return null; if (server == null) return null; Configuration conf = server.getConfiguration(); - long keyUpdateInterval = - conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000); - long maxAge = - conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000); + long keyUpdateInterval = conf.getLong("hbase.auth.key.update.interval", 24 * 60 * 60 * 1000); + long maxAge = conf.getLong("hbase.auth.token.max.lifetime", 7 * 24 * 60 * 60 * 1000); return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(), - server.getServerName().toString(), keyUpdateInterval, maxAge); + server.getServerName().toString(), keyUpdateInterval, maxAge); } public SecretManager getSecretManager() { @@ -372,22 +365,21 @@ public void setSecretManager(SecretManager secretMana } /** - * This is a server side method, which is invoked over RPC. On success - * the return response has protobuf response payload. On failure, the - * exception name and the stack trace are returned in the protobuf response. + * This is a server side method, which is invoked over RPC. On success the return response has + * protobuf response payload. On failure, the exception name and the stack trace are returned in + * the protobuf response. */ @Override - public Pair call(RpcCall call, - MonitoredRPCHandler status) throws IOException { + public Pair call(RpcCall call, MonitoredRPCHandler status) + throws IOException { try { MethodDescriptor md = call.getMethod(); Message param = call.getParam(); - status.setRPC(md.getName(), new Object[]{param}, - call.getReceiveTime()); + status.setRPC(md.getName(), new Object[] { param }, call.getReceiveTime()); // TODO: Review after we add in encoded data blocks. 
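To make the timing fields used in this method concrete: receiveTime is taken when the call is queued, startTime when a handler begins servicing it, and endTime when the service method returns. qTime and totalTime are computed exactly as in the hunk above; processingTime is presumably endTime minus startTime, since its assignment sits outside this hunk. A toy example with made-up values:

public class CallTimingExample {
  public static void main(String[] args) {
    long receiveTime = 1_000L;
    long startTime = 1_040L; // the call waited 40 ms in its queue
    long endTime = 1_140L;   // the service method ran for 100 ms
    int qTime = (int) (startTime - receiveTime);      // 40
    int processingTime = (int) (endTime - startTime); // 100 (assumed definition)
    int totalTime = (int) (endTime - receiveTime);    // 140
    System.out.println(qTime + " " + processingTime + " " + totalTime);
  }
}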
status.setRPCPacket(param); status.resume("Servicing call"); - //get an instance of the method arg type + // get an instance of the method arg type HBaseRpcController controller = new HBaseRpcControllerImpl(call.getCellScanner()); controller.setCallTimeout(call.getTimeout()); Message result = call.getService().callBlockingMethod(md, controller, param); @@ -398,11 +390,9 @@ public Pair call(RpcCall call, int qTime = (int) (startTime - receiveTime); int totalTime = (int) (endTime - receiveTime); if (LOG.isTraceEnabled()) { - LOG.trace(CurCall.get().toString() + - ", response " + TextFormat.shortDebugString(result) + - " queueTime: " + qTime + - " processingTime: " + processingTime + - " totalTime: " + totalTime); + LOG.trace(CurCall.get().toString() + ", response " + TextFormat.shortDebugString(result) + + " queueTime: " + qTime + " processingTime: " + processingTime + " totalTime: " + + totalTime); } // Use the raw request call size for now. long requestSize = call.getSize(); @@ -425,24 +415,21 @@ public Pair call(RpcCall call, final String userName = call.getRequestUserName().orElse(StringUtils.EMPTY); // when tagging, we let TooLarge trump TooSmall to keep output simple // note that large responses will often also be slow. - logResponse(param, - md.getName(), md.getName() + "(" + param.getClass().getName() + ")", - tooLarge, tooSlow, - status.getClient(), startTime, processingTime, qTime, - responseSize, userName); + logResponse(param, md.getName(), md.getName() + "(" + param.getClass().getName() + ")", + tooLarge, tooSlow, status.getClient(), startTime, processingTime, qTime, responseSize, + userName); if (this.namedQueueRecorder != null && this.isOnlineLogProviderEnabled) { // send logs to ring buffer owned by slowLogRecorder final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); - this.namedQueueRecorder.addRecord( - new RpcLogDetails(call, param, status.getClient(), responseSize, className, tooSlow, - tooLarge)); + this.namedQueueRecorder.addRecord(new RpcLogDetails(call, param, status.getClient(), + responseSize, className, tooSlow, tooLarge)); } } return new Pair<>(result, controller.cellScanner()); } catch (Throwable e) { - // The above callBlockingMethod will always return a SE. Strip the SE wrapper before - // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't + // The above callBlockingMethod will always return a SE. Strip the SE wrapper before + // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't // need to pass it over the wire. if (e instanceof ServiceException) { if (e.getCause() == null) { @@ -456,33 +443,31 @@ public Pair call(RpcCall call, metrics.exception(e); if (e instanceof LinkageError) throw new DoNotRetryIOException(e); - if (e instanceof IOException) throw (IOException)e; + if (e instanceof IOException) throw (IOException) e; LOG.error("Unexpected throwable object ", e); throw new IOException(e.getMessage(), e); } } /** - * Logs an RPC response to the LOG file, producing valid JSON objects for - * client Operations. - * @param param The parameters received in the call. - * @param methodName The name of the method invoked - * @param call The string representation of the call - * @param tooLarge To indicate if the event is tooLarge - * @param tooSlow To indicate if the event is tooSlow - * @param clientAddress The address of the client who made this call. - * @param startTime The time that the call was initiated, in ms. 
+ * Logs an RPC response to the LOG file, producing valid JSON objects for client Operations. + * @param param The parameters received in the call. + * @param methodName The name of the method invoked + * @param call The string representation of the call + * @param tooLarge To indicate if the event is tooLarge + * @param tooSlow To indicate if the event is tooSlow + * @param clientAddress The address of the client who made this call. + * @param startTime The time that the call was initiated, in ms. * @param processingTime The duration that the call took to run, in ms. - * @param qTime The duration that the call spent on the queue - * prior to being initiated, in ms. - * @param responseSize The size in bytes of the response buffer. - * @param userName UserName of the current RPC Call - */ - void logResponse(Message param, String methodName, String call, boolean tooLarge, - boolean tooSlow, String clientAddress, long startTime, int processingTime, int qTime, - long responseSize, String userName) { - final String className = server == null ? StringUtils.EMPTY : - server.getClass().getSimpleName(); + * @param qTime The duration that the call spent on the queue prior to being initiated, + * in ms. + * @param responseSize The size in bytes of the response buffer. + * @param userName UserName of the current RPC Call + */ + void logResponse(Message param, String methodName, String call, boolean tooLarge, boolean tooSlow, + String clientAddress, long startTime, int processingTime, int qTime, long responseSize, + String userName) { + final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); // base information that is reported regardless of type of call Map responseInfo = new HashMap<>(); responseInfo.put("starttimems", startTime); @@ -517,9 +502,9 @@ void logResponse(Message param, String methodName, String call, boolean tooLarge int numGets = 0; int numMutations = 0; int numServiceCalls = 0; - ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest)param; + ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest) param; for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) { - for (ClientProtos.Action action: regionAction.getActionList()) { + for (ClientProtos.Action action : regionAction.getActionList()) { if (action.hasMutation()) { numMutations++; } @@ -535,15 +520,14 @@ void logResponse(Message param, String methodName, String call, boolean tooLarge responseInfo.put(MULTI_MUTATIONS, numMutations); responseInfo.put(MULTI_SERVICE_CALLS, numServiceCalls); } - final String tag = (tooLarge && tooSlow) ? "TooLarge & TooSlow" - : (tooSlow ? "TooSlow" : "TooLarge"); + final String tag = + (tooLarge && tooSlow) ? "TooLarge & TooSlow" : (tooSlow ? "TooSlow" : "TooLarge"); LOG.warn("(response" + tag + "): " + GSON.toJson(responseInfo)); } - /** - * Truncate to number of chars decided by conf hbase.ipc.trace.log.max.length - * if TRACE is on else to 150 chars Refer to Jira HBASE-20826 and HBASE-20942 + * Truncate to number of chars decided by conf hbase.ipc.trace.log.max.length if TRACE is on else + * to 150 chars Refer to Jira HBASE-20826 and HBASE-20942 * @param strParam stringifiedParam to be truncated * @return truncated trace log string */ @@ -551,7 +535,7 @@ String truncateTraceLog(String strParam) { if (LOG.isTraceEnabled()) { int traceLogMaxLength = getConf().getInt(TRACE_LOG_MAX_LENGTH, DEFAULT_TRACE_LOG_MAX_LENGTH); int truncatedLength = - strParam.length() < traceLogMaxLength ? 
strParam.length() : traceLogMaxLength; + strParam.length() < traceLogMaxLength ? strParam.length() : traceLogMaxLength; String truncatedFlag = truncatedLength == strParam.length() ? "" : KEY_WORD_TRUNCATED; return strParam.subSequence(0, truncatedLength) + truncatedFlag; } @@ -587,13 +571,13 @@ public void addCallSize(final long diff) { /** * Authorize the incoming client connection. - * @param user client user + * @param user client user * @param connection incoming connection - * @param addr InetAddress of incoming connection + * @param addr InetAddress of incoming connection * @throws AuthorizationException when the client isn't authorized to talk the protocol */ public synchronized void authorize(UserGroupInformation user, ConnectionHeader connection, - InetAddress addr) throws AuthorizationException { + InetAddress addr) throws AuthorizationException { if (authorize) { Class c = getServiceInterface(services, connection.getServiceName()); authManager.authorize(user, c, getConf(), addr); @@ -601,29 +585,28 @@ public synchronized void authorize(UserGroupInformation user, ConnectionHeader c } /** - * When the read or write buffer size is larger than this limit, i/o will be - * done in chunks of this size. Most RPC requests and responses would be - * be smaller. + * When the read or write buffer size is larger than this limit, i/o will be done in chunks of + * this size. Most RPC requests and responses would be be smaller. */ - protected static final int NIO_BUFFER_LIMIT = 64 * 1024; //should not be more than 64KB. + protected static final int NIO_BUFFER_LIMIT = 64 * 1024; // should not be more than 64KB. /** - * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of - * ByteBuffer increases. There should not be any performance degredation. - * + * This is a wrapper around + * {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. If the amount of data + * is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many + * direct buffers as the size of ByteBuffer increases. There should not be any performance + * degredation. * @param channel writable byte channel to write on - * @param buffer buffer to write + * @param buffer buffer to write * @return number of bytes written * @throws java.io.IOException e * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer) */ - protected int channelRead(ReadableByteChannel channel, - ByteBuffer buffer) throws IOException { + protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { - int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? - channel.read(buffer) : channelIO(channel, null, buffer); + int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) + ? channel.read(buffer) + : channelIO(channel, null, buffer); if (count > 0) { metrics.receivedBytes(count); } @@ -633,17 +616,15 @@ protected int channelRead(ReadableByteChannel channel, /** * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}. * Only one of readCh or writeCh should be non-null. 
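The javadoc above describes doing large reads and writes in slices of NIO_BUFFER_LIMIT (64 KB) so the JDK never allocates one huge temporary direct buffer. Below is a self-contained sketch of that chunking idea for the read side; it mirrors the intent of channelRead/channelIO but is not the HBase implementation.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public class ChunkedChannelRead {
  private static final int NIO_BUFFER_LIMIT = 64 * 1024; // same 64 KB cap as above

  static int readInChunks(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
    int originalLimit = buf.limit();
    int initialPosition = buf.position();
    try {
      while (buf.position() < originalLimit) {
        // Expose at most 64 KB to the channel per read so the JDK does not
        // create a large temporary direct buffer behind the scenes.
        int window = Math.min(originalLimit - buf.position(), NIO_BUFFER_LIMIT);
        buf.limit(buf.position() + window);
        if (channel.read(buf) <= 0) {
          break; // EOF, or nothing available on a non-blocking channel
        }
      }
    } finally {
      buf.limit(originalLimit); // always restore the caller's limit
    }
    return buf.position() - initialPosition;
  }
}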
- * - * @param readCh read channel + * @param readCh read channel * @param writeCh write channel - * @param buf buffer to read or write into/out of + * @param buf buffer to read or write into/out of * @return bytes written * @throws java.io.IOException e * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer) */ - private static int channelIO(ReadableByteChannel readCh, - WritableByteChannel writeCh, - ByteBuffer buf) throws IOException { + private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, + ByteBuffer buf) throws IOException { int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); @@ -670,9 +651,8 @@ private static int channelIO(ReadableByteChannel readCh, } /** - * Needed for features such as delayed calls. We need to be able to store the current call - * so that we can complete it later or ask questions of what is supported by the current ongoing - * call. + * Needed for features such as delayed calls. We need to be able to store the current call so that + * we can complete it later or ask questions of what is supported by the current ongoing call. * @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local) */ public static Optional getCurrentCall() { @@ -732,8 +712,8 @@ public static Optional getRequestUser() { abstract public int getNumOpenConnections(); /** - * Returns the username for any user associated with the current RPC - * request or not present if no user is set. + * Returns the username for any user associated with the current RPC request or not present if no + * user is set. */ public static Optional getRequestUserName() { return getRequestUser().map(User::getShortName); @@ -748,11 +728,11 @@ public static Optional getRemoteAddress() { /** * @param serviceName Some arbitrary string that represents a 'service'. - * @param services Available service instances + * @param services Available service instances * @return Matching BlockingServiceAndInterface pair */ protected static BlockingServiceAndInterface getServiceAndInterface( - final List services, final String serviceName) { + final List services, final String serviceName) { for (BlockingServiceAndInterface bs : services) { if (bs.getBlockingService().getDescriptorForType().getName().equals(serviceName)) { return bs; @@ -763,32 +743,28 @@ protected static BlockingServiceAndInterface getServiceAndInterface( /** * @param serviceName Some arbitrary string that represents a 'service'. - * @param services Available services and their service interfaces. + * @param services Available services and their service interfaces. * @return Service interface class for serviceName */ - protected static Class getServiceInterface( - final List services, - final String serviceName) { - BlockingServiceAndInterface bsasi = - getServiceAndInterface(services, serviceName); - return bsasi == null? null: bsasi.getServiceInterface(); + protected static Class getServiceInterface(final List services, + final String serviceName) { + BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName); + return bsasi == null ? null : bsasi.getServiceInterface(); } /** * @param serviceName Some arbitrary string that represents a 'service'. - * @param services Available services and their service interfaces. + * @param services Available services and their service interfaces. 
* @return BlockingService that goes with the passed serviceName */ - protected static BlockingService getService( - final List services, - final String serviceName) { - BlockingServiceAndInterface bsasi = - getServiceAndInterface(services, serviceName); - return bsasi == null? null: bsasi.getBlockingService(); + protected static BlockingService getService(final List services, + final String serviceName) { + BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName); + return bsasi == null ? null : bsasi.getBlockingService(); } protected static MonitoredRPCHandler getStatus() { - // It is ugly the way we park status up in RpcServer. Let it be for now. TODO. + // It is ugly the way we park status up in RpcServer. Let it be for now. TODO. MonitoredRPCHandler status = RpcServer.MONITORED_RPC.get(); if (status != null) { return status; @@ -799,9 +775,9 @@ protected static MonitoredRPCHandler getStatus() { return status; } - /** Returns the remote side ip address when invoked inside an RPC - * Returns null incase of an error. - * @return InetAddress + /** + * Returns the remote side ip address when invoked inside an RPC Returns null incase of an error. + * n */ public static InetAddress getRemoteIp() { RpcCall call = CurCall.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java index 298b47231160..83da2cb4a986 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,15 +20,15 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor; -import org.apache.hadoop.hbase.util.ReflectionUtils; @InterfaceAudience.Private public class RpcServerFactory { @@ -44,19 +44,18 @@ private RpcServerFactory() { } public static RpcServer createRpcServer(final Server server, final String name, - final List services, final InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler) throws IOException { return createRpcServer(server, name, services, bindAddress, conf, scheduler, true); } public static RpcServer createRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { - String rpcServerClass = conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - NettyRpcServer.class.getName()); + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + String rpcServerClass = + 
conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY, NettyRpcServer.class.getName()); StringBuilder servicesList = new StringBuilder(); - for (BlockingServiceAndInterface s: services) { + for (BlockingServiceAndInterface s : services) { ServiceDescriptor sd = s.getBlockingService().getDescriptorForType(); if (sd == null) continue; // Can be null for certain tests like TestTokenAuthentication if (servicesList.length() > 0) servicesList.append(", "); @@ -64,8 +63,8 @@ public static RpcServer createRpcServer(final Server server, final String name, } LOG.info("Creating " + rpcServerClass + " hosting " + servicesList); return ReflectionUtils.instantiateWithCustomCtor(rpcServerClass, - new Class[] { Server.class, String.class, List.class, - InetSocketAddress.class, Configuration.class, RpcScheduler.class, boolean.class }, - new Object[] { server, name, services, bindAddress, conf, scheduler, reservoirEnabled }); + new Class[] { Server.class, String.class, List.class, InetSocketAddress.class, + Configuration.class, RpcScheduler.class, boolean.class }, + new Object[] { server, name, services, bindAddress, conf, scheduler, reservoirEnabled }); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index ee6e57a2a9f5..80549067972d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.io.IOException; @@ -36,18 +34,21 @@ @InterfaceAudience.Private public interface RpcServerInterface { void start(); + boolean isStarted(); void stop(); + void join() throws InterruptedException; void setSocketSendBufSize(int size); + InetSocketAddress getListenerAddress(); - Pair call(RpcCall call, MonitoredRPCHandler status) - throws IOException; + Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException; void setErrorHandler(HBaseRPCErrorHandler handler); + HBaseRPCErrorHandler getErrorHandler(); /** @@ -56,15 +57,14 @@ Pair call(RpcCall call, MonitoredRPCHandler status) MetricsHBaseServer getMetrics(); /** - * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add + * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add * call total size and then again at end of a call to remove the call size. * @param diff Change (plus or minus) */ void addCallSize(long diff); /** - * Refresh authentication manager policy. - * @param pp + * Refresh authentication manager policy. 
n */ void refreshAuthManager(Configuration conf, PolicyProvider pp); @@ -80,7 +80,6 @@ Pair call(RpcCall call, MonitoredRPCHandler status) /** * Set Online SlowLog Provider - * * @param namedQueueRecorder instance of {@link NamedQueueRecorder} */ void setNamedQueueRecorder(final NamedQueueRecorder namedQueueRecorder); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index 2ed3ebb99219..44a7a74006a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,10 +41,12 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; @@ -53,22 +55,22 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; /** - * Datastructure that holds all necessary to a method invocation and then afterward, carries - * the result. + * Datastructure that holds all necessary to a method invocation and then afterward, carries the + * result. */ @InterfaceAudience.Private public abstract class ServerCall implements RpcCall, RpcResponse { - protected final int id; // the client's call id + protected final int id; // the client's call id protected final BlockingService service; protected final MethodDescriptor md; protected final RequestHeader header; - protected Message param; // the parameter passed + protected Message param; // the parameter passed // Optional cell data passed outside of protobufs. protected final CellScanner cellScanner; - protected final T connection; // connection to client - protected final long receiveTime; // the time received when response is null - // the time served when response is not null + protected final T connection; // connection to client + protected final long receiveTime; // the time received when response is null + // the time served when response is not null protected final int timeout; protected long startTime; protected final long deadline;// the deadline to handle this call, if exceed we can drop it. @@ -82,7 +84,7 @@ public abstract class ServerCall implements RpcCa */ protected BufferChain response; - protected final long size; // size of current call + protected final long size; // size of current call protected boolean isError; protected ByteBufferListOutputStream cellBlockStream = null; protected CallCleanup reqCleanup = null; @@ -110,9 +112,9 @@ public abstract class ServerCall implements RpcCa @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Can't figure why this complaint is happening... 
see below") ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, T connection, long size, InetAddress remoteAddress, - long receiveTime, int timeout, ByteBuffAllocator byteBuffAllocator, - CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { + Message param, CellScanner cellScanner, T connection, long size, InetAddress remoteAddress, + long receiveTime, int timeout, ByteBuffAllocator byteBuffAllocator, + CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { this.id = id; this.service = service; this.md = md; @@ -125,7 +127,7 @@ public abstract class ServerCall implements RpcCa this.isError = false; this.size = size; if (connection != null) { - this.user = connection.user; + this.user = connection.user; this.retryImmediatelySupported = connection.retryImmediatelySupported; } else { this.user = null; @@ -141,8 +143,7 @@ public abstract class ServerCall implements RpcCa } /** - * Call is done. Execution happened and we returned results to client. It is - * now safe to cleanup. + * Call is done. Execution happened and we returned results to client. It is now safe to cleanup. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "Presume the lock on processing request held by caller is protection enough") @@ -196,9 +197,9 @@ public void releaseByWAL() { @Override public String toString() { - return toShortString() + " param: " + - (this.param != null? ProtobufUtil.getShortTextFormat(this.param): "") + - " connection: " + connection.toString(); + return toShortString() + " param: " + + (this.param != null ? ProtobufUtil.getShortTextFormat(this.param) : "") + " connection: " + + connection.toString(); } @Override @@ -217,12 +218,13 @@ public int getPriority() { */ @Override public String toShortString() { - String serviceName = this.connection.service != null ? - this.connection.service.getDescriptorForType().getName() : "null"; - return "callId: " + this.id + " service: " + serviceName + - " methodName: " + ((this.md != null) ? this.md.getName() : "n/a") + - " size: " + StringUtils.TraditionalBinaryPrefix.long2String(this.size, "", 1) + - " connection: " + connection + " deadline: " + deadline; + String serviceName = this.connection.service != null + ? this.connection.service.getDescriptorForType().getName() + : "null"; + return "callId: " + this.id + " service: " + serviceName + " methodName: " + + ((this.md != null) ? 
this.md.getName() : "n/a") + " size: " + + StringUtils.TraditionalBinaryPrefix.long2String(this.size, "", 1) + " connection: " + + connection + " deadline: " + deadline; } @Override @@ -274,8 +276,7 @@ public synchronized void setResponse(Message m, final CellScanner cells, Throwab headerBuilder.setCellBlockMeta(cellBlockBuilder.build()); } Message header = headerBuilder.build(); - ByteBuffer headerBuf = - createHeaderAndMessageBytes(m, header, cellBlockSize, cellBlock); + ByteBuffer headerBuf = createHeaderAndMessageBytes(m, header, cellBlockSize, cellBlock); ByteBuffer[] responseBufs = null; int cellBlockBufferSize = 0; if (cellBlock != null) { @@ -309,16 +310,16 @@ public synchronized void setResponse(Message m, final CellScanner cells, Throwab } static void setExceptionResponse(Throwable t, String errorMsg, - ResponseHeader.Builder headerBuilder) { + ResponseHeader.Builder headerBuilder) { ExceptionResponse.Builder exceptionBuilder = ExceptionResponse.newBuilder(); exceptionBuilder.setExceptionClassName(t.getClass().getName()); exceptionBuilder.setStackTrace(errorMsg); exceptionBuilder.setDoNotRetry(t instanceof DoNotRetryIOException); if (t instanceof RegionMovedException) { - // Special casing for this exception. This is only one carrying a payload. + // Special casing for this exception. This is only one carrying a payload. // Do this instead of build a generic system for allowing exceptions carry // any kind of payload. - RegionMovedException rme = (RegionMovedException)t; + RegionMovedException rme = (RegionMovedException) t; exceptionBuilder.setHostname(rme.getHostname()); exceptionBuilder.setPort(rme.getPort()); } else if (t instanceof HBaseServerException) { @@ -329,8 +330,8 @@ static void setExceptionResponse(Throwable t, String errorMsg, headerBuilder.setException(exceptionBuilder.build()); } - static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, - int cellBlockSize, List cellBlock) throws IOException { + static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, int cellBlockSize, + List cellBlock) throws IOException { // Organize the response as a set of bytebuffers rather than collect it all together inside // one big byte array; save on allocations. // for writing the header, we check if there is available space in the buffers @@ -338,10 +339,8 @@ static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, // the last buffer in the cellblock. This applies to the cellblock created from the // pool or even the onheap cellblock buffer in case there is no pool enabled. // Possible reuse would avoid creating a temporary array for storing the header every time. - ByteBuffer possiblePBBuf = - (cellBlockSize > 0) ? cellBlock.get(cellBlock.size() - 1) : null; - int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0, - resultVintSize = 0; + ByteBuffer possiblePBBuf = (cellBlockSize > 0) ? 
cellBlock.get(cellBlock.size() - 1) : null; + int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0, resultVintSize = 0; if (header != null) { headerSerializedSize = header.getSerializedSize(); headerVintSize = CodedOutputStream.computeUInt32SizeNoTag(headerSerializedSize); @@ -351,15 +350,13 @@ static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, resultVintSize = CodedOutputStream.computeUInt32SizeNoTag(resultSerializedSize); } // calculate the total size - int totalSize = headerSerializedSize + headerVintSize - + (resultSerializedSize + resultVintSize) - + cellBlockSize; - int totalPBSize = headerSerializedSize + headerVintSize + resultSerializedSize - + resultVintSize + Bytes.SIZEOF_INT; + int totalSize = headerSerializedSize + headerVintSize + (resultSerializedSize + resultVintSize) + + cellBlockSize; + int totalPBSize = headerSerializedSize + headerVintSize + resultSerializedSize + resultVintSize + + Bytes.SIZEOF_INT; // Only if the last buffer has enough space for header use it. Else allocate // a new buffer. Assume they are all flipped - if (possiblePBBuf != null - && possiblePBBuf.limit() + totalPBSize <= possiblePBBuf.capacity()) { + if (possiblePBBuf != null && possiblePBBuf.limit() + totalPBSize <= possiblePBBuf.capacity()) { // duplicate the buffer. This is where the header is going to be written ByteBuffer pbBuf = possiblePBBuf.duplicate(); // get the current limit @@ -380,7 +377,7 @@ static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, } private static void writeToCOS(Message result, Message header, int totalSize, ByteBuffer pbBuf) - throws IOException { + throws IOException { ByteBufferUtils.putInt(pbBuf, totalSize); // create COS that works on BB CodedOutputStream cos = CodedOutputStream.newInstance(pbBuf); @@ -395,7 +392,7 @@ private static void writeToCOS(Message result, Message header, int totalSize, By } private static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, - int totalSize, int totalPBSize) throws IOException { + int totalSize, int totalPBSize) throws IOException { ByteBuffer pbBuf = ByteBuffer.allocate(totalPBSize); writeToCOS(result, header, totalSize, pbBuf); pbBuf.flip(); @@ -406,10 +403,10 @@ protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { if (!this.connection.useSasl) { return bc; } - // Looks like no way around this; saslserver wants a byte array. I have to make it one. + // Looks like no way around this; saslserver wants a byte array. I have to make it one. // THIS IS A BIG UGLY COPY. - byte [] responseBytes = bc.getBytes(); - byte [] token; + byte[] responseBytes = bc.getBytes(); + byte[] token; // synchronization may be needed since there can be multiple Handler // threads using saslServer or Crypto AES to wrap responses. 
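
As the hunk above shows, createHeaderAndMessageBytes frames the response as a 4-byte total size followed by the varint-delimited ResponseHeader and result message, reusing the last cellblock buffer when it has room. The following is a simplified sketch of that framing using stock protobuf and NIO; it always allocates a fresh heap buffer rather than reusing a pooled cellblock buffer, and the class name is illustrative.

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import com.google.protobuf.CodedOutputStream;
    import com.google.protobuf.Message;

    final class ResponseFraming {
      /** Lays out [total size int][delimited header][delimited result] in one heap buffer. */
      static ByteBuffer frame(Message header, Message result, int cellBlockSize) throws IOException {
        int headerLen = header.getSerializedSize();
        int resultLen = result.getSerializedSize();
        int pbBytes = headerLen + CodedOutputStream.computeUInt32SizeNoTag(headerLen)
            + resultLen + CodedOutputStream.computeUInt32SizeNoTag(resultLen);
        ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES + pbBytes);
        // The advertised total also counts the cellblock that is sent in separate buffers.
        buf.putInt(pbBytes + cellBlockSize);
        CodedOutputStream cos = CodedOutputStream.newInstance(buf);
        cos.writeMessageNoTag(header); // varint length + header bytes
        cos.writeMessageNoTag(result); // varint length + result bytes
        cos.flush();                   // leaves the buffer position after the written bytes
        buf.flip();
        return buf;
      }
    }
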
if (connection.useCryptoAesWrap) { @@ -423,8 +420,8 @@ protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { } } if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace("Adding saslServer wrapped token of size " + token.length - + " as call response."); + RpcServer.LOG + .trace("Adding saslServer wrapped token of size " + token.length + " as call response."); } ByteBuffer[] responseBufs = new ByteBuffer[2]; @@ -471,6 +468,7 @@ public void incrementResponseBlockSize(long blockSize) { public long getResponseExceptionSize() { return exceptionSize; } + @Override public void incrementResponseExceptionSize(long exSize) { exceptionSize += exSize; @@ -560,8 +558,8 @@ public int getRemotePort() { public synchronized BufferChain getResponse() { if (connection.useWrap) { /* - * wrapping result with SASL as the last step just before sending it out, so - * every message must have the right increasing sequence number + * wrapping result with SASL as the last step just before sending it out, so every message + * must have the right increasing sequence number */ try { return wrapWithSasl(response); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 89b1adc2cd89..5eb6e27596e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -89,9 +89,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo; /** Reads calls from a connection and queues them for handling. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="VO_VOLATILE_INCREMENT", - justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT", + justification = "False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") @InterfaceAudience.Private abstract class ServerRpcConnection implements Closeable { @@ -172,25 +171,23 @@ public VersionInfo getVersionInfo() { } private String getFatalConnectionString(final int version, final byte authByte) { - return "serverVersion=" + RpcServer.CURRENT_VERSION + - ", clientVersion=" + version + ", authMethod=" + authByte + - // The provider may be null if we failed to parse the header of the request - ", authName=" + (provider == null ? "unknown" : provider.getSaslAuthMethod().getName()) + - " from " + toString(); + return "serverVersion=" + RpcServer.CURRENT_VERSION + ", clientVersion=" + version + + ", authMethod=" + authByte + + // The provider may be null if we failed to parse the header of the request + ", authName=" + (provider == null ? "unknown" : provider.getSaslAuthMethod().getName()) + + " from " + toString(); } /** - * Set up cell block codecs - * @throws FatalConnectionException + * Set up cell block codecs n */ - private void setupCellBlockCodecs(final ConnectionHeader header) - throws FatalConnectionException { + private void setupCellBlockCodecs(final ConnectionHeader header) throws FatalConnectionException { // TODO: Plug in other supported decoders. 
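
setupCellBlockCodecs above instantiates whatever Codec and CompressionCodec class names the client advertised in its ConnectionHeader. The reflective pattern, reduced to a generic helper (the class and method names here are illustrative, not HBase API):

    import java.io.IOException;

    final class CodecLoader {
      /** Instantiates a codec implementation from a class name supplied by the client. */
      static <T> T instantiate(String className, Class<T> expectedType) throws IOException {
        try {
          return Class.forName(className)
              .asSubclass(expectedType)        // fail fast if the named class is not a codec
              .getDeclaredConstructor()
              .newInstance();                  // codecs are expected to expose a no-arg constructor
        } catch (ReflectiveOperationException | ClassCastException e) {
          // The server reports this as UnsupportedCellCodecException /
          // UnsupportedCompressionCodecException, both fatal to the connection.
          throw new IOException("Cannot load codec class " + className, e);
        }
      }
    }
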
if (!header.hasCellBlockCodecClass()) return; String className = header.getCellBlockCodecClass(); if (className == null || className.length() == 0) return; try { - this.codec = (Codec)Class.forName(className).getDeclaredConstructor().newInstance(); + this.codec = (Codec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCellCodecException(className, e); } @@ -198,50 +195,46 @@ private void setupCellBlockCodecs(final ConnectionHeader header) className = header.getCellBlockCompressorClass(); try { this.compressionCodec = - (CompressionCodec)Class.forName(className).getDeclaredConstructor().newInstance(); + (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCompressionCodecException(className, e); } } /** - * Set up cipher for rpc encryption with Apache Commons Crypto - * - * @throws FatalConnectionException + * Set up cipher for rpc encryption with Apache Commons Crypto n */ private void setupCryptoCipher(final ConnectionHeader header, - RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) - throws FatalConnectionException { + RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) throws FatalConnectionException { // If simple auth, return if (saslServer == null) return; // check if rpc encryption with Crypto AES String qop = saslServer.getNegotiatedQop(); - boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY - .getSaslQop().equalsIgnoreCase(qop); - boolean isCryptoAesEncryption = isEncryption && this.rpcServer.conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop().equalsIgnoreCase(qop); + boolean isCryptoAesEncryption = isEncryption + && this.rpcServer.conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); if (!isCryptoAesEncryption) return; if (!header.hasRpcCryptoCipherTransformation()) return; String transformation = header.getRpcCryptoCipherTransformation(); if (transformation == null || transformation.length() == 0) return; - // Negotiates AES based on complete saslServer. - // The Crypto metadata need to be encrypted and send to client. + // Negotiates AES based on complete saslServer. + // The Crypto metadata need to be encrypted and send to client. 
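
setupCryptoCipher above only negotiates Crypto AES when SASL agreed on privacy protection and hbase.rpc.crypto.encryption.aes.enabled is set; it then draws per-direction key and IV material of hbase.rpc.crypto.encryption.aes.cipher.keySizeBits bits. A condensed sketch of just that key-material step, using plain JDK SecureRandom instead of the configurable commons-crypto random (class and field names illustrative):

    import java.security.SecureRandom;

    final class AesKeyMaterial {
      final byte[] inKey, outKey, inIv, outIv;

      AesKeyMaterial(int cipherKeyBits) {
        if (cipherKeyBits % 8 != 0) {
          throw new IllegalArgumentException("AES cipher key size in bits must be a multiple of 8");
        }
        int len = cipherKeyBits / 8;
        SecureRandom rng = new SecureRandom();
        inKey = random(rng, len);   // key for traffic the server decrypts
        outKey = random(rng, len);  // key for traffic the server encrypts
        inIv = random(rng, len);
        outIv = random(rng, len);
        // The ConnectionHeaderResponse sends these back with in/out swapped, so both sides
        // agree on which key and IV protect which direction.
      }

      private static byte[] random(SecureRandom rng, int len) {
        byte[] bytes = new byte[len];
        rng.nextBytes(bytes);
        return bytes;
      }
    }
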
Properties properties = new Properties(); // the property for SecureRandomFactory properties.setProperty(CryptoRandomFactory.CLASSES_KEY, - this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", - "org.apache.commons.crypto.random.JavaCryptoRandom")); + this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", + "org.apache.commons.crypto.random.JavaCryptoRandom")); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); - int cipherKeyBits = this.rpcServer.conf.getInt( - "hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); + int cipherKeyBits = + this.rpcServer.conf.getInt("hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); // generate key and iv if (cipherKeyBits % 8 != 0) { - throw new IllegalArgumentException("The AES cipher key size in bits" + - " should be a multiple of byte"); + throw new IllegalArgumentException( + "The AES cipher key size in bits" + " should be a multiple of byte"); } int len = cipherKeyBits / 8; byte[] inKey = new byte[len]; @@ -258,10 +251,9 @@ private void setupCryptoCipher(final ConnectionHeader header, secureRandom.nextBytes(outIv); // create CryptoAES for server - cryptoAES = new CryptoAES(transformation, properties, - inKey, outKey, inIv, outIv); + cryptoAES = new CryptoAES(transformation, properties, inKey, outKey, inIv, outIv); // create SaslCipherMeta and send to client, - // for client, the [inKey, outKey], [inIv, outIv] should be reversed + // for client, the [inKey, outKey], [inIv, outIv] should be reversed RPCProtos.CryptoCipherMeta.Builder ccmBuilder = RPCProtos.CryptoCipherMeta.newBuilder(); ccmBuilder.setTransformation(transformation); ccmBuilder.setInIv(getByteString(outIv)); @@ -297,8 +289,7 @@ private UserGroupInformation createUser(ConnectionHeader head) { } if (effectiveUser != null) { if (realUser != null) { - UserGroupInformation realUserUgi = - UserGroupInformation.createRemoteUser(realUser); + UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser); ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi); } else { ugi = UserGroupInformation.createRemoteUser(effectiveUser); @@ -317,13 +308,13 @@ protected final void disposeSasl() { /** * No protobuf encoding of raw sasl messages */ - protected final void doRawSaslReply(SaslStatus status, Writable rv, - String errorClass, String error) throws IOException { + protected final void doRawSaslReply(SaslStatus status, Writable rv, String errorClass, + String error) throws IOException { BufferChain bc; // In my testing, have noticed that sasl messages are usually // in the ballpark of 100-200. That's why the initial capacity is 256. 
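
doRawSaslReply above sends SASL negotiation messages outside the protobuf envelope: a status int, then either the token or an error class and message. A bare sketch of the success-path framing with plain java.io; the status value 0 for SUCCESS and the helper names are assumptions here.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class RawSaslReply {
      /** Frames a successful negotiation reply as [status int][token length][token bytes]. */
      static byte[] success(byte[] token) throws IOException {
        // 256 matches the observation above that SASL messages are usually 100-200 bytes.
        ByteArrayOutputStream bos = new ByteArrayOutputStream(256);
        try (DataOutputStream out = new DataOutputStream(bos)) {
          out.writeInt(0);            // SaslStatus.SUCCESS, assumed to encode as 0
          out.writeInt(token.length); // BytesWritable writes a length prefix before the bytes
          out.write(token);
        }
        return bos.toByteArray();
      }
    }
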
try (ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256); - DataOutputStream out = new DataOutputStream(saslResponse)) { + DataOutputStream out = new DataOutputStream(saslResponse)) { out.writeInt(status.state); // write status if (status == SaslStatus.SUCCESS) { rv.write(out); @@ -336,8 +327,7 @@ protected final void doRawSaslReply(SaslStatus status, Writable rv, doRespond(() -> bc); } - public void saslReadAndProcess(ByteBuff saslToken) throws IOException, - InterruptedException { + public void saslReadAndProcess(ByteBuff saslToken) throws IOException, InterruptedException { if (saslContextEstablished) { RpcServer.LOG.trace("Read input token of size={} for processing by saslServer.unwrap()", saslToken.limit()); @@ -345,7 +335,7 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, processOneRpc(saslToken); } else { byte[] b = saslToken.hasArray() ? saslToken.array() : saslToken.toBytes(); - byte [] plaintextData; + byte[] plaintextData; if (useCryptoAesWrap) { // unwrap with CryptoAES plaintextData = cryptoAES.unwrap(b, 0, b.length); @@ -361,18 +351,19 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, try { saslServer = new HBaseSaslRpcServer(provider, rpcServer.saslProps, rpcServer.secretManager); - } catch (Exception e){ + } catch (Exception e) { RpcServer.LOG.error("Error when trying to create instance of HBaseSaslRpcServer " + "with sasl provider: " + provider, e); throw e; } RpcServer.LOG.debug("Created SASL server with mechanism={}", - provider.getSaslAuthMethod().getAuthMethod()); + provider.getSaslAuthMethod().getAuthMethod()); } - RpcServer.LOG.debug("Read input token of size={} for processing by saslServer." + - "evaluateResponse()", saslToken.limit()); - replyToken = saslServer.evaluateResponse(saslToken.hasArray()? - saslToken.array() : saslToken.toBytes()); + RpcServer.LOG.debug( + "Read input token of size={} for processing by saslServer." + "evaluateResponse()", + saslToken.limit()); + replyToken = saslServer + .evaluateResponse(saslToken.hasArray() ? saslToken.array() : saslToken.toBytes()); } catch (IOException e) { RpcServer.LOG.debug("Failed to execute SASL handshake", e); IOException sendToClient = e; @@ -389,26 +380,24 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, this.rpcServer.metrics.authenticationFailure(); String clientIP = this.toString(); // attempting user could be null - RpcServer.AUDITLOG - .warn("{}{}: {}", RpcServer.AUTH_FAILED_FOR, clientIP, saslServer.getAttemptingUser()); + RpcServer.AUDITLOG.warn("{}{}: {}", RpcServer.AUTH_FAILED_FOR, clientIP, + saslServer.getAttemptingUser()); throw e; } if (replyToken != null) { if (RpcServer.LOG.isDebugEnabled()) { - RpcServer.LOG.debug("Will send token of size " + replyToken.length - + " from saslServer."); + RpcServer.LOG.debug("Will send token of size " + replyToken.length + " from saslServer."); } - doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, - null); + doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, null); } if (saslServer.isComplete()) { String qop = saslServer.getNegotiatedQop(); useWrap = qop != null && !"auth".equalsIgnoreCase(qop); - ugi = provider.getAuthorizedUgi(saslServer.getAuthorizationID(), - this.rpcServer.secretManager); + ugi = + provider.getAuthorizedUgi(saslServer.getAuthorizationID(), this.rpcServer.secretManager); RpcServer.LOG.debug( - "SASL server context established. Authenticated client: {}. 
Negotiated QoP is {}", - ugi, qop); + "SASL server context established. Authenticated client: {}. Negotiated QoP is {}", ugi, + qop); this.rpcServer.metrics.authenticationSuccess(); RpcServer.AUDITLOG.info(RpcServer.AUTH_SUCCESSFUL_FOR + ugi); saslContextEstablished = true; @@ -423,8 +412,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE int count; if (unwrappedDataLengthBuffer.remaining() > 0) { count = this.rpcServer.channelRead(ch, unwrappedDataLengthBuffer); - if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) - return; + if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) return; } if (unwrappedData == null) { @@ -432,8 +420,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE int unwrappedDataLength = unwrappedDataLengthBuffer.getInt(); if (unwrappedDataLength == RpcClient.PING_CALL_ID) { - if (RpcServer.LOG.isDebugEnabled()) - RpcServer.LOG.debug("Received ping message"); + if (RpcServer.LOG.isDebugEnabled()) RpcServer.LOG.debug("Received ping message"); unwrappedDataLengthBuffer.clear(); continue; // ping message } @@ -441,8 +428,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE } count = this.rpcServer.channelRead(ch, unwrappedData); - if (count <= 0 || unwrappedData.remaining() > 0) - return; + if (count <= 0 || unwrappedData.remaining() > 0) return; if (unwrappedData.remaining() == 0) { unwrappedDataLengthBuffer.clear(); @@ -453,8 +439,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE } } - public void processOneRpc(ByteBuff buf) throws IOException, - InterruptedException { + public void processOneRpc(ByteBuff buf) throws IOException, InterruptedException { if (connectionHeaderRead) { processRequest(buf); } else { @@ -463,8 +448,8 @@ public void processOneRpc(ByteBuff buf) throws IOException, if (rpcServer.needAuthorization() && !authorizeConnection()) { // Throw FatalConnectionException wrapping ACE so client does right thing and closes // down the connection instead of trying to read non-existent retun. - throw new AccessDeniedException("Connection from " + this + " for service " + - connectionHeader.getServiceName() + " is unauthorized for user: " + ugi); + throw new AccessDeniedException("Connection from " + this + " for service " + + connectionHeader.getServiceName() + " is unauthorized for user: " + ugi); } this.user = this.rpcServer.userProvider.create(this.ugi); } @@ -476,8 +461,7 @@ private boolean authorizeConnection() throws IOException { // real user for the effective user, therefore not required to // authorize real user. 
doAs is allowed only for simple or kerberos // authentication - if (ugi != null && ugi.getRealUser() != null - && provider.supportsProtocolAuthentication()) { + if (ugi != null && ugi.getRealUser() != null && provider.supportsProtocolAuthentication()) { ProxyUsers.authorize(ugi, this.getHostAddress(), this.rpcServer.conf); } this.rpcServer.authorize(ugi, connectionHeader, getHostInetAddress()); @@ -498,8 +482,8 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { if (buf.hasArray()) { this.connectionHeader = ConnectionHeader.parseFrom(buf.array()); } else { - CodedInputStream cis = UnsafeByteOperations.unsafeWrap( - new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); + CodedInputStream cis = UnsafeByteOperations + .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); cis.enableAliasing(true); this.connectionHeader = ConnectionHeader.parseFrom(cis); } @@ -509,7 +493,7 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { if (this.service == null) throw new UnknownServiceException(serviceName); setupCellBlockCodecs(this.connectionHeader); RPCProtos.ConnectionHeaderResponse.Builder chrBuilder = - RPCProtos.ConnectionHeaderResponse.newBuilder(); + RPCProtos.ConnectionHeaderResponse.newBuilder(); setupCryptoCipher(this.connectionHeader, chrBuilder); responseConnectionHeader(chrBuilder); UserGroupInformation protocolUser = createUser(connectionHeader); @@ -520,29 +504,26 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { } // audit logging for SASL authenticated users happens in saslReadAndProcess() if (authenticatedWithFallback) { - RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", - ugi, getHostAddress()); + RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", ugi, + getHostAddress()); } } else { // user is authenticated ugi.setAuthenticationMethod(provider.getSaslAuthMethod().getAuthMethod()); - //Now we check if this is a proxy user case. If the protocol user is - //different from the 'user', it is a proxy user scenario. However, - //this is not allowed if user authenticated with DIGEST. - if ((protocolUser != null) - && (!protocolUser.getUserName().equals(ugi.getUserName()))) { + // Now we check if this is a proxy user case. If the protocol user is + // different from the 'user', it is a proxy user scenario. However, + // this is not allowed if user authenticated with DIGEST. + if ((protocolUser != null) && (!protocolUser.getUserName().equals(ugi.getUserName()))) { if (!provider.supportsProtocolAuthentication()) { // Not allowed to doAs if token authentication is used throw new AccessDeniedException("Authenticated user (" + ugi - + ") doesn't match what the client claims to be (" - + protocolUser + ")"); + + ") doesn't match what the client claims to be (" + protocolUser + ")"); } else { // Effective user can be different from authenticated user // for simple auth or kerberos auth // The user is the real user. Now we create a proxy user UserGroupInformation realUser = ugi; - ugi = UserGroupInformation.createProxyUser(protocolUser - .getUserName(), realUser); + ugi = UserGroupInformation.createProxyUser(protocolUser.getUserName(), realUser); // Now the user is a proxy user, set Authentication method Proxy. 
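
The block above upgrades the claimed (effective) user to a Hadoop proxy user on top of the authenticated real user, and later checks it against the proxy-user rules. A compact sketch using the same Hadoop security calls that appear in this hunk; the helper class and method are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
    import org.apache.hadoop.security.authorize.AuthorizationException;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    final class ProxyUserSketch {
      static UserGroupInformation asProxy(UserGroupInformation realUser, String claimedUser,
          String remoteAddress, Configuration conf) throws AuthorizationException {
        // The claimed (effective) user rides on top of the authenticated real user.
        UserGroupInformation proxy = UserGroupInformation.createProxyUser(claimedUser, realUser);
        proxy.setAuthenticationMethod(AuthenticationMethod.PROXY);
        // Rejects the impersonation unless the hadoop.proxyuser.* rules allow it.
        ProxyUsers.authorize(proxy, remoteAddress, conf);
        return proxy;
      }
    }
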
ugi.setAuthenticationMethod(AuthenticationMethod.PROXY); } @@ -551,21 +532,20 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { String version; if (this.connectionHeader.hasVersionInfo()) { // see if this connection will support RetryImmediatelyException - this.retryImmediatelySupported = - VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); + this.retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); version = this.connectionHeader.getVersionInfo().getVersion(); } else { version = "UNKNOWN"; } RpcServer.AUDITLOG.info("Connection from {}:{}, version={}, sasl={}, ugi={}, service={}", - this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName); + this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName); } /** * Send the response for connection header */ private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) - throws FatalConnectionException { + throws FatalConnectionException { // Response the connection header if Crypto AES is enabled if (!chrBuilder.hasCryptoCipherMeta()) return; try { @@ -577,7 +557,7 @@ private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder byte[] wrapped = saslServer.wrap(unwrapped, 0, unwrapped.length); BufferChain bc; try (ByteBufferOutputStream response = new ByteBufferOutputStream(wrapped.length + 4); - DataOutputStream out = new DataOutputStream(response)) { + DataOutputStream out = new DataOutputStream(response)) { out.writeInt(wrapped.length); out.write(wrapped); bc = new BufferChain(response.getByteBuffer()); @@ -591,14 +571,10 @@ private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder protected abstract void doRespond(RpcResponse resp) throws IOException; /** - * @param buf - * Has the request header and the request param and optionally - * encoded data buffer all in this one array. - * @throws IOException - * @throws InterruptedException + * n * Has the request header and the request param and optionally encoded data buffer all in this + * one array. nn */ - protected void processRequest(ByteBuff buf) throws IOException, - InterruptedException { + protected void processRequest(ByteBuff buf) throws IOException, InterruptedException { long totalRequestSize = buf.limit(); int offset = 0; // Here we read in the header. We avoid having pb @@ -609,7 +585,7 @@ protected void processRequest(ByteBuff buf) throws IOException, cis = UnsafeByteOperations.unsafeWrap(buf.array(), 0, buf.limit()).newCodedInput(); } else { cis = UnsafeByteOperations - .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); + .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); } cis.enableAliasing(true); int headerSize = cis.readRawVarint32(); @@ -631,20 +607,22 @@ protected void processRequest(ByteBuff buf) throws IOException, try (Scope ignored = span.makeCurrent()) { int id = header.getCallId(); if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + - " totalRequestSize: " + totalRequestSize + " bytes"); + RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + + " totalRequestSize: " + totalRequestSize + " bytes"); } // Enforcing the call queue size, this triggers a retry in the client // This is a bit late to be doing this check - we have already read in the // total request. 
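
The admission check above adds each in-flight call's size to callQueueSizeInBytes and rejects a request once the running sum plus the new request would exceed hbase.ipc.server.max.callqueue.size; the size is subtracted again when the call completes. A tiny sketch of that byte accounting with a LongAdder (class and method names are illustrative):

    import java.util.concurrent.atomic.LongAdder;

    final class CallQueueByteLimiter {
      private final LongAdder queuedBytes = new LongAdder();
      private final long maxQueueBytes;

      CallQueueByteLimiter(long maxQueueBytes) {
        this.maxQueueBytes = maxQueueBytes;
      }

      /** Returns false (caller answers with CallQueueTooBigException) when over budget. */
      boolean tryAdmit(long requestSize) {
        if (queuedBytes.sum() + requestSize > maxQueueBytes) {
          return false; // reject before queueing, as processRequest does above
        }
        queuedBytes.add(requestSize);
        return true;
      }

      /** Called when the call finishes, mirroring addCallSize(-size). */
      void release(long requestSize) {
        queuedBytes.add(-requestSize);
      }
    }
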
- if ((totalRequestSize + - this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) { + if ( + (totalRequestSize + this.rpcServer.callQueueSizeInBytes.sum()) + > this.rpcServer.maxQueueSizeInBytes + ) { final ServerCall callTooBig = createCall(id, this.service, null, null, null, null, totalRequestSize, null, 0, this.callCleanup); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, - "Call queue is full on " + this.rpcServer.server.getServerName() + - ", is hbase.ipc.server.max.callqueue.size too small?"); + "Call queue is full on " + this.rpcServer.server.getServerName() + + ", is hbase.ipc.server.max.callqueue.size too small?"); TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.sendResponseIfReady(); return; @@ -670,8 +648,8 @@ protected void processRequest(ByteBuff buf) throws IOException, } else { // currently header must have request param, so we directly throw // exception here - String msg = "Invalid request header: " + TextFormat.shortDebugString(header) + - ", should have param set in it"; + String msg = "Invalid request header: " + TextFormat.shortDebugString(header) + + ", should have param set in it"; RpcServer.LOG.warn(msg); throw new DoNotRetryIOException(msg); } @@ -684,8 +662,8 @@ protected void processRequest(ByteBuff buf) throws IOException, } } catch (Throwable thrown) { InetSocketAddress address = this.rpcServer.getListenerAddress(); - String msg = (address != null ? address : "(channel closed)") + - " is unable to read call parameter from client " + getHostAddress(); + String msg = (address != null ? address : "(channel closed)") + + " is unable to read call parameter from client " + getHostAddress(); RpcServer.LOG.warn(msg, thrown); this.rpcServer.metrics.exception(thrown); @@ -703,8 +681,8 @@ protected void processRequest(ByteBuff buf) throws IOException, ServerCall readParamsFailedCall = createCall(id, this.service, null, null, null, null, totalRequestSize, null, 0, this.callCleanup); - readParamsFailedCall.setResponse(null, null, responseThrowable, msg + "; " - + responseThrowable.getMessage()); + readParamsFailedCall.setResponse(null, null, responseThrowable, + msg + "; " + responseThrowable.getMessage()); TraceUtil.setError(span, responseThrowable); readParamsFailedCall.sendResponseIfReady(); return; @@ -724,8 +702,8 @@ protected void processRequest(ByteBuff buf) throws IOException, this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize()); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); call.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, - "Call queue is full on " + this.rpcServer.server.getServerName() + - ", too many items queued ?"); + "Call queue is full on " + this.rpcServer.server.getServerName() + + ", too many items queued ?"); TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); call.sendResponseIfReady(); } @@ -740,7 +718,7 @@ protected final RpcResponse getErrorResponse(String msg, Exception e) throws IOE ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder().setCallId(-1); ServerCall.setExceptionResponse(e, msg, headerBuilder); ByteBuffer headerBuf = - ServerCall.createHeaderAndMessageBytes(null, headerBuilder.build(), 0, null); + ServerCall.createHeaderAndMessageBytes(null, headerBuilder.build(), 0, null); BufferChain buf = new BufferChain(headerBuf); return () -> buf; } @@ -759,9 +737,9 @@ protected final boolean 
processPreamble(ByteBuffer preambleBuffer) throws IOExce for (int i = 0; i < RPC_HEADER.length; i++) { if (RPC_HEADER[i] != preambleBuffer.get()) { doBadPreambleHandling( - "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + - Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " + - toString()); + "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + + Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " + + toString()); return false; } } @@ -810,8 +788,8 @@ boolean isSimpleAuthentication() { public abstract boolean isConnectionOpen(); public abstract ServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, - InetAddress remoteAddress, int timeout, CallCleanup reqCleanup); + RequestHeader header, Message param, CellScanner cellScanner, long size, + InetAddress remoteAddress, int timeout, CallCleanup reqCleanup); private static class ByteBuffByteInput extends ByteInput { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java index 641aaefa4e1e..2d0c8f1f62ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java @@ -29,11 +29,13 @@ /** * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'), * high-priority ('priority'), and replication ('replication') requests. Default behavior is to - * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. - * See below article for explanation of options. - * @see Overview on Request Queuing + * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. See + * below article for explanation of options. + * @see Overview + * on Request Queuing */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObserver { private int port; @@ -53,37 +55,29 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs private Abortable abortable = null; /** - * @param conf - * @param handlerCount the number of handler threads that will be used to process calls - * @param priorityHandlerCount How many threads for priority handling. - * @param replicationHandlerCount How many threads for replication handling. - * @param highPriorityLevel - * @param priority Function to extract request priority. + * n * @param handlerCount the number of handler threads that will be used to process calls + * @param priorityHandlerCount How many threads for priority handling. + * @param replicationHandlerCount How many threads for replication handling. n * @param priority + * Function to extract request priority. 
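
The constructor above sizes each call queue from the handler count unless an explicit limit is configured, and switches to the read/write-split executor when the call-queue read share is non-zero. A small sketch of just that sizing logic; the per-handler default of 10 and the read-ratio key are assumptions inferred from the surrounding code, not guaranteed values.

    import org.apache.hadoop.conf.Configuration;

    final class CallQueueSizing {
      // Default queue depth per handler; 10 is assumed from DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER.
      private static final int DEFAULT_PER_HANDLER = 10;

      /** e.g. key = "hbase.ipc.server.max.callqueue.length" for the general queue. */
      static int maxQueueLength(Configuration conf, String key, int handlerCount) {
        return conf.getInt(key, handlerCount * DEFAULT_PER_HANDLER);
      }

      /** A non-zero read share switches the default executor to the read/write-split variant. */
      static boolean useReadWriteSplit(Configuration conf) {
        return conf.getFloat("hbase.ipc.server.callqueue.read.ratio", 0f) > 0f;
      }
    }
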
*/ - public SimpleRpcScheduler( - Configuration conf, - int handlerCount, - int priorityHandlerCount, - int replicationHandlerCount, - int metaTransitionHandler, - PriorityFunction priority, - Abortable server, - int highPriorityLevel) { + public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount, + int replicationHandlerCount, int metaTransitionHandler, PriorityFunction priority, + Abortable server, int highPriorityLevel) { int maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, - handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); int maxPriorityQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH, priorityHandlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); int maxReplicationQueueLength = - conf.getInt(RpcScheduler.IPC_SERVER_REPLICATION_MAX_CALLQUEUE_LENGTH, - replicationHandlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + conf.getInt(RpcScheduler.IPC_SERVER_REPLICATION_MAX_CALLQUEUE_LENGTH, + replicationHandlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); this.priority = priority; this.highPriorityLevel = highPriorityLevel; this.abortable = server; - String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, - RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); + String callQueueType = + conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); float callqReadShare = conf.getFloat(RWQueueRpcExecutor.CALL_QUEUE_READ_SHARE_CONF_KEY, 0); if (callqReadShare > 0) { @@ -91,50 +85,49 @@ public SimpleRpcScheduler( callExecutor = new FastPathRWQueueRpcExecutor("default.FPRWQ", Math.max(2, handlerCount), maxQueueLength, priority, conf, server); } else { - if (RpcExecutor.isFifoQueueType(callQueueType) || - RpcExecutor.isCodelQueueType(callQueueType) || - RpcExecutor.isPluggableQueueWithFastPath(callQueueType, conf)) { + if ( + RpcExecutor.isFifoQueueType(callQueueType) || RpcExecutor.isCodelQueueType(callQueueType) + || RpcExecutor.isPluggableQueueWithFastPath(callQueueType, conf) + ) { callExecutor = new FastPathBalancedQueueRpcExecutor("default.FPBQ", handlerCount, - maxQueueLength, priority, conf, server); + maxQueueLength, priority, conf, server); } else { callExecutor = new BalancedQueueRpcExecutor("default.BQ", handlerCount, maxQueueLength, - priority, conf, server); + priority, conf, server); } } float metaCallqReadShare = - conf.getFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, - MetaRWQueueRpcExecutor.DEFAULT_META_CALL_QUEUE_READ_SHARE); + conf.getFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, + MetaRWQueueRpcExecutor.DEFAULT_META_CALL_QUEUE_READ_SHARE); if (metaCallqReadShare > 0) { // different read/write handler for meta, at least 1 read handler and 1 write handler - this.priorityExecutor = - new MetaRWQueueRpcExecutor("priority.RWQ", Math.max(2, priorityHandlerCount), - maxPriorityQueueLength, priority, conf, server); + this.priorityExecutor = new MetaRWQueueRpcExecutor("priority.RWQ", + Math.max(2, priorityHandlerCount), maxPriorityQueueLength, priority, conf, server); } else { // Create 2 queues to help priorityExecutor be more scalable. - this.priorityExecutor = priorityHandlerCount > 0 ? 
- new FastPathBalancedQueueRpcExecutor("priority.FPBQ", priorityHandlerCount, - RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, - abortable) : - null; + this.priorityExecutor = priorityHandlerCount > 0 + ? new FastPathBalancedQueueRpcExecutor("priority.FPBQ", priorityHandlerCount, + RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, + abortable) + : null; } - this.replicationExecutor = - replicationHandlerCount > 0 - ? new FastPathBalancedQueueRpcExecutor("replication.FPBQ", replicationHandlerCount, - RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxReplicationQueueLength, priority, - conf, abortable) - : null; - this.metaTransitionExecutor = metaTransitionHandler > 0 ? - new FastPathBalancedQueueRpcExecutor("metaPriority.FPBQ", metaTransitionHandler, - RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, - abortable) : - null; + this.replicationExecutor = replicationHandlerCount > 0 + ? new FastPathBalancedQueueRpcExecutor("replication.FPBQ", replicationHandlerCount, + RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxReplicationQueueLength, priority, conf, + abortable) + : null; + this.metaTransitionExecutor = metaTransitionHandler > 0 + ? new FastPathBalancedQueueRpcExecutor("metaPriority.FPBQ", metaTransitionHandler, + RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, + abortable) + : null; } public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount, - int replicationHandlerCount, PriorityFunction priority, int highPriorityLevel) { + int replicationHandlerCount, PriorityFunction priority, int highPriorityLevel) { this(conf, handlerCount, priorityHandlerCount, replicationHandlerCount, 0, priority, null, - highPriorityLevel); + highPriorityLevel); } /** @@ -154,10 +147,11 @@ public void onConfigurationChange(Configuration conf) { metaTransitionExecutor.resizeQueues(conf); } - String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, - RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); - if (RpcExecutor.isCodelQueueType(callQueueType) || - RpcExecutor.isPluggableQueueType(callQueueType)) { + String callQueueType = + conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); + if ( + RpcExecutor.isCodelQueueType(callQueueType) || RpcExecutor.isPluggableQueueType(callQueueType) + ) { callExecutor.onConfigurationChange(conf); } } @@ -200,13 +194,15 @@ public void stop() { @Override public boolean dispatch(CallRunner callTask) { RpcCall call = callTask.getRpcCall(); - int level = priority.getPriority(call.getHeader(), call.getParam(), - call.getRequestUser().orElse(null)); + int level = + priority.getPriority(call.getHeader(), call.getParam(), call.getRequestUser().orElse(null)); if (level == HConstants.PRIORITY_UNSET) { level = HConstants.NORMAL_QOS; } - if (metaTransitionExecutor != null && - level == MasterAnnotationReadingPriorityFunction.META_TRANSITION_QOS) { + if ( + metaTransitionExecutor != null + && level == MasterAnnotationReadingPriorityFunction.META_TRANSITION_QOS + ) { return metaTransitionExecutor.dispatch(callTask); } else if (priorityExecutor != null && level > highPriorityLevel) { return priorityExecutor.dispatch(callTask); @@ -240,7 +236,7 @@ public int getReplicationQueueLength() { @Override public int getActiveRpcHandlerCount() { return callExecutor.getActiveHandlerCount() + getActivePriorityRpcHandlerCount() - + getActiveReplicationRpcHandlerCount() + 
getActiveMetaPriorityRpcHandlerCount(); + + getActiveReplicationRpcHandlerCount() + getActiveMetaPriorityRpcHandlerCount(); } @Override @@ -330,7 +326,7 @@ public CallQueueInfo getCallQueueInfo() { if (null != metaTransitionExecutor) { queueName = "Meta Transition Queue"; callQueueInfo.setCallMethodCount(queueName, - metaTransitionExecutor.getCallQueueCountsSummary()); + metaTransitionExecutor.getCallQueueCountsSummary()); callQueueInfo.setCallMethodSize(queueName, metaTransitionExecutor.getCallQueueSizeSummary()); } @@ -338,4 +334,3 @@ public CallQueueInfo getCallQueueInfo() { } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 20ea1f544182..5a56fff4ad4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,47 +54,41 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * The RPC server with native java NIO implementation deriving from Hadoop to - * host protobuf described Services. It's the original one before HBASE-17262, - * and the default RPC server for now. - * - * An RpcServer instance has a Listener that hosts the socket. Listener has fixed number - * of Readers in an ExecutorPool, 10 by default. The Listener does an accept and then - * round robin a Reader is chosen to do the read. The reader is registered on Selector. Read does - * total read off the channel and the parse from which it makes a Call. The call is wrapped in a - * CallRunner and passed to the scheduler to be run. Reader goes back to see if more to be done - * and loops till done. - * - *
        Scheduler can be variously implemented but default simple scheduler has handlers to which it - * has given the queues into which calls (i.e. CallRunner instances) are inserted. Handlers run - * taking from the queue. They run the CallRunner#run method on each item gotten from queue - * and keep taking while the server is up. - * - * CallRunner#run executes the call. When done, asks the included Call to put itself on new - * queue for Responder to pull from and return result to client. - * + * The RPC server with native java NIO implementation deriving from Hadoop to host protobuf + * described Services. It's the original one before HBASE-17262, and the default RPC server for now. + * An RpcServer instance has a Listener that hosts the socket. Listener has fixed number of Readers + * in an ExecutorPool, 10 by default. The Listener does an accept and then round robin a Reader is + * chosen to do the read. The reader is registered on Selector. Read does total read off the channel + * and the parse from which it makes a Call. The call is wrapped in a CallRunner and passed to the + * scheduler to be run. Reader goes back to see if more to be done and loops till done. + *
        + * Scheduler can be variously implemented but default simple scheduler has handlers to which it has + * given the queues into which calls (i.e. CallRunner instances) are inserted. Handlers run taking + * from the queue. They run the CallRunner#run method on each item gotten from queue and keep taking + * while the server is up. CallRunner#run executes the call. When done, asks the included Call to + * put itself on new queue for Responder to pull from and return result to client. * @see BlockingRpcClient */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.CONFIG }) public class SimpleRpcServer extends RpcServer { - protected int port; // port we listen on - protected InetSocketAddress address; // inet address we listen on - private int readThreads; // number of read threads + protected int port; // port we listen on + protected InetSocketAddress address; // inet address we listen on + private int readThreads; // number of read threads protected int socketSendBufferSize; - protected final long purgeTimeout; // in milliseconds + protected final long purgeTimeout; // in milliseconds // maintains the set of client connections and handles idle timeouts private ConnectionManager connectionManager; private Listener listener = null; protected SimpleRpcServerResponder responder = null; - /** Listens on the socket. Creates jobs for the handler threads*/ + /** Listens on the socket. Creates jobs for the handler threads */ private class Listener extends Thread { - private ServerSocketChannel acceptChannel = null; //the accept channel - private Selector selector = null; //the selector that we use for the server + private ServerSocketChannel acceptChannel = null; // the accept channel + private Selector selector = null; // the selector that we use for the server private Reader[] readers = null; private int currentReader = 0; private final int readerPendingConnectionQueueLength; @@ -106,15 +100,15 @@ public Listener(final String name) throws IOException { // The backlog of requests that we will have the serversocket carry. int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128); readerPendingConnectionQueueLength = - conf.getInt("hbase.ipc.server.read.connection-queue.size", 100); + conf.getInt("hbase.ipc.server.read.connection-queue.size", 100); // Create a new server socket and set to non blocking mode acceptChannel = ServerSocketChannel.open(); acceptChannel.configureBlocking(false); // Bind the server socket to the binding addrees (can be different from the default interface) bind(acceptChannel.socket(), bindAddress, backlogLength); - port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port - address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); + port = acceptChannel.socket().getLocalPort(); // Could be an ephemeral port + address = (InetSocketAddress) acceptChannel.socket().getLocalSocketAddress(); // create a selector; selector = Selector.open(); @@ -122,10 +116,9 @@ public Listener(final String name) throws IOException { // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it // has an advantage in that it is easy to shutdown the pool. 
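// Note: readThreads below comes from "hbase.ipc.server.read.threadpool.size" (default 10, read in
// the SimpleRpcServer constructor further down). As a rough, hypothetical tuning sketch only, a
// deployment expecting many concurrent connections could raise it before constructing the server:
//   conf.setInt("hbase.ipc.server.read.threadpool.size", 16);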
readPool = Executors.newFixedThreadPool(readThreads, - new ThreadFactoryBuilder().setNameFormat( - "Reader=%d,bindAddress=" + bindAddress.getHostName() + - ",port=" + port).setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder() + .setNameFormat("Reader=%d,bindAddress=" + bindAddress.getHostName() + ",port=" + port) + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); for (int i = 0; i < readThreads; ++i) { Reader reader = new Reader(); readers[i] = reader; @@ -139,7 +132,6 @@ public Listener(final String name) throws IOException { this.setDaemon(true); } - private class Reader implements Runnable { final private LinkedBlockingQueue pendingConnections; private final Selector readSelector; @@ -168,7 +160,7 @@ private synchronized void doRunLoop() { // Consume as many connections as currently queued to avoid // unbridled acceptance of connections that starves the select int size = pendingConnections.size(); - for (int i=size; i>0; i--) { + for (int i = size; i > 0; i--) { SimpleServerRpcConnection conn = pendingConnections.take(); conn.channel.register(readSelector, SelectionKey.OP_READ, conn); } @@ -185,7 +177,7 @@ private synchronized void doRunLoop() { key = null; } } catch (InterruptedException e) { - if (running) { // unexpected -- log it + if (running) { // unexpected -- log it LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e); } } catch (CancelledKeyException e) { @@ -197,9 +189,9 @@ private synchronized void doRunLoop() { } /** - * Updating the readSelector while it's being used is not thread-safe, - * so the connection must be queued. The reader will drain the queue - * and update its readSelector before performing the next select + * Updating the readSelector while it's being used is not thread-safe, so the connection must + * be queued. 
The reader will drain the queue and update its readSelector before performing + * the next select */ public void addConnection(SimpleServerRpcConnection conn) throws IOException { pendingConnections.add(conn); @@ -208,9 +200,9 @@ public void addConnection(SimpleServerRpcConnection conn) throws IOException { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC", - justification="selector access is not synchronized; seems fine but concerned changing " + - "it will have per impact") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", + justification = "selector access is not synchronized; seems fine but concerned changing " + + "it will have per impact") public void run() { LOG.info(getName() + ": starting"); connectionManager.startIdleScan(); @@ -224,8 +216,7 @@ public void run() { iter.remove(); try { if (key.isValid()) { - if (key.isAcceptable()) - doAccept(key); + if (key.isAcceptable()) doAccept(key); } } catch (IOException ignored) { if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored); @@ -266,8 +257,8 @@ public void run() { if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored); } - selector= null; - acceptChannel= null; + selector = null; + acceptChannel = null; // close all connections connectionManager.stopIdleScan(); @@ -277,7 +268,7 @@ public void run() { private void closeCurrentConnection(SelectionKey key, Throwable e) { if (key != null) { - SimpleServerRpcConnection c = (SimpleServerRpcConnection)key.attachment(); + SimpleServerRpcConnection c = (SimpleServerRpcConnection) key.attachment(); if (c != null) { closeConnection(c); key.attach(null); @@ -305,7 +296,7 @@ void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfM } continue; } - key.attach(c); // so closeCurrentConnection can get the object + key.attach(c); // so closeCurrentConnection can get the object reader.addConnection(c); } } @@ -320,13 +311,14 @@ void doRead(SelectionKey key) throws InterruptedException { try { count = c.readAndProcess(); } catch (InterruptedException ieo) { - LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo); + LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", + ieo); throw ieo; } catch (Exception e) { if (LOG.isDebugEnabled()) { LOG.debug("Caught exception while reading:", e); } - count = -1; //so that the (count < 0) block is executed + count = -1; // so that the (count < 0) block is executed } if (count < 0) { closeConnection(c); @@ -361,24 +353,20 @@ Reader getReader() { /** * Constructs a server listening on the named port and address. - * @param server hosting instance of {@link Server}. We will do authentications if an - * instance else pass null for no authentication check. - * @param name Used keying this rpc servers' metrics and for naming the Listener thread. - * @param services A list of services. - * @param bindAddress Where to listen - * @param conf - * @param scheduler - * @param reservoirEnabled Enable ByteBufferPool or not. + * @param server hosting instance of {@link Server}. We will do authentications if an + * instance else pass null for no authentication check. + * @param name Used keying this rpc servers' metrics and for naming the Listener thread. + * @param services A list of services. + * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not. 
*/ public SimpleRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { super(server, name, services, bindAddress, conf, scheduler, reservoirEnabled); this.socketSendBufferSize = 0; this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10); - this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout", - 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.purgeTimeout = + conf.getLong("hbase.ipc.client.call.purge.timeout", 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); // Start the listener here and let it bind to the port listener = new Listener(name); @@ -393,8 +381,7 @@ public SimpleRpcServer(final Server server, final String name, } /** - * Subclasses of HBaseServer can override this to provide their own - * Connection implementations. + * Subclasses of HBaseServer can override this to provide their own Connection implementations. */ protected SimpleServerRpcConnection getConnection(SocketChannel channel, long time) { return new SimpleServerRpcConnection(this, channel, time); @@ -404,11 +391,14 @@ protected void closeConnection(SimpleServerRpcConnection connection) { connectionManager.close(connection); } - /** Sets the socket buffer size used for responding to RPCs. + /** + * Sets the socket buffer size used for responding to RPCs. * @param size send size */ @Override - public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } + public void setSocketSendBufSize(int size) { + this.socketSendBufferSize = size; + } /** Starts the service. Must be called before any calls will be handled. */ @Override @@ -461,10 +451,10 @@ public synchronized void join() throws InterruptedException { } /** - * Return the socket (ip+port) on which the RPC server is listening to. May return null if - * the listener channel is closed. + * Return the socket (ip+port) on which the RPC server is listening to. May return null if the + * listener channel is closed. * @return the socket (ip+port) on which the RPC server is listening to, or null if this - * information cannot be determined + * information cannot be determined */ @Override public synchronized InetSocketAddress getListenerAddress() { @@ -475,14 +465,12 @@ public synchronized InetSocketAddress getListenerAddress() { } /** - * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of - * buffer increases. This also minimizes extra copies in NIO layer - * as a result of multiple write operations required to write a large - * buffer. - * - * @param channel writable byte channel to write to + * This is a wrapper around + * {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. If the amount of data + * is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many + * direct buffers as the size of buffer increases. This also minimizes extra copies in NIO layer + * as a result of multiple write operations required to write a large buffer. 
+ * @param channel writable byte channel to write to * @param bufferChain Chain of buffers to write * @return number of bytes written * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer) @@ -497,14 +485,14 @@ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChai } /** - * A convenience method to bind to a given address and report - * better exceptions if the address is not a valid host. - * @param socket the socket to bind + * A convenience method to bind to a given address and report better exceptions if the address is + * not a valid host. + * @param socket the socket to bind * @param address the address to bind to * @param backlog the number of connections allowed in the queue - * @throws BindException if the address can't be bound + * @throws BindException if the address can't be bound * @throws UnknownHostException if the address isn't a valid host name - * @throws IOException other random errors from bind + * @throws IOException other random errors from bind */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { @@ -548,18 +536,18 @@ private class ConnectionManager { this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true); this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000); this.idleScanInterval = - conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000); + conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000); this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10); int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); int maxConnectionQueueSize = - handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100); + handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100); // create a set with concurrency -and- a thread-safe iterator, add 2 // for listener and idle closer threads - this.connections = Collections.newSetFromMap( - new ConcurrentHashMap( - maxConnectionQueueSize, 0.75f, readThreads+2)); + this.connections = + Collections.newSetFromMap(new ConcurrentHashMap( + maxConnectionQueueSize, 0.75f, readThreads + 2)); } private boolean add(SimpleServerRpcConnection connection) { @@ -587,16 +575,15 @@ SimpleServerRpcConnection[] toArray() { } SimpleServerRpcConnection register(SocketChannel channel) { - SimpleServerRpcConnection connection = getConnection(channel, - EnvironmentEdgeManager.currentTime()); + SimpleServerRpcConnection connection = + getConnection(channel, EnvironmentEdgeManager.currentTime()); add(connection); if (LOG.isTraceEnabled()) { - LOG.trace("Connection from " + connection + - "; connections=" + size() + - ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + - ", general queued calls=" + scheduler.getGeneralQueueLength() + - ", priority queued calls=" + scheduler.getPriorityQueueLength() + - ", meta priority queued calls=" + scheduler.getMetaPriorityQueueLength()); + LOG.trace("Connection from " + connection + "; connections=" + size() + + ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + ", general queued calls=" + + scheduler.getGeneralQueueLength() + ", priority queued calls=" + + scheduler.getPriorityQueueLength() + ", meta priority queued calls=" + + scheduler.getMetaPriorityQueueLength()); } return 
connection; } @@ -605,9 +592,8 @@ boolean close(SimpleServerRpcConnection connection) { boolean exists = remove(connection); if (exists) { if (LOG.isTraceEnabled()) { - LOG.trace(Thread.currentThread().getName() + - ": disconnecting client " + connection + - ". Number of active connections: "+ size()); + LOG.trace(Thread.currentThread().getName() + ": disconnecting client " + connection + + ". Number of active connections: " + size()); } // only close if actually removed to avoid double-closing due // to possible races @@ -630,10 +616,10 @@ synchronized void closeIdle(boolean scanAll) { break; } // stop if not scanning all and max connections are closed - if (connection.isIdle() && - connection.getLastContact() < minLastContact && - close(connection) && - !scanAll && (++closed == maxIdleToClose)) { + if ( + connection.isIdle() && connection.getLastContact() < minLastContact && close(connection) + && !scanAll && (++closed == maxIdleToClose) + ) { break; } } @@ -659,7 +645,7 @@ private void scheduleIdleScanTask() { if (!running) { return; } - TimerTask idleScanTask = new TimerTask(){ + TimerTask idleScanTask = new TimerTask() { @Override public void run() { if (!running) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java index d6d5dd09a85b..200c4ebd1af8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; @@ -43,7 +42,7 @@ class SimpleRpcServerResponder extends Thread { private final SimpleRpcServer simpleRpcServer; private final Selector writeSelector; private final Set writingCons = - Collections.newSetFromMap(new ConcurrentHashMap<>()); + Collections.newSetFromMap(new ConcurrentHashMap<>()); SimpleRpcServerResponder(SimpleRpcServer simpleRpcServer) throws IOException { this.simpleRpcServer = simpleRpcServer; @@ -152,7 +151,7 @@ private void doRunLoop() { } } catch (Exception e) { SimpleRpcServer.LOG - .warn(getName() + ": exception in Responder " + StringUtils.stringifyException(e), e); + .warn(getName() + ": exception in Responder " + StringUtils.stringifyException(e), e); } } SimpleRpcServer.LOG.info(getName() + ": stopped"); @@ -176,8 +175,10 @@ private long purge(long lastPurgeTime) { if (connection == null) { throw new IllegalStateException("Coding error: SelectionKey key without attachment."); } - if (connection.lastSentTime > 0 && - now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout) { + if ( + connection.lastSentTime > 0 + && now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout + ) { conWithOldCalls.add(connection); } } @@ -218,17 +219,15 @@ private void doAsyncWrite(SelectionKey key) throws IOException { /** * Process the response for this call. You need to have the lock on * {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock} - * @return true if we proceed the call fully, false otherwise. 
- * @throws IOException + * @return true if we proceed the call fully, false otherwise. n */ private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp) - throws IOException { + throws IOException { boolean error = true; BufferChain buf = resp.getResponse(); try { // Send as much data as we can in the non-blocking fashion - long numBytes = - this.simpleRpcServer.channelWrite(conn.channel, buf); + long numBytes = this.simpleRpcServer.channelWrite(conn.channel, buf); if (numBytes < 0) { throw new HBaseIOException("Error writing on the socket " + conn); } @@ -256,11 +255,10 @@ private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp /** * Process all the responses for this connection * @return true if all the calls were processed or that someone else is doing it. false if there * - * is still some work to do. In this case, we expect the caller to delay us. - * @throws IOException + * is still some work to do. In this case, we expect the caller to delay us. n */ private boolean processAllResponses(final SimpleServerRpcConnection connection) - throws IOException { + throws IOException { // We want only one writer on the channel for a connection at a time. connection.responseWriteLock.lock(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java index 311b4c7b1a9c..861da8055d11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,15 @@ import java.io.IOException; import java.net.InetAddress; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -41,13 +42,12 @@ class SimpleServerCall extends ServerCall { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Can't figure why this complaint is happening... 
see below") SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, - SimpleServerRpcConnection connection, long size, final InetAddress remoteAddress, - long receiveTime, int timeout, ByteBuffAllocator bbAllocator, - CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, - SimpleRpcServerResponder responder) { + RequestHeader header, Message param, CellScanner cellScanner, + SimpleServerRpcConnection connection, long size, final InetAddress remoteAddress, + long receiveTime, int timeout, ByteBuffAllocator bbAllocator, CellBlockBuilder cellBlockBuilder, + CallCleanup reqCleanup, SimpleRpcServerResponder responder) { super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, - timeout, bbAllocator, cellBlockBuilder, reqCleanup); + timeout, bbAllocator, cellBlockBuilder, reqCleanup); this.responder = responder; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 622e67ab781d..f59c002e6bba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection { long lastSentTime = -1L; public SimpleServerRpcConnection(SimpleRpcServer rpcServer, SocketChannel channel, - long lastContact) { + long lastContact) { super(rpcServer); this.channel = channel; this.lastContact = lastContact; @@ -144,9 +144,7 @@ private int read4Bytes() throws IOException { /** * Read off the wire. If there is not enough data to read, update the connection state with what * we have and returns. - * @return Returns -1 if failure (and caller will close connection), else zero or more. - * @throws IOException - * @throws InterruptedException + * @return Returns -1 if failure (and caller will close connection), else zero or more. nn */ public int readAndProcess() throws IOException, InterruptedException { // If we have not read the connection setup preamble, look to see if that is on the wire. @@ -177,14 +175,14 @@ public int readAndProcess() throws IOException, InterruptedException { } if (dataLength < 0) { // A data length of zero is legal. throw new DoNotRetryIOException( - "Unexpected data length " + dataLength + "!! from " + getHostAddress()); + "Unexpected data length " + dataLength + "!! from " + getHostAddress()); } if (dataLength > this.rpcServer.maxRequestSize) { - String msg = "RPC data length of " + dataLength + " received from " + getHostAddress() + - " is greater than max allowed " + this.rpcServer.maxRequestSize + ". Set \"" + - SimpleRpcServer.MAX_REQUEST_SIZE + - "\" on server to override this limit (not recommended)"; + String msg = "RPC data length of " + dataLength + " received from " + getHostAddress() + + " is greater than max allowed " + this.rpcServer.maxRequestSize + ". 
Set \"" + + SimpleRpcServer.MAX_REQUEST_SIZE + + "\" on server to override this limit (not recommended)"; SimpleRpcServer.LOG.warn(msg); if (connectionHeaderRead && connectionPreambleRead) { @@ -211,14 +209,16 @@ public int read() throws IOException { // Notify the client about the offending request SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null, - null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0, - this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder); + null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0, + this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder); RequestTooBigException reqTooBigEx = new RequestTooBigException(msg); this.rpcServer.metrics.exception(reqTooBigEx); // Make sure the client recognizes the underlying exception // Otherwise, throw a DoNotRetryIOException. - if (VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(), - RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) { + if ( + VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(), + RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION) + ) { reqTooBig.setResponse(null, null, reqTooBigEx, msg); } else { reqTooBig.setResponse(null, null, new DoNotRetryIOException(msg), msg); @@ -327,11 +327,11 @@ public boolean isConnectionOpen() { @Override public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, - InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { + RequestHeader header, Message param, CellScanner cellScanner, long size, + InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, - remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, - this.rpcServer.cellBlockBuilder, reqCleanup, this.responder); + remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, + this.rpcServer.cellBlockBuilder, reqCleanup, this.responder); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java index 5b4a2c241b44..d848a7ae495d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index ebe24463ea4a..50628f8717b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. 
*/ package org.apache.hadoop.hbase.master; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.List; @@ -36,22 +36,24 @@ import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * Handles everything on master-side related to master election. Keeps track of - * currently active master and registered backup masters. - * - *
        Listens and responds to ZooKeeper notifications on the master znodes, - * both nodeCreated and nodeDeleted. - * - *
        Contains blocking methods which will hold up backup masters, waiting - * for the active master to fail. - * - *
        This class is instantiated in the HMaster constructor and the method - * #blockUntilBecomingActiveMaster() is called to wait until becoming - * the active master of the cluster. + * Handles everything on master-side related to master election. Keeps track of currently active + * master and registered backup masters. + *
        + * Listens and responds to ZooKeeper notifications on the master znodes, both + * nodeCreated and nodeDeleted. + *
        + * Contains blocking methods which will hold up backup masters, waiting for the active master to + * fail. + *
        + * This class is instantiated in the HMaster constructor and the method + * #blockUntilBecomingActiveMaster() is called to wait until becoming the active master of the + * cluster. */ @InterfaceAudience.Private public class ActiveMasterManager extends ZKListener { @@ -75,11 +77,11 @@ public class ActiveMasterManager extends ZKListener { /** * @param watcher ZK watcher - * @param sn ServerName - * @param master In an instance of a Master. + * @param sn ServerName + * @param master In an instance of a Master. */ ActiveMasterManager(ZKWatcher watcher, ServerName sn, Server master) - throws InterruptedIOException { + throws InterruptedIOException { super(watcher); watcher.registerListener(this); this.sn = sn; @@ -117,7 +119,7 @@ public void nodeDeleted(String path) { // shut down, so that state is now irrelevant. This means that the shutdown // state must be set while we wait on the active master in order // to shutdown this master. See HBASE-8519. - if(path.equals(watcher.getZNodePaths().clusterStateZNode) && !master.isStopped()) { + if (path.equals(watcher.getZNodePaths().clusterStateZNode) && !master.isStopped()) { clusterShutDown.set(true); } handle(path); @@ -131,7 +133,7 @@ void handle(final String path) { private void updateBackupMasters() throws InterruptedIOException { backupMasters = - ImmutableList.copyOf(MasterAddressTracker.getBackupMastersAndRenewWatch(watcher)); + ImmutableList.copyOf(MasterAddressTracker.getBackupMastersAndRenewWatch(watcher)); } /** @@ -177,22 +179,21 @@ public int getBackupMasterInfoPort(final ServerName sn) { } /** - * Handle a change in the master node. Doesn't matter whether this was called - * from a nodeCreated or nodeDeleted event because there are no guarantees - * that the current state of the master node matches the event at the time of - * our next ZK request. - * - *
        Uses the watchAndCheckExists method which watches the master address node - * regardless of whether it exists or not. If it does exist (there is an - * active master), it returns true. Otherwise it returns false. - * - *
        A watcher is set which guarantees that this method will get called again if - * there is another change in the master node. + * Handle a change in the master node. Doesn't matter whether this was called from a nodeCreated + * or nodeDeleted event because there are no guarantees that the current state of the master node + * matches the event at the time of our next ZK request. + *
        + * Uses the watchAndCheckExists method which watches the master address node regardless of whether + * it exists or not. If it does exist (there is an active master), it returns true. Otherwise it + * returns false. + *
        + * A watcher is set which guarantees that this method will get called again if there is another + * change in the master node. */ private void handleMasterNodeChange() { // Watch the node and check if it exists. try { - synchronized(clusterHasActiveMaster) { + synchronized (clusterHasActiveMaster) { if (ZKUtil.watchAndCheckExists(watcher, watcher.getZNodePaths().masterAddressZNode)) { // A master node exists, there is an active master LOG.trace("A master is now available"); @@ -214,30 +215,26 @@ private void handleMasterNodeChange() { } /** - * Block until becoming the active master. - * - * Method blocks until there is not another active master and our attempt - * to become the new active master is successful. - * - * This also makes sure that we are watching the master znode so will be - * notified if another master dies. + * Block until becoming the active master. Method blocks until there is not another active master + * and our attempt to become the new active master is successful. This also makes sure that we are + * watching the master znode so will be notified if another master dies. * @param checkInterval the interval to check if the master is stopped * @param startupStatus the monitor status to track the progress - * @return True if no issue becoming active master else false if another - * master was running or if some other problem (zookeeper, stop flag has been - * set on this Master) + * @return True if no issue becoming active master else false if another master was running or if + * some other problem (zookeeper, stop flag has been set on this Master) */ - boolean blockUntilBecomingActiveMaster( - int checkInterval, MonitoredTask startupStatus) { - String backupZNode = ZNodePaths.joinZNode( - this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); + boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupStatus) { + String backupZNode = ZNodePaths + .joinZNode(this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); while (!(master.isAborted() || master.isStopped())) { startupStatus.setStatus("Trying to register in ZK as active master"); // Try to become the active master, watch if there is another master. // Write out our ServerName as versioned bytes. try { - if (MasterAddressTracker.setMasterAddress(this.watcher, - this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort)) { + if ( + MasterAddressTracker.setMasterAddress(this.watcher, + this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort) + ) { // If we were a backup master before, delete our ZNode from the backup // master directory since we are the active now) @@ -267,30 +264,30 @@ boolean blockUntilBecomingActiveMaster( byte[] bytes = ZKUtil.getDataAndWatch(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); if (bytes == null) { - msg = ("A master was detected, but went down before its address " + - "could be read. Attempting to become the next active master"); + msg = ("A master was detected, but went down before its address " + + "could be read. Attempting to become the next active master"); } else { ServerName currentMaster; try { currentMaster = ProtobufUtil.parseServerNameFrom(bytes); } catch (DeserializationException e) { LOG.warn("Failed parse", e); - // Hopefully next time around we won't fail the parse. Dangerous. + // Hopefully next time around we won't fail the parse. Dangerous. 
continue; } if (ServerName.isSameAddress(currentMaster, this.sn)) { - msg = ("Current master has this master's address, " + - currentMaster + "; master was restarted? Deleting node."); + msg = ("Current master has this master's address, " + currentMaster + + "; master was restarted? Deleting node."); // Hurry along the expiration of the znode. ZKUtil.deleteNode(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); // We may have failed to delete the znode at the previous step, but - // we delete the file anyway: a second attempt to delete the znode is likely to fail - // again. + // we delete the file anyway: a second attempt to delete the znode is likely to fail + // again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } else { - msg = "Another master is the active master, " + currentMaster + - "; waiting to become the next active master"; + msg = "Another master is the active master, " + currentMaster + + "; waiting to become the next active master"; } } LOG.info(msg); @@ -305,13 +302,12 @@ boolean blockUntilBecomingActiveMaster( clusterHasActiveMaster.wait(checkInterval); } catch (InterruptedException e) { // We expect to be interrupted when a master dies, - // will fall out if so + // will fall out if so LOG.debug("Interrupted waiting for master to die", e); } } if (clusterShutDown.get()) { - this.master.stop( - "Cluster went down before this master became active"); + this.master.stop("Cluster went down before this master became active"); } } } @@ -326,10 +322,8 @@ boolean hasActiveMaster() { if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().masterAddressZNode) >= 0) { return true; } - } - catch (KeeperException ke) { - LOG.info("Received an unexpected KeeperException when checking " + - "isActiveMaster : "+ ke); + } catch (KeeperException ke) { + LOG.info("Received an unexpected KeeperException when checking " + "isActiveMaster : " + ke); } return false; } @@ -348,15 +342,14 @@ public void stop() { } catch (IOException e) { LOG.warn("Failed get of master address: " + e.toString()); } - if (activeMaster != null && activeMaster.equals(this.sn)) { + if (activeMaster != null && activeMaster.equals(this.sn)) { ZKUtil.deleteNode(watcher, watcher.getZNodePaths().masterAddressZNode); // We may have failed to delete the znode at the previous step, but - // we delete the file anyway: a second attempt to delete the znode is likely to fail again. + // we delete the file anyway: a second attempt to delete the znode is likely to fail again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } } catch (KeeperException e) { - LOG.debug(this.watcher.prefix("Failed delete of our master address node; " + - e.getMessage())); + LOG.debug(this.watcher.prefix("Failed delete of our master address node; " + e.getMessage())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java index 2f75560dae8c..67d8ef80ce69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.io.IOException; @@ -35,11 +34,9 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Caches the cluster ID of the cluster. 
For standby masters, this is used to serve the client - * RPCs that fetch the cluster ID. ClusterID is only created by an active master if one does not - * already exist. Standby masters just read the information from the file system. This class is - * thread-safe. - * + * Caches the cluster ID of the cluster. For standby masters, this is used to serve the client RPCs + * that fetch the cluster ID. ClusterID is only created by an active master if one does not already + * exist. Standby masters just read the information from the file system. This class is thread-safe. * TODO: Make it a singleton without affecting concurrent junit tests. */ @InterfaceAudience.Private @@ -95,8 +92,8 @@ private String getClusterId() { /** * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress, - * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, - * returns right away and the caller is expected to wait for the fetch to finish. + * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, returns + * right away and the caller is expected to wait for the fetch to finish. * @return true if the attempt is done, false if another thread is already fetching it. */ private boolean attemptFetch() { @@ -130,12 +127,11 @@ private void waitForFetchToFinish() throws InterruptedException { } /** - * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached - * copy and is thread-safe. Optimized to do a single fetch when there are multiple threads are - * trying get from a clean cache. - * - * @return ClusterId by reading from FileSystem or null in any error case or cluster ID does - * not exist on the file system or if the server initiated a tear down. + * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached copy + * and is thread-safe. Optimized to do a single fetch when there are multiple threads are trying + * get from a clean cache. + * @return ClusterId by reading from FileSystem or null in any error case or cluster ID does not + * exist on the file system or if the server initiated a tear down. */ public String getFromCacheOrFetch() { if (server.isStopping() || server.isStopped()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java index 0f7153ba8014..1121101024db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,20 +19,17 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServiceNotRunningException; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.NonceKey; +import org.apache.yetus.audience.InterfaceAudience; /** - * View and edit the current cluster schema. Use this API making any modification to - * namespaces, tables, etc. - * - *
        Implementation Notes
        - * Nonces are for when operation is non-idempotent to ensure once-only semantic, even - * across process failures. + * View and edit the current cluster schema. Use this API making any modification to namespaces, + * tables, etc. + *
        Implementation Notes
        Nonces are for when operation is non-idempotent to ensure once-only + * semantic, even across process failures. */ // ClusterSchema is introduced to encapsulate schema modification. Currently the different aspects // are spread about the code base. This effort is about cleanup, shutting down access, and @@ -61,16 +58,15 @@ public interface ClusterSchema { * Timeout for cluster operations in milliseconds. */ public static final String HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY = - "hbase.master.cluster.schema.operation.timeout"; + "hbase.master.cluster.schema.operation.timeout"; /** * Default operation timeout in milliseconds. */ - public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT = - 5 * 60 * 1000; + public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT = 5 * 60 * 1000; /** - * For internals use only. Do not use! Provisionally part of this Interface. - * Prefer the high-level APIs available elsewhere in this API. + * For internals use only. Do not use! Provisionally part of this Interface. Prefer the high-level + * APIs available elsewhere in this API. * @return Instance of {@link TableNamespaceManager} */ // TODO: Remove from here. Keep internal. This Interface is too high-level to host this accessor. @@ -79,34 +75,33 @@ public interface ClusterSchema { /** * Create a new Namespace. * @param namespaceDescriptor descriptor for new Namespace - * @param nonceKey A unique identifier for this operation from the client or process. - * @param latch A latch to block on for precondition validation + * @param nonceKey A unique identifier for this operation from the client or process. + * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ - long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, + ProcedurePrepareLatch latch) throws IOException; /** * Modify an existing Namespace. * @param nonceKey A unique identifier for this operation from the client or process. - * @param latch A latch to block on for precondition validation + * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ - long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, + ProcedurePrepareLatch latch) throws IOException; /** - * Delete an existing Namespace. - * Only empty Namespaces (no tables) can be removed. + * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed. * @param nonceKey A unique identifier for this operation from the client or process. 
- * @param latch A latch to block on for precondition validation + * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ long deleteNamespace(String name, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + throws IOException; /** * Get a Namespace diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java index fadb28ccca9c..4e4e1d1e5a7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service; /** * Mixes in ClusterSchema and Service */ @InterfaceAudience.Private -public interface ClusterSchemaService extends ClusterSchema, Service {} +public interface ClusterSchemaService extends ClusterSchema, Service { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java index 2188dc3d324c..39d00d0908ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -76,7 +76,7 @@ public TableNamespaceManager getTableNamespaceManager() { } private long submitProcedure(final Procedure procedure, - final NonceKey nonceKey) throws ServiceNotRunningException { + final NonceKey nonceKey) throws ServiceNotRunningException { checkIsRunning(); ProcedureExecutor pe = this.masterServices.getMasterProcedureExecutor(); return pe.submitProcedure(procedure, nonceKey); @@ -84,27 +84,25 @@ private long submitProcedure(final Procedure procedure, @Override public long createNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey, - final ProcedurePrepareLatch latch) - throws IOException { + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new CreateNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, + latch), nonceKey); } @Override public long modifyNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey, - final ProcedurePrepareLatch latch) throws IOException { + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new ModifyNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, + latch), nonceKey); } @Override - public long deleteNamespace(String name, final NonceKey nonceKey, final ProcedurePrepareLatch latch) - throws IOException { + public long deleteNamespace(String name, final NonceKey nonceKey, + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new DeleteNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch), nonceKey); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index dd67c05eae0e..adbbac0dfba6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -15,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - - package org.apache.hadoop.hbase.master; import java.io.Closeable; @@ -47,8 +45,11 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -64,27 +65,24 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageEncoder; import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class to publish the cluster status to the client. This allows them to know immediately - * the dead region servers, hence to cut the connection they have with them, eventually stop - * waiting on the socket. This improves the mean time to recover, and as well allows to increase - * on the client the different timeouts, as the dead servers will be detected separately. + * Class to publish the cluster status to the client. This allows them to know immediately the dead + * region servers, hence to cut the connection they have with them, eventually stop waiting on the + * socket. This improves the mean time to recover, and as well allows to increase on the client the + * different timeouts, as the dead servers will be detected separately. */ @InterfaceAudience.Private public class ClusterStatusPublisher extends ScheduledChore { private static Logger LOG = LoggerFactory.getLogger(ClusterStatusPublisher.class); /** - * The implementation class used to publish the status. Default is null (no publish). - * Use org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the + * The implementation class used to publish the status. Default is null (no publish). Use + * org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the * status. */ public static final String STATUS_PUBLISHER_CLASS = "hbase.status.publisher.class"; - public static final Class - DEFAULT_STATUS_PUBLISHER_CLASS = + public static final Class< + ? extends ClusterStatusPublisher.Publisher> DEFAULT_STATUS_PUBLISHER_CLASS = org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher.class; /** @@ -101,8 +99,8 @@ public class ClusterStatusPublisher extends ScheduledChore { private boolean connected = false; /** - * We want to limit the size of the protobuf message sent, do fit into a single packet. - * a reasonable size for ip / ethernet is less than 1Kb. + * We want to limit the size of the protobuf message sent, do fit into a single packet. a + * reasonable size for ip / ethernet is less than 1Kb. 
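 * As a rough illustration (an estimate, not a measured figure), a serialized ServerName is on the
 * order of a few tens of bytes, so ten servers per message keeps each datagram comfortably under
 * that limit.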
*/ public final static int MAX_SERVER_PER_MESSAGE = 10; @@ -113,10 +111,9 @@ public class ClusterStatusPublisher extends ScheduledChore { public final static int NB_SEND = 5; public ClusterStatusPublisher(HMaster master, Configuration conf, - Class publisherClass) - throws IOException { - super("ClusterStatusPublisher for=" + master.getName(), master, conf.getInt( - STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD)); + Class publisherClass) throws IOException { + super("ClusterStatusPublisher for=" + master.getName(), master, + conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD)); this.master = master; this.messagePeriod = conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD); try { @@ -162,13 +159,10 @@ protected void chore() { // We're reusing an existing protobuf message, but we don't send everything. // This could be extended in the future, for example if we want to send stuff like the - // hbase:meta server name. - publisher.publish(ClusterMetricsBuilder.newBuilder() - .setHBaseVersion(VersionInfo.getVersion()) + // hbase:meta server name. + publisher.publish(ClusterMetricsBuilder.newBuilder().setHBaseVersion(VersionInfo.getVersion()) .setClusterId(master.getMasterFileSystem().getClusterId().toString()) - .setMasterName(master.getServerName()) - .setDeadServerNames(sns) - .build()); + .setMasterName(master.getServerName()).setDeadServerNames(sns).build()); } @Override @@ -183,8 +177,8 @@ private synchronized boolean isConnected() { /** * Create the dead server to send. A dead server is sent NB_SEND times. We send at max - * MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly - * dead first. + * MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly dead + * first. 
*/ protected List generateDeadServersListToSend() { // We're getting the message sent since last time, and add them to the list @@ -221,8 +215,8 @@ public int compare(Map.Entry o1, Map.Entry> getDeadServers(long since) { if (master.getServerManager() == null) { @@ -232,7 +226,6 @@ protected List> getDeadServers(long since) { return master.getServerManager().getDeadServers().copyDeadServersSince(since); } - public interface Publisher extends Closeable { void connect(Configuration conf) throws IOException; @@ -260,10 +253,10 @@ public String toString() { @Override public void connect(Configuration conf) throws IOException { - String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + String mcAddress = + conf.get(HConstants.STATUS_MULTICAST_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_PUBLISHER_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_PUBLISHER_BIND_ADDRESS); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); @@ -300,8 +293,7 @@ public void connect(Configuration conf) throws IOException { Bootstrap b = new Bootstrap(); b.group(group) .channelFactory(new HBaseDatagramChannelFactory(NioDatagramChannel.class, family)) - .option(ChannelOption.SO_REUSEADDR, true) - .handler(new ClusterMetricsEncoder(isa)); + .option(ChannelOption.SO_REUSEADDR, true).handler(new ClusterMetricsEncoder(isa)); try { LOG.debug("Channel bindAddress={}, networkInterface={}, INA={}", bindAddress, ni, ina); channel = (DatagramChannel) b.bind(bindAddress, 0).sync().channel(); @@ -321,7 +313,7 @@ public void connect(Configuration conf) throws IOException { } private static final class HBaseDatagramChannelFactory - implements ChannelFactory { + implements ChannelFactory { private final Class clazz; private final InternetProtocolFamily family; @@ -348,7 +340,7 @@ public String toString() { } private static final class ClusterMetricsEncoder - extends MessageToMessageEncoder { + extends MessageToMessageEncoder { final private InetSocketAddress isa; private ClusterMetricsEncoder(InetSocketAddress isa) { @@ -358,8 +350,8 @@ private ClusterMetricsEncoder(InetSocketAddress isa) { @Override protected void encode(ChannelHandlerContext channelHandlerContext, ClusterMetrics clusterStatus, List objects) { - objects.add(new DatagramPacket(Unpooled.wrappedBuffer( - ClusterMetricsBuilder.toClusterStatus(clusterStatus).toByteArray()), isa)); + objects.add(new DatagramPacket(Unpooled + .wrappedBuffer(ClusterMetricsBuilder.toClusterStatus(clusterStatus).toByteArray()), isa)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index 0471fabe3489..9467512fc66c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -34,13 +34,12 @@ import org.slf4j.LoggerFactory; /** - * Class to hold dead servers list and utility querying dead server list. - * Servers are added when they expire or when we find them in filesystem on startup. - * When a server crash procedure is queued, it will populate the processing list and - * then remove the server from processing list when done. 
Servers are removed from - * dead server list when a new instance is started over the old on same hostname and - * port or when new Master comes online tidying up after all initialization. Processing - * list and deadserver list are not tied together (you don't have to be in deadservers + * Class to hold dead servers list and utility querying dead server list. Servers are added when + * they expire or when we find them in filesystem on startup. When a server crash procedure is + * queued, it will populate the processing list and then remove the server from processing list when + * done. Servers are removed from dead server list when a new instance is started over the old on + * same hostname and port or when new Master comes online tidying up after all initialization. + * Processing list and deadserver list are not tied together (you don't have to be in deadservers * list to be processing and vice versa). */ @InterfaceAudience.Private @@ -48,11 +47,10 @@ public class DeadServer { private static final Logger LOG = LoggerFactory.getLogger(DeadServer.class); /** - * Set of known dead servers. On znode expiration, servers are added here. - * This is needed in case of a network partitioning where the server's lease - * expires, but the server is still running. After the network is healed, - * and it's server logs are recovered, it will be told to call server startup - * because by then, its regions have probably been reassigned. + * Set of known dead servers. On znode expiration, servers are added here. This is needed in case + * of a network partitioning where the server's lease expires, but the server is still running. + * After the network is healed, and it's server logs are recovered, it will be told to call server + * startup because by then, its regions have probably been reassigned. */ private final Map deadServers = new HashMap<>(); @@ -86,10 +84,9 @@ synchronized boolean isEmpty() { } /** - * Handles restart of a server. The new server instance has a different start code. - * The new start code should be greater than the old one. We don't check that here. - * Removes the old server from deadserver list. - * + * Handles restart of a server. The new server instance has a different start code. The new start + * code should be greater than the old one. We don't check that here. Removes the old server from + * deadserver list. * @param newServerName Servername as either host:port or * host,port,startcode. * @return true if this server was dead before and coming back alive again @@ -112,14 +109,13 @@ synchronized void cleanAllPreviousInstances(final ServerName newServerName) { } /** - * @param newServerName Server to match port and hostname against. + * @param newServerName Server to match port and hostname against. * @param deadServerIterator Iterator primed so can call 'next' on it. - * @return True if newServerName and current primed - * iterator ServerName have same host and port and we removed old server - * from iterator and from processing list. + * @return True if newServerName and current primed iterator ServerName have same + * host and port and we removed old server from iterator and from processing list. */ private boolean cleanOldServerName(ServerName newServerName, - Iterator deadServerIterator) { + Iterator deadServerIterator) { ServerName sn = deadServerIterator.next(); if (ServerName.isSameAddress(sn, newServerName)) { // Remove from dead servers list. 
Don't remove from the processing list -- @@ -151,10 +147,10 @@ public synchronized String toString() { * @return a sorted array list, by death time, lowest values first. */ synchronized List> copyDeadServersSince(long ts) { - List> res = new ArrayList<>(size()); + List> res = new ArrayList<>(size()); - for (Map.Entry entry:deadServers.entrySet()){ - if (entry.getValue() >= ts){ + for (Map.Entry entry : deadServers.entrySet()) { + if (entry.getValue() >= ts) { res.add(new Pair<>(entry.getKey(), entry.getValue())); } } @@ -162,13 +158,13 @@ synchronized List> copyDeadServersSince(long ts) { Collections.sort(res, (o1, o2) -> o1.getSecond().compareTo(o2.getSecond())); return res; } - + /** * Get the time when a server died * @param deadServerName the dead server name - * @return the date when the server died + * @return the date when the server died */ - public synchronized Date getTimeOfDeath(final ServerName deadServerName){ + public synchronized Date getTimeOfDeath(final ServerName deadServerName) { Long time = deadServers.get(deadServerName); return time == null ? null : new Date(time); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java index 14c4a3ec85f6..28795eab28e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,33 +21,30 @@ import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; - +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ServerName; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Tracks the list of draining region servers via ZK. - * - *
<p>
        This class is responsible for watching for changes to the draining - * servers list. It handles adds/deletes in the draining RS list and - * watches each node. - * - *
<p>
        If an RS gets deleted from draining list, we call + *
<p>
        + * This class is responsible for watching for changes to the draining servers list. It handles + * adds/deletes in the draining RS list and watches each node. + *
<p>
        + * If an RS gets deleted from draining list, we call * {@link ServerManager#removeServerFromDrainList(ServerName)} - * - *
<p>
        If an RS gets added to the draining list, we add a watcher to it and call + *
<p>
        + * If an RS gets added to the draining list, we add a watcher to it and call * {@link ServerManager#addServerToDrainList(ServerName)} - * - *
<p>
        This class is deprecated in 2.0 because decommission/draining API goes through - * master in 2.0. Can remove this class in 3.0. - * + *
<p>
        + * This class is deprecated in 2.0 because decommission/draining API goes through master in 2.0. Can + * remove this class in 3.0. */ @InterfaceAudience.Private public class DrainingServerTracker extends ZKListener { @@ -57,8 +54,8 @@ public class DrainingServerTracker extends ZKListener { private final NavigableSet drainingServers = new TreeSet<>(); private Abortable abortable; - public DrainingServerTracker(ZKWatcher watcher, - Abortable abortable, ServerManager serverManager) { + public DrainingServerTracker(ZKWatcher watcher, Abortable abortable, + ServerManager serverManager) { super(watcher); this.abortable = abortable; this.serverManager = serverManager; @@ -66,10 +63,8 @@ public DrainingServerTracker(ZKWatcher watcher, /** * Starts the tracking of draining RegionServers. - * - *
<p>
        All Draining RSs will be tracked after this method is called. - * - * @throws KeeperException + *
<p>
        + * All Draining RSs will be tracked after this method is called. n */ public void start() throws KeeperException, IOException { watcher.registerListener(this); @@ -77,7 +72,7 @@ public void start() throws KeeperException, IOException { serverManager.registerListener(new ServerListener() { @Override public void serverAdded(ServerName sn) { - if (drainingServers.contains(sn)){ + if (drainingServers.contains(sn)) { serverManager.addServerToDrainList(sn); } } @@ -88,21 +83,20 @@ public void serverAdded(ServerName sn) { } private void add(final List servers) throws IOException { - synchronized(this.drainingServers) { + synchronized (this.drainingServers) { this.drainingServers.clear(); - for (String n: servers) { + for (String n : servers) { final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(n)); this.drainingServers.add(sn); this.serverManager.addServerToDrainList(sn); - LOG.info("Draining RS node created, adding to list [" + - sn + "]"); + LOG.info("Draining RS node created, adding to list [" + sn + "]"); } } } private void remove(final ServerName sn) { - synchronized(this.drainingServers) { + synchronized (this.drainingServers) { this.drainingServers.remove(sn); this.serverManager.removeServerFromDrainList(sn); } @@ -110,17 +104,16 @@ private void remove(final ServerName sn) { @Override public void nodeDeleted(final String path) { - if(path.startsWith(watcher.getZNodePaths().drainingZNode)) { + if (path.startsWith(watcher.getZNodePaths().drainingZNode)) { final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(path)); - LOG.info("Draining RS node deleted, removing from list [" + - sn + "]"); + LOG.info("Draining RS node deleted, removing from list [" + sn + "]"); remove(sn); } } @Override public void nodeChildrenChanged(final String path) { - if(path.equals(watcher.getZNodePaths().drainingZNode)) { + if (path.equals(watcher.getZNodePaths().drainingZNode)) { try { final List newNodes = ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 01e4e5cd961e..566e465ea4de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -262,6 +262,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext; import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; @@ -456,8 +457,8 @@ public class HMaster extends HBaseServerBase implements Maste * *
<p>
        * Remaining steps of initialization occur in - * {@link #finishActiveMasterInitialization(MonitoredTask)} after the master becomes the - * active one. + * {@link #finishActiveMasterInitialization(MonitoredTask)} after the master becomes the active + * one. */ public HMaster(final Configuration conf) throws IOException { super(conf, "Master"); @@ -482,7 +483,7 @@ public HMaster(final Configuration conf) throws IOException { decorateMasterConfiguration(this.conf); - // Hack! Maps DFSClient => Master for logs. HDFS made this + // Hack! Maps DFSClient => Master for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. if (this.conf.get("mapreduce.task.attempt.id") == null) { this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString()); @@ -495,22 +496,22 @@ public HMaster(final Configuration conf) throws IOException { this.maxBalancingTime = getMaxBalancingTime(); this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT, - HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); + HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); // Do we publish the status? - boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED, - HConstants.STATUS_PUBLISHED_DEFAULT); + boolean shouldPublish = + conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); Class publisherClass = - conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS, - ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS, - ClusterStatusPublisher.Publisher.class); + conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS, + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS, + ClusterStatusPublisher.Publisher.class); if (shouldPublish) { if (publisherClass == null) { - LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + - ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS + - " is not set - not publishing status"); + LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS + + " is not set - not publishing status"); } else { clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass); LOG.debug("Created {}", this.clusterStatusPublisherChore); @@ -534,7 +535,7 @@ public HMaster(final Configuration conf) throws IOException { * implementation. */ protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn, - org.apache.hadoop.hbase.Server server) throws InterruptedIOException { + org.apache.hadoop.hbase.Server server) throws InterruptedIOException { return new ActiveMasterManager(zk, sn, server); } @@ -583,8 +584,8 @@ public void run() { // If on way out, then we are no longer active master. 
this.clusterSchemaService.stopAsync(); try { - this.clusterSchemaService.awaitTerminated( - getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, + this.clusterSchemaService + .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException te) { LOG.warn("Failed shutdown of clusterSchemaService", te); @@ -599,8 +600,8 @@ private int putUpJettyServer() throws IOException { if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) { return -1; } - final int infoPort = conf.getInt("hbase.master.info.port.orig", - HConstants.DEFAULT_MASTER_INFOPORT); + final int infoPort = + conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT); // -1 is for disabling info server, so no redirecting if (infoPort < 0 || infoServer == null) { return -1; @@ -611,10 +612,9 @@ private int putUpJettyServer() throws IOException { } final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0"); if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { - String msg = - "Failed to start redirecting jetty server. Address " + addr - + " does not belong to this host. Correct configuration parameter: " - + "hbase.master.info.bindAddress"; + String msg = "Failed to start redirecting jetty server. Address " + addr + + " does not belong to this host. Correct configuration parameter: " + + "hbase.master.info.bindAddress"; LOG.error(msg); throw new IOException(msg); } @@ -631,10 +631,11 @@ private int putUpJettyServer() throws IOException { masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler())); final String redirectHostname = - StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; + StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname); - final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); + final WebAppContext context = + new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); context.addServlet(new ServletHolder(redirect), "/*"); context.setServer(masterJettyServer); @@ -767,8 +768,8 @@ private void initializeZKBasedSystemTrackers() // we need to take care of the ZK information synchronization // if given client ZK are not observer nodes ZKWatcher clientZkWatcher = new ZKWatcher(conf, - getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this, - false, true); + getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this, + false, true); this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this); this.metaLocationSyncer.start(); this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this); @@ -777,15 +778,14 @@ private void initializeZKBasedSystemTrackers() ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId()); } - // Set the cluster as up. If new RSs, they'll be waiting on this before + // Set the cluster as up. If new RSs, they'll be waiting on this before // going ahead with their startup. 
boolean wasUp = this.clusterStatusTracker.isClusterUp(); if (!wasUp) this.clusterStatusTracker.setClusterUp(); - LOG.info("Active/primary master=" + this.serverName + - ", sessionid=0x" + - Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + - ", setting cluster-up flag (Was=" + wasUp + ")"); + LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x" + + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + + ", setting cluster-up flag (Was=" + wasUp + ")"); // create/initialize the snapshot manager and other procedure managers this.snapshotManager = new SnapshotManager(); @@ -881,8 +881,8 @@ private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperEx * Notice that now we will not schedule a special procedure to make meta online(unless the first * time where meta has not been created yet), we will rely on SCP to bring meta online. */ - private void finishActiveMasterInitialization(MonitoredTask status) throws IOException, - InterruptedException, KeeperException, ReplicationException { + private void finishActiveMasterInitialization(MonitoredTask status) + throws IOException, InterruptedException, KeeperException, ReplicationException { /* * We are active master now... go initialize components we need to run. */ @@ -918,7 +918,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc Pair result = null; try { result = HBaseFsck.checkAndMarkRunningHbck(this.conf, - HBaseFsck.createLockRetryCounterFactory(this.conf).create()); + HBaseFsck.createLockRetryCounterFactory(this.conf).create()); } finally { if (result != null) { Closeables.close(result.getSecond(), true); @@ -935,19 +935,17 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.serverManager = createServerManager(this, rsListStorage); this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this); - if (!conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, - DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { + if ( + !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) + ) { this.splitWALManager = new SplitWALManager(this); } - - tryMigrateMetaLocationsFromZooKeeper(); createProcedureExecutor(); - Map, List>> procsByType = - procedureExecutor.getActiveProceduresNoCopy().stream() - .collect(Collectors.groupingBy(p -> p.getClass())); + Map, List>> procsByType = procedureExecutor + .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass())); // Create Assignment Manager this.assignmentManager = createAssignmentManager(this, masterRegion); @@ -980,15 +978,14 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc try { this.serverManager.loadLastFlushedSequenceIds(); } catch (IOException e) { - LOG.info("Failed to load last flushed sequence id of regions" - + " from file system", e); + LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e); } // Set ourselves as active Master now our claim has succeeded up in zk. 
this.activeMaster = true; // Start the Zombie master detector after setting master as active, see HBASE-21535 Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), - "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime()); + "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime()); zombieDetector.setDaemon(true); zombieDetector.start(); @@ -1057,8 +1054,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc return; } - TableDescriptor metaDescriptor = - tableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME); final ColumnFamilyDescriptor tableFamilyDesc = metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY); final ColumnFamilyDescriptor replBarrierFamilyDesc = @@ -1083,8 +1079,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc int existingReplicasCount = assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + - " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); @@ -1092,8 +1088,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc // check again, and issue a ModifyTableProcedure if needed if (metaDesc.getRegionReplication() != replicasNumInConf) { LOG.info( - "The {} config is {} while the replica count in TableDescriptor is {}" + - " for hbase:meta, altering...", + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for hbase:meta, altering...", HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) @@ -1136,10 +1132,12 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc try { initClusterSchemaService(); } catch (IllegalStateException e) { - if (e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException - && tableFamilyDesc == null && replBarrierFamilyDesc == null) { + if ( + e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException + && tableFamilyDesc == null && replBarrierFamilyDesc == null + ) { LOG.info("ClusterSchema service could not be initialized. 
This is " - + "expected during HBase 1 to 2 upgrade", e); + + "expected during HBase 1 to 2 upgrade", e); } else { throw e; } @@ -1155,7 +1153,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc status.markComplete("Initialization successful"); LOG.info(String.format("Master has completed initialization %.3fsec", - (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); + (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime(); configurationManager.registerObserver(this.balancer); configurationManager.registerObserver(this.hfileCleanerPool); @@ -1185,8 +1183,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc // next active master init will not face any issues and all mandatory // services will be started during master init phase. throw new PleaseRestartMasterException("Aborting active master after missing" - + " CFs are successfully added in meta. Subsequent active master " - + "initialization should be uninterrupted"); + + " CFs are successfully added in meta. Subsequent active master " + + "initialization should be uninterrupted"); } if (maintenanceMode) { @@ -1237,32 +1235,31 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc zombieDetector.interrupt(); /* - * After master has started up, lets do balancer post startup initialization. Since this runs - * in activeMasterManager thread, it should be fine. + * After master has started up, lets do balancer post startup initialization. Since this runs in + * activeMasterManager thread, it should be fine. */ long start = EnvironmentEdgeManager.currentTime(); this.balancer.postMasterStartupInitialize(); if (LOG.isDebugEnabled()) { - LOG.debug("Balancer post startup initialization complete, took " + ( - (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); + LOG.debug("Balancer post startup initialization complete, took " + + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); } this.rollingUpgradeChore = new RollingUpgradeChore(this); getChoreService().scheduleChore(rollingUpgradeChore); } - private void createMissingCFsInMetaDuringUpgrade( - TableDescriptor metaDescriptor) throws IOException { - TableDescriptor newMetaDesc = - TableDescriptorBuilder.newBuilder(metaDescriptor) - .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) - .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()) - .build(); - long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, - 0, 0, false); + private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) + throws IOException { + TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor) + .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) + .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build(); + long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false); int tries = 30; - while (!(getMasterProcedureExecutor().isFinished(pid)) - && getMasterProcedureExecutor().isRunning() && tries > 0) { + while ( + !(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning() + && tries > 0 + ) { try { Thread.sleep(1000); } catch (InterruptedException e) { @@ -1272,13 +1269,12 @@ && getMasterProcedureExecutor().isRunning() && tries > 0) { } if (tries <= 0) { throw new HBaseIOException( - "Failed to add 
table and rep_barrier CFs to meta in a given time."); + "Failed to add table and rep_barrier CFs to meta in a given time."); } else { Procedure result = getMasterProcedureExecutor().getResult(pid); if (result != null && result.isFailed()) { - throw new IOException( - "Failed to add table and rep_barrier CFs to meta. " - + MasterProcedureUtil.unwrapRemoteIOException(result)); + throw new IOException("Failed to add table and rep_barrier CFs to meta. " + + MasterProcedureUtil.unwrapRemoteIOException(result)); } } } @@ -1286,7 +1282,7 @@ && getMasterProcedureExecutor().isRunning() && tries > 0) { /** * Check hbase:meta is up and ready for reading. For use during Master startup only. * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online - * and we will hold here until operator intervention. + * and we will hold here until operator intervention. */ @InterfaceAudience.Private public boolean waitForMetaOnline() { @@ -1294,8 +1290,8 @@ public boolean waitForMetaOnline() { } /** - * @return True if region is online and scannable else false if an error or shutdown (Otherwise - * we just block in here holding up all forward-progess). + * @return True if region is online and scannable else false if an error or shutdown (Otherwise we + * just block in here holding up all forward-progess). */ private boolean isRegionOnline(RegionInfo ri) { RetryCounter rc = null; @@ -1307,14 +1303,15 @@ private boolean isRegionOnline(RegionInfo ri) { } } // Region is not OPEN. - Optional> optProc = this.procedureExecutor.getProcedures(). - stream().filter(p -> p instanceof ServerCrashProcedure).findAny(); + Optional> optProc = this.procedureExecutor.getProcedures() + .stream().filter(p -> p instanceof ServerCrashProcedure).findAny(); // TODO: Add a page to refguide on how to do repair. Have this log message point to it. // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and // then how to assign including how to break region lock if one held. - LOG.warn("{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot " + - "progress, in holding-pattern until region onlined.", - ri.getRegionNameAsString(), rs, optProc.isPresent()); + LOG.warn( + "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot " + + "progress, in holding-pattern until region onlined.", + ri.getRegionNameAsString(), rs, optProc.isPresent()); // Check once-a-minute. if (rc == null) { rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create(); @@ -1362,9 +1359,10 @@ private boolean waitForNamespaceOnline() throws IOException { @InterfaceAudience.Private public void updateConfigurationForQuotasObserver(Configuration conf) { // We're configured to not delete quotas on table deletion, so we don't need to add the obs. - if (!conf.getBoolean( - MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, - MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) { + if ( + !conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, + MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT) + ) { return; } String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); @@ -1393,8 +1391,8 @@ private void initMobCleaner() { *
        */ @InterfaceAudience.Private - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { // We put this out here in a method so can do a Mockito.spy and stub it out // w/ a mocked up ServerManager. setupClusterConnection(); @@ -1402,7 +1400,7 @@ protected ServerManager createServerManager(MasterServices master, } private void waitForRegionServers(final MonitoredTask status) - throws IOException, InterruptedException { + throws IOException, InterruptedException { this.serverManager.waitForRegionServers(status); } @@ -1412,9 +1410,9 @@ protected void initClusterSchemaService() throws IOException, InterruptedExcepti this.clusterSchemaService = new ClusterSchemaServiceImpl(this); this.clusterSchemaService.startAsync(); try { - this.clusterSchemaService.awaitRunning(getConfiguration().getInt( - HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, - DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); + this.clusterSchemaService + .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, + DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException toe) { throw new IOException("Timedout starting ClusterSchemaService", toe); } @@ -1428,7 +1426,7 @@ private void initQuotaManager() throws IOException { private SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() { SpaceQuotaSnapshotNotifier notifier = - SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration()); + SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration()); return notifier; } @@ -1476,53 +1474,55 @@ public TableStateManager getTableStateManager() { } /* - * Start up all services. If any of these threads gets an unhandled exception - * then they just die with a logged message. This should be fine because - * in general, we do not expect the master to get such unhandled exceptions - * as OOMEs; it should be lightly loaded. See what HRegionServer does if - * need to install an unexpected exception handler. + * Start up all services. If any of these threads gets an unhandled exception then they just die + * with a logged message. This should be fine because in general, we do not expect the master to + * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer + * does if need to install an unexpected exception handler. 
*/ private void startServiceThreads() throws IOException { // Start the executor service pools - final int masterOpenRegionPoolSize = conf.getInt( - HConstants.MASTER_OPEN_REGION_THREADS, HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize)); - final int masterCloseRegionPoolSize = conf.getInt( - HConstants.MASTER_CLOSE_REGION_THREADS, HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_CLOSE_REGION).setCorePoolSize(masterCloseRegionPoolSize)); + final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS, + HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize)); + final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS, + HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION) + .setCorePoolSize(masterCloseRegionPoolSize)); final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS, - HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(masterServerOpThreads)); - final int masterServerMetaOpsThreads = conf.getInt( - HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, + HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS) + .setCorePoolSize(masterServerOpThreads)); + final int masterServerMetaOpsThreads = + conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_META_SERVER_OPERATIONS).setCorePoolSize(masterServerMetaOpsThreads)); - final int masterLogReplayThreads = conf.getInt( - HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads)); - final int masterSnapshotThreads = conf.getInt( - SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SNAPSHOT_OPERATIONS).setCorePoolSize(masterSnapshotThreads) - .setAllowCoreThreadTimeout(true)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS) + .setCorePoolSize(masterServerMetaOpsThreads)); + final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS, + HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads)); + final int 
masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, + SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS) + .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true)); final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS, - HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_MERGE_OPERATIONS).setCorePoolSize(masterMergeDispatchThreads) - .setAllowCoreThreadTimeout(true)); + HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS) + .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true)); // We depend on there being only one instance of this executor running // at a time. To do concurrency, would need fencing of enable/disable of // tables. // Any time changing this maxThreads to > 1, pls see the comment at // AccessController#postCompletedCreateTableAction - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); startProcedureExecutor(); // Create log cleaner thread pool @@ -1532,9 +1532,9 @@ private void startServiceThreads() throws IOException { // Start log cleaner thread int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); - this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), - logCleanerPool, params); + this.logCleaner = + new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(), + getMasterWalManager().getOldLogDir(), logCleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread @@ -1547,26 +1547,25 @@ private void startServiceThreads() throws IOException { // Regions Reopen based on very high storeFileRefCount is considered enabled // only if hbase.regions.recovery.store.file.ref.count has value > 0 - final int maxStoreFileRefCount = conf.getInt( - HConstants.STORE_FILE_REF_COUNT_THRESHOLD, + final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD); if (maxStoreFileRefCount > 0) { this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this); getChoreService().scheduleChore(this.regionsRecoveryChore); } else { - LOG.info("Reopening regions with very high storeFileRefCount is disabled. " + - "Provide threshold value > 0 for {} to enable it.", + LOG.info( + "Reopening regions with very high storeFileRefCount is disabled. 
" + + "Provide threshold value > 0 for {} to enable it.", HConstants.STORE_FILE_REF_COUNT_THRESHOLD); } this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this); - replicationBarrierCleaner = new ReplicationBarrierCleaner(conf, this, getConnection(), - replicationPeerManager); + replicationBarrierCleaner = + new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager); getChoreService().scheduleChore(replicationBarrierCleaner); - final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker - .isSnapshotCleanupEnabled(); + final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker.isSnapshotCleanupEnabled(); this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager()); if (isSnapshotChoreEnabled) { getChoreService().scheduleChore(this.snapshotCleanerChore); @@ -1646,8 +1645,8 @@ protected void stopServiceThreads() { private void createProcedureExecutor() throws IOException { MasterProcedureEnv procEnv = new MasterProcedureEnv(this); - procedureStore = - new RegionProcedureStore(this, masterRegion, new MasterProcedureEnv.FsUtilsLeaseRecovery(this)); + procedureStore = new RegionProcedureStore(this, masterRegion, + new MasterProcedureEnv.FsUtilsLeaseRecovery(this)); procedureStore.registerListener(new ProcedureStoreListener() { @Override @@ -1682,7 +1681,6 @@ protected void startProcedureExecutor() throws IOException { /** * Turn on/off Snapshot Cleanup Chore - * * @param on indicates whether Snapshot Cleanup Chore is to be run */ void switchSnapshotCleanup(final boolean on, final boolean synchronous) { @@ -1708,7 +1706,6 @@ private void switchSnapshotCleanup(final boolean on) { } } - private void stopProcedureExecutor() { if (procedureExecutor != null) { configurationManager.deregisterObserver(procedureExecutor.getEnvironment()); @@ -1747,8 +1744,8 @@ protected void stopChores() { /** * @return Get remote side's InetAddress */ - InetAddress getRemoteInetAddress(final int port, - final long serverStartCode) throws UnknownHostException { + InetAddress getRemoteInetAddress(final int port, final long serverStartCode) + throws UnknownHostException { // Do it out here in its own little method so can fake an address when // mocking up in tests. InetAddress ia = RpcServer.getRemoteIp(); @@ -1769,8 +1766,8 @@ InetAddress getRemoteInetAddress(final int port, */ private int getMaxBalancingTime() { // if max balancing time isn't set, defaulting it to period time - int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, - getConfiguration() + int maxBalancingTime = + getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration() .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); return maxBalancingTime; } @@ -1784,20 +1781,22 @@ private int getMaxRegionsInTransition() { } /** - * It first sleep to the next balance plan start time. Meanwhile, throttling by the max - * number regions in transition to protect availability. - * @param nextBalanceStartTime The next balance plan start time + * It first sleep to the next balance plan start time. Meanwhile, throttling by the max number + * regions in transition to protect availability. 
+ * @param nextBalanceStartTime The next balance plan start time * @param maxRegionsInTransition max number of regions in transition - * @param cutoffTime when to exit balancer + * @param cutoffTime when to exit balancer */ private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition, - long cutoffTime) { + long cutoffTime) { boolean interrupted = false; // Sleep to next balance plan start time // But if there are zero regions in transition, it can skip sleep to speed up. - while (!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime - && this.assignmentManager.getRegionStates().hasRegionsInTransition()) { + while ( + !interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime + && this.assignmentManager.getRegionStates().hasRegionsInTransition() + ) { try { Thread.sleep(100); } catch (InterruptedException ie) { @@ -1806,10 +1805,12 @@ private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransi } // Throttling by max number regions in transition - while (!interrupted - && maxRegionsInTransition > 0 + while ( + !interrupted && maxRegionsInTransition > 0 && this.assignmentManager.getRegionStates().getRegionsInTransitionCount() - >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) { + >= maxRegionsInTransition + && EnvironmentEdgeManager.currentTime() <= cutoffTime + ) { try { // sleep if the number of regions in transition exceeds the limit Thread.sleep(100); @@ -1826,14 +1827,13 @@ public BalanceResponse balance() throws IOException { } /** - * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed - * this time, the metrics related to the balance will be updated. - * - * When balance is running, related metrics will be updated at the same time. But if some - * checking logic failed and cause the balancer exit early, we lost the chance to update - * balancer metrics. This will lead to user missing the latest balancer info. - * */ - public BalanceResponse balanceOrUpdateMetrics() throws IOException{ + * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed this + * time, the metrics related to the balance will be updated. When balance is running, related + * metrics will be updated at the same time. But if some checking logic failed and cause the + * balancer exit early, we lost the chance to update balancer metrics. This will lead to user + * missing the latest balancer info. + */ + public BalanceResponse balanceOrUpdateMetrics() throws IOException { synchronized (this.balancer) { BalanceResponse response = balance(); if (!response.isBalancerRan()) { @@ -1877,8 +1877,9 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder(); - if (loadBalancerTracker == null - || !(loadBalancerTracker.isBalancerOn() || request.isDryRun())) { + if ( + loadBalancerTracker == null || !(loadBalancerTracker.isBalancerOn() || request.isDryRun()) + ) { return responseBuilder.build(); } @@ -1887,7 +1888,7 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } synchronized (this.balancer) { - // Only allow one balance run at at time. + // Only allow one balance run at at time. 
if (this.assignmentManager.hasRegionsInTransition()) { List regionsInTransition = assignmentManager.getRegionsInTransition(); // if hbase:meta region is in transition, result of assignment cannot be recorded @@ -1902,15 +1903,15 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } if (!request.isIgnoreRegionsInTransition() || metaInTransition) { - LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + - ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint - + (truncated? "(truncated list)": "")); + LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + + ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint + + (truncated ? "(truncated list)" : "")); return responseBuilder.build(); } } if (this.serverManager.areDeadServersInProgress()) { - LOG.info("Not running balancer because processing dead regionserver(s): " + - this.serverManager.getDeadServers()); + LOG.info("Not running balancer because processing dead regionserver(s): " + + this.serverManager.getDeadServers()); return responseBuilder.build(); } @@ -1927,13 +1928,13 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } Map>> assignments = - this.assignmentManager.getRegionStates() - .getAssignmentsForBalancer(tableStateManager, this.serverManager.getOnlineServersList()); + this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager, + this.serverManager.getOnlineServersList()); for (Map> serverMap : assignments.values()) { serverMap.keySet().removeAll(this.serverManager.getDrainingServersList()); } - //Give the balancer the current cluster state. + // Give the balancer the current cluster state. this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor()); List plans = this.balancer.balanceCluster(assignments); @@ -1947,9 +1948,8 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { // For dry run we don't actually want to execute the moves, but we do want // to execute the coprocessor below - List sucRPs = request.isDryRun() - ? Collections.emptyList() - : executeRegionPlansWithThrottling(plans); + List sucRPs = + request.isDryRun() ? 
Collections.emptyList() : executeRegionPlansWithThrottling(plans); if (this.cpHost != null) { try { @@ -1978,24 +1978,24 @@ public List executeRegionPlansWithThrottling(List plans) int maxRegionsInTransition = getMaxRegionsInTransition(); long balanceStartTime = EnvironmentEdgeManager.currentTime(); long cutoffTime = balanceStartTime + this.maxBalancingTime; - int rpCount = 0; // number of RegionPlans balanced so far + int rpCount = 0; // number of RegionPlans balanced so far if (plans != null && !plans.isEmpty()) { int balanceInterval = this.maxBalancingTime / plans.size(); - LOG.info("Balancer plans size is " + plans.size() + ", the balance interval is " - + balanceInterval + " ms, and the max number regions in transition is " - + maxRegionsInTransition); + LOG.info( + "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval + + " ms, and the max number regions in transition is " + maxRegionsInTransition); - for (RegionPlan plan: plans) { + for (RegionPlan plan : plans) { LOG.info("balance " + plan); - //TODO: bulk assign + // TODO: bulk assign try { this.assignmentManager.balance(plan); } catch (HBaseIOException hioe) { - //should ignore failed plans here, avoiding the whole balance plans be aborted - //later calls of balance() can fetch up the failed and skipped plans + // should ignore failed plans here, avoiding the whole balance plans be aborted + // later calls of balance() can fetch up the failed and skipped plans LOG.warn("Failed balance plan {}, skipping...", plan, hioe); } - //rpCount records balance plans processed, does not care if a plan succeeds + // rpCount records balance plans processed, does not care if a plan succeeds rpCount++; successRegionPlans.add(plan); @@ -2005,12 +2005,14 @@ public List executeRegionPlansWithThrottling(List plans) } // if performing next balance exceeds cutoff time, exit the loop - if (this.maxBalancingTime > 0 && rpCount < plans.size() - && EnvironmentEdgeManager.currentTime() > cutoffTime) { + if ( + this.maxBalancingTime > 0 && rpCount < plans.size() + && EnvironmentEdgeManager.currentTime() > cutoffTime + ) { // TODO: After balance, there should not be a cutoff time (keeping it as // a security net for now) - LOG.debug("No more balancing till next balance run; maxBalanceTime=" - + this.maxBalancingTime); + LOG.debug( + "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime); break; } } @@ -2026,10 +2028,8 @@ public RegionNormalizerManager getRegionNormalizerManager() { } @Override - public boolean normalizeRegions( - final NormalizeTableFilterParams ntfp, - final boolean isHighPriority - ) throws IOException { + public boolean normalizeRegions(final NormalizeTableFilterParams ntfp, + final boolean isHighPriority) throws IOException { if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); return false; @@ -2042,10 +2042,8 @@ public boolean normalizeRegions( } final Set matchingTables = getTableDescriptors(new LinkedList<>(), - ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) - .stream() - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream() + .map(TableDescriptor::getTableName).collect(Collectors.toSet()); final Set allEnabledTables = tableStateManager.getTablesInStates(TableState.State.ENABLED); final List targetTables = @@ -2059,14 +2057,13 @@ public boolean 
normalizeRegions( */ @Override public String getClientIdAuditPrefix() { - return "Client=" + RpcServer.getRequestUserName().orElse(null) - + "/" + RpcServer.getRemoteAddress().orElse(null); + return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/" + + RpcServer.getRemoteAddress().orElse(null); } /** - * Switch for the background CatalogJanitor thread. - * Used for testing. The thread will continue to run. It will just be a noop - * if disabled. + * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to + * run. It will just be a noop if disabled. * @param b If false, the catalog janitor won't do anything. */ public void setCatalogJanitorEnabled(final boolean b) { @@ -2074,18 +2071,15 @@ public void setCatalogJanitorEnabled(final boolean b) { } @Override - public long mergeRegions( - final RegionInfo[] regionsToMerge, - final boolean forcible, - final long ng, - final long nonce) throws IOException { + public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, final long ng, + final long nonce) throws IOException { checkInitialized(); if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { String regionsStr = Arrays.deepToString(regionsToMerge); LOG.warn("Merge switch is off! skip merge of " + regionsStr); - throw new DoNotRetryIOException("Merge of " + regionsStr + - " failed because merge switch is off"); + throw new DoNotRetryIOException( + "Merge of " + regionsStr + " failed because merge switch is off"); } final String mergeRegionsStr = Arrays.stream(regionsToMerge).map(RegionInfo::getEncodedName) @@ -2097,7 +2091,7 @@ protected void run() throws IOException { String aid = getClientIdAuditPrefix(); LOG.info("{} merge regions {}", aid, mergeRegionsStr); submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), - regionsToMerge, forcible)); + regionsToMerge, forcible)); getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge); } @@ -2109,33 +2103,32 @@ protected String getDescription() { } @Override - public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, - final long nonceGroup, final long nonce) - throws IOException { + public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup, + final long nonce) throws IOException { checkInitialized(); if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) { LOG.warn("Split switch is off! 
skip split of " + regionInfo); - throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString() + - " failed due to split switch off"); + throw new DoNotRetryIOException( + "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off"); } - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); - LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); + LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); - // Execute the operation asynchronously - submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); - } + // Execute the operation asynchronously + submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); + } - @Override - protected String getDescription() { - return "SplitTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "SplitTableProcedure"; + } + }); } private void warmUpRegion(ServerName server, RegionInfo region) { @@ -2152,8 +2145,8 @@ private void warmUpRegion(ServerName server, RegionInfo region) { // a success/failure result. @InterfaceAudience.Private public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException { - RegionState regionState = assignmentManager.getRegionStates(). - getRegionState(Bytes.toString(encodedRegionName)); + RegionState regionState = + assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName)); RegionInfo hri; if (regionState != null) { @@ -2163,17 +2156,18 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I } ServerName dest; - List exclude = hri.getTable().isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() - : new ArrayList<>(1); - if (destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))) { - LOG.info( - Bytes.toString(encodedRegionName) + " can not move to " + Bytes.toString(destServerName) - + " because the server is in exclude list"); + List exclude = hri.getTable().isSystemTable() + ? 
assignmentManager.getExcludedServersForSystemTable() + : new ArrayList<>(1); + if ( + destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName))) + ) { + LOG.info(Bytes.toString(encodedRegionName) + " can not move to " + + Bytes.toString(destServerName) + " because the server is in exclude list"); destServerName = null; } if (destServerName == null || destServerName.length == 0) { - LOG.info("Passed destination servername is null/empty so " + - "choosing a server at random"); + LOG.info("Passed destination servername is null/empty so " + "choosing a server at random"); exclude.add(regionState.getServerName()); final List destServers = this.serverManager.createDestinationServersList(exclude); dest = balancer.randomAssignment(hri, destServers); @@ -2192,9 +2186,9 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I if (dest.equals(serverName)) { // To avoid unnecessary region moving later by balancer. Don't put user // regions on master. - LOG.debug("Skipping move of region " + hri.getRegionNameAsString() + - " to avoid unnecessary region moving later by load balancer," + - " because it should not be on master"); + LOG.debug("Skipping move of region " + hri.getRegionNameAsString() + + " to avoid unnecessary region moving later by load balancer," + + " because it should not be on master"); return; } } @@ -2207,7 +2201,7 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I // Now we can do the move RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest); - assert rp.getDestination() != null: rp.toString() + " " + dest; + assert rp.getDestination() != null : rp.toString() + " " + dest; try { checkInitialized(); @@ -2221,8 +2215,8 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I // Warmup the region on the destination before initiating the move. // A region server could reject the close request because it either does not // have the specified region or the region is being split. - LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on " + - rp.getDestination()); + LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on " + + rp.getDestination()); warmUpRegion(rp.getDestination(), hri); } LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer"); @@ -2239,7 +2233,7 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I } } catch (IOException ioe) { if (ioe instanceof HBaseIOException) { - throw (HBaseIOException)ioe; + throw (HBaseIOException) ioe; } throw new HBaseIOException(ioe); } @@ -2247,7 +2241,7 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I @Override public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor); if (desc == null) { @@ -2303,7 +2297,7 @@ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOEx LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor); - // This special create table is called locally to master. Therefore, no RPC means no need + // This special create table is called locally to master. Therefore, no RPC means no need // to use nonce to detect duplicated RPC call. 
long procId = this.procedureExecutor.submitProcedure( new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions)); @@ -2312,18 +2306,15 @@ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOEx } private void startActiveMasterManager(int infoPort) throws KeeperException { - String backupZNode = ZNodePaths.joinZNode( - zooKeeper.getZNodePaths().backupMasterAddressesZNode, serverName.toString()); + String backupZNode = ZNodePaths.joinZNode(zooKeeper.getZNodePaths().backupMasterAddressesZNode, + serverName.toString()); /* - * Add a ZNode for ourselves in the backup master directory since we - * may not become the active master. If so, we want the actual active - * master to know we are backup masters, so that it won't assign - * regions to us if so configured. - * - * If we become the active master later, ActiveMasterManager will delete - * this node explicitly. If we crash before then, ZooKeeper will delete - * this node for us since it is ephemeral. - */ + * Add a ZNode for ourselves in the backup master directory since we may not become the active + * master. If so, we want the actual active master to know we are backup masters, so that it + * won't assign regions to us if so configured. If we become the active master later, + * ActiveMasterManager will delete this node explicitly. If we crash before then, ZooKeeper will + * delete this node for us since it is ephemeral. + */ LOG.info("Adding backup master ZNode " + backupZNode); if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) { LOG.warn("Failed create of " + backupZNode + " by " + serverName); @@ -2350,12 +2341,14 @@ private void startActiveMasterManager(int infoPort) throws KeeperException { status.setStatus("Failed to become active: " + t.getMessage()); LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t); // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility - if (t instanceof NoClassDefFoundError && t.getMessage(). - contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) { + if ( + t instanceof NoClassDefFoundError + && t.getMessage().contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction") + ) { // improved error message for this special case - abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + - "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + - " or change your hadoop jars to start properly", t); + abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + + " or change your hadoop jars to start properly", t); } else { abort("Unhandled exception. 
Starting shutdown.", t); } @@ -2369,72 +2362,67 @@ private static boolean isCatalogTable(final TableName tableName) { } @Override - public long deleteTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { + public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); - LOG.info(getClientIdAuditPrefix() + " delete " + tableName); + LOG.info(getClientIdAuditPrefix() + " delete " + tableName); - // TODO: We can handle/merge duplicate request - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(), - tableName, latch)); - latch.await(); + // TODO: We can handle/merge duplicate request + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure( + new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); + latch.await(); - getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); - } + getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); + } - @Override - protected String getDescription() { - return "DeleteTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "DeleteTableProcedure"; + } + }); } @Override - public long truncateTable( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) throws IOException { + public long truncateTable(final TableName tableName, final boolean preserveSplits, + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); - LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); - submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), - tableName, preserveSplits, latch)); - latch.await(); + LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); + submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, + preserveSplits, latch)); + latch.await(); - 
getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); - } + getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); + } - @Override - protected String getDescription() { - return "TruncateTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "TruncateTableProcedure"; + } + }); } @Override public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); checkTableExists(tableName); @@ -2445,7 +2433,7 @@ public TableDescriptor get() throws IOException { TableDescriptor old = getTableDescriptors().get(tableName); if (old.hasColumnFamily(column.getName())) { throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString() - + "' in table '" + tableName + "' already exists so cannot be added"); + + "' in table '" + tableName + "' already exists so cannot be added"); } return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build(); @@ -2462,7 +2450,7 @@ protected interface TableDescriptorGetter { @Override public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); checkTableExists(tableName); return modifyTable(tableName, new TableDescriptorGetter() { @@ -2472,7 +2460,7 @@ public TableDescriptor get() throws IOException { TableDescriptor old = getTableDescriptors().get(tableName); if (!old.hasColumnFamily(descriptor.getName())) { throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString() - + "' does not exist, so it cannot be modified"); + + "' does not exist, so it cannot be modified"); } return TableDescriptorBuilder.newBuilder(old).modifyColumnFamily(descriptor).build(); @@ -2480,7 +2468,6 @@ public TableDescriptor get() throws IOException { }, nonceGroup, nonce, true); } - @Override public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT, long nonceGroup, long nonce) throws IOException { @@ -2509,7 +2496,7 @@ protected String getDescription() { @Override public long deleteColumn(final TableName tableName, final byte[] columnName, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); checkTableExists(tableName); @@ -2520,12 +2507,12 @@ public TableDescriptor get() throws IOException { TableDescriptor old = getTableDescriptors().get(tableName); if (!old.hasColumnFamily(columnName)) { - throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName) - + "' does not exist, so it cannot be deleted"); + throw new InvalidFamilyOperationException( + "Family '" + Bytes.toString(columnName) + "' does not exist, so it cannot be deleted"); } if (old.getColumnFamilyCount() == 1) { throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName) - + "' is the only column family in the table, so it cannot be deleted"); + + "' is the only column family in the table, so it cannot be deleted"); } return TableDescriptorBuilder.newBuilder(old).removeColumnFamily(columnName).build(); } @@ -2534,134 +2521,137 @@ public TableDescriptor get() throws IOException { @Override public long enableTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException { + 
throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preEnableTable(tableName); - - // Normally, it would make sense for this authorization check to exist inside - // AccessController, but because the authorization check is done based on internal state - // (rather than explicit permissions) we'll do the check here instead of in the - // coprocessor. - MasterQuotaManager quotaManager = getMasterQuotaManager(); - if (quotaManager != null) { - if (quotaManager.isQuotaInitialized()) { + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preEnableTable(tableName); + + // Normally, it would make sense for this authorization check to exist inside + // AccessController, but because the authorization check is done based on internal state + // (rather than explicit permissions) we'll do the check here instead of in the + // coprocessor. + MasterQuotaManager quotaManager = getMasterQuotaManager(); + if (quotaManager != null) { + if (quotaManager.isQuotaInitialized()) { SpaceQuotaSnapshot currSnapshotOfTable = - QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); + QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); if (currSnapshotOfTable != null) { SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); - if (quotaStatus.isInViolation() - && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)) { - throw new AccessDeniedException("Enabling the table '" + tableName + if ( + quotaStatus.isInViolation() + && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null) + ) { + throw new AccessDeniedException("Enabling the table '" + tableName + "' is disallowed due to a violated space quota."); + } } + } else if (LOG.isTraceEnabled()) { + LOG + .trace("Unable to check for space quotas as the MasterQuotaManager is not enabled"); } - } else if (LOG.isTraceEnabled()) { - LOG.trace("Unable to check for space quotas as the MasterQuotaManager is not enabled"); } - } - LOG.info(getClientIdAuditPrefix() + " enable " + tableName); + LOG.info(getClientIdAuditPrefix() + " enable " + tableName); - // Execute the operation asynchronously - client will check the progress of the operation - // In case the request is from a <1.1 client before returning, - // we want to make sure that the table is prepared to be - // enabled (the table is locked and the table state is set). - // Note: if the procedure throws exception, we will catch it and rethrow. - final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); - submitProcedure(new EnableTableProcedure(procedureExecutor.getEnvironment(), - tableName, prepareLatch)); - prepareLatch.await(); + // Execute the operation asynchronously - client will check the progress of the operation + // In case the request is from a <1.1 client before returning, + // we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // Note: if the procedure throws exception, we will catch it and rethrow. 
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); + submitProcedure( + new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, prepareLatch)); + prepareLatch.await(); - getMaster().getMasterCoprocessorHost().postEnableTable(tableName); - } + getMaster().getMasterCoprocessorHost().postEnableTable(tableName); + } - @Override - protected String getDescription() { - return "EnableTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "EnableTableProcedure"; + } + }); } @Override public long disableTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException { + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDisableTable(tableName); - - LOG.info(getClientIdAuditPrefix() + " disable " + tableName); - - // Execute the operation asynchronously - client will check the progress of the operation - // In case the request is from a <1.1 client before returning, - // we want to make sure that the table is prepared to be - // enabled (the table is locked and the table state is set). - // Note: if the procedure throws exception, we will catch it and rethrow. - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), - tableName, false, prepareLatch)); - prepareLatch.await(); - - getMaster().getMasterCoprocessorHost().postDisableTable(tableName); - } + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDisableTable(tableName); - @Override - protected String getDescription() { - return "DisableTableProcedure"; - } - }); + LOG.info(getClientIdAuditPrefix() + " disable " + tableName); + + // Execute the operation asynchronously - client will check the progress of the operation + // In case the request is from a <1.1 client before returning, + // we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // Note: if the procedure throws exception, we will catch it and rethrow. + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. 
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName, + false, prepareLatch)); + prepareLatch.await(); + + getMaster().getMasterCoprocessorHost().postDisableTable(tableName); + } + + @Override + protected String getDescription() { + return "DisableTableProcedure"; + } + }); } private long modifyTable(final TableName tableName, - final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, - final boolean shouldCheckDescriptor) throws IOException { + final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, + final boolean shouldCheckDescriptor) throws IOException { return MasterProcedureUtil - .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName); - TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost() - .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); - TableDescriptorChecker.sanityCheck(conf, newDescriptor); - LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName, - oldDescriptor, newDescriptor); - - // Execute the operation synchronously - wait for the operation completes before - // continuing. - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), - newDescriptor, latch, oldDescriptor, shouldCheckDescriptor)); - latch.await(); - - getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor, - newDescriptor); - } + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName); + TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost() + .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); + TableDescriptorChecker.sanityCheck(conf, newDescriptor); + LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName, + oldDescriptor, newDescriptor); + + // Execute the operation synchronously - wait for the operation completes before + // continuing. + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. 
+ ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), + newDescriptor, latch, oldDescriptor, shouldCheckDescriptor)); + latch.await(); - @Override - protected String getDescription() { - return "ModifyTableProcedure"; - } - }); + getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor, + newDescriptor); + } + + @Override + protected String getDescription() { + return "ModifyTableProcedure"; + } + }); } @Override public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); return modifyTable(tableName, new TableDescriptorGetter() { @Override @@ -2706,13 +2696,12 @@ public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long n final TableName dstTable = TableName.valueOf(snapshotDesc.getTable()); getClusterSchema().getNamespace(dstTable.getNamespaceAsString()); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override protected void run() throws IOException { - setProcId( - getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl, - customSFT)); + setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), + restoreAcl, customSFT)); } @Override @@ -2731,7 +2720,7 @@ private void checkTableExists(final TableName tableName) @Override public void checkTableModifiable(final TableName tableName) - throws IOException, TableNotFoundException, TableNotDisabledException { + throws IOException, TableNotFoundException, TableNotDisabledException { if (isCatalogTable(tableName)) { throw new IOException("Can't modify catalog tables"); } @@ -2747,7 +2736,7 @@ public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIO } public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet

    * =-.}) If reference, then the regex has more than just one * group. Group 1, hfile/hfilelink pattern, is this file's id. Group 2 '(.+)' is the reference's * parent region name. */ - private static final Pattern REF_NAME_PATTERN = Pattern - .compile(String.format("^(%s|%s)\\.(.+)$", HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX)); + private static final Pattern REF_NAME_PATTERN = + Pattern.compile(String.format("^(%s|%s)\\.(.+)$", HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX)); public static final String STORE_FILE_READER_NO_READAHEAD = "hbase.store.reader.no-readahead"; public static final boolean DEFAULT_STORE_FILE_READER_NO_READAHEAD = false; @@ -115,18 +112,18 @@ public class StoreFileInfo implements Configurable { /** * Create a Store File Info - * @param conf the {@link Configuration} to use - * @param fs The current file system to use. - * @param initialPath The {@link Path} of the file + * @param conf the {@link Configuration} to use + * @param fs The current file system to use. + * @param initialPath The {@link Path} of the file * @param primaryReplica true if this is a store file for primary replica, otherwise false. */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final Path initialPath, - final boolean primaryReplica) throws IOException { + final boolean primaryReplica) throws IOException { this(conf, fs, null, initialPath, primaryReplica); } private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Path initialPath, final boolean primaryReplica) throws IOException { + final Path initialPath, final boolean primaryReplica) throws IOException { assert fs != null; assert initialPath != null; assert conf != null; @@ -135,8 +132,8 @@ private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileS this.conf = conf; this.initialPath = initialPath; this.primaryReplica = primaryReplica; - this.noReadahead = this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, - DEFAULT_STORE_FILE_READER_NO_READAHEAD); + this.noReadahead = + this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD); Path p = initialPath; if (HFileLink.isHFileLink(p)) { // HFileLink @@ -173,48 +170,48 @@ private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileS /** * Create a Store File Info - * @param conf the {@link Configuration} to use - * @param fs The current file system to use. + * @param conf the {@link Configuration} to use + * @param fs The current file system to use. 
* @param fileStatus The {@link FileStatus} of the file */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus) - throws IOException { + throws IOException { this(conf, fs, fileStatus, fileStatus.getPath(), true); } /** * Create a Store File Info from an HFileLink - * @param conf The {@link Configuration} to use - * @param fs The current file system to use + * @param conf The {@link Configuration} to use + * @param fs The current file system to use * @param fileStatus The {@link FileStatus} of the file */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final HFileLink link) { + final HFileLink link) { this(conf, fs, fileStatus, null, link); } /** * Create a Store File Info from an HFileLink - * @param conf The {@link Configuration} to use - * @param fs The current file system to use + * @param conf The {@link Configuration} to use + * @param fs The current file system to use * @param fileStatus The {@link FileStatus} of the file - * @param reference The reference instance + * @param reference The reference instance */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Reference reference) { + final Reference reference) { this(conf, fs, fileStatus, reference, null); } /** * Create a Store File Info from an HFileLink and a Reference - * @param conf The {@link Configuration} to use - * @param fs The current file system to use + * @param conf The {@link Configuration} to use + * @param fs The current file system to use * @param fileStatus The {@link FileStatus} of the file - * @param reference The reference instance - * @param link The link instance + * @param reference The reference instance + * @param link The link instance */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Reference reference, final HFileLink link) { + final Reference reference, final HFileLink link) { this.fs = fs; this.conf = conf; this.primaryReplica = false; @@ -222,8 +219,8 @@ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileSt this.createdTimestamp = (fileStatus == null) ? 0 : fileStatus.getModificationTime(); this.reference = reference; this.link = link; - this.noReadahead = this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, - DEFAULT_STORE_FILE_READER_NO_READAHEAD); + this.noReadahead = + this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD); } @Override @@ -237,25 +234,22 @@ public void setConf(Configuration conf) { } /** - * Size of the Hfile - * @return size + * Size of the Hfile n */ public long getSize() { return size; } /** - * Sets the region coprocessor env. - * @param coprocessorHost + * Sets the region coprocessor env. n */ public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { this.coprocessorHost = coprocessorHost; } /** - * @return the Reference object associated to this StoreFileInfo. - * null if the StoreFile is not a - * reference. + * @return the Reference object associated to this StoreFileInfo. null if the StoreFile is not a + * reference. 
*/ public Reference getReference() { return this.reference; @@ -292,7 +286,7 @@ StoreFileReader createReader(ReaderContext context, CacheConfig cacheConf) throw } ReaderContext createReaderContext(boolean doDropBehind, long readahead, ReaderType type) - throws IOException { + throws IOException { FSDataInputStreamWrapper in; FileStatus status; if (this.link != null) { @@ -319,8 +313,8 @@ ReaderContext createReaderContext(boolean doDropBehind, long readahead, ReaderTy } long length = status.getLen(); ReaderContextBuilder contextBuilder = - new ReaderContextBuilder().withInputStreamWrapper(in).withFileSize(length) - .withPrimaryReplicaReader(this.primaryReplica).withReaderType(type).withFileSystem(fs); + new ReaderContextBuilder().withInputStreamWrapper(in).withFileSize(length) + .withPrimaryReplicaReader(this.primaryReplica).withReaderType(type).withFileSystem(fs); if (this.reference != null) { contextBuilder.withFilePath(this.getPath()); } else { @@ -333,7 +327,7 @@ ReaderContext createReaderContext(boolean doDropBehind, long readahead, ReaderTy * Compute the HDFS Block Distribution for this StoreFile */ public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs) - throws IOException { + throws IOException { // guard against the case where we get the FileStatus from link, but by the time we // call compute the file is moved again if (this.link != null) { @@ -353,7 +347,7 @@ public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs) } private HDFSBlocksDistribution computeHDFSBlocksDistributionInternal(final FileSystem fs) - throws IOException { + throws IOException { FileStatus status = getReferencedFileStatus(fs); if (this.reference != null) { return computeRefFileHDFSBlockDistribution(fs, reference, status); @@ -425,7 +419,7 @@ public long getModificationTime() throws IOException { @Override public String toString() { return this.getPath() - + (isReference() ? "->" + getReferredToFile(this.getPath()) + "-" + reference : ""); + + (isReference() ? "->" + getReferredToFile(this.getPath()) + "-" + reference : ""); } /** @@ -458,8 +452,7 @@ public static boolean isMobFile(final Path path) { } /** - * Checks if the file is a MOB reference file, - * created by snapshot + * Checks if the file is a MOB reference file, created by snapshot * @param path path to a file * @return true, if - yes, false otherwise */ @@ -477,7 +470,6 @@ public static boolean isMobRefFile(final Path path) { return m.matches() && m.groupCount() > 1; } - /** * @param path Path to check. * @return True if the path has format of a HStoreFile reference. @@ -526,7 +518,7 @@ public static Path getReferredToFile(final Path p) { // Build up new path with the referenced region in place of our current // region in the reference path. Also strip regionname suffix from name. return new Path(new Path(new Path(tableDir, otherRegion), p.getParent().getName()), - nameStrippedOfSuffix); + nameStrippedOfSuffix); } /* @@ -589,13 +581,13 @@ public static boolean isValid(final FileStatus fileStatus) throws IOException { * half of the reference file. This is just estimate, given midkey ofregion != midkey of HFile, * also the number and size of keys vary. If this estimate isn't good enough, we can improve it * later. 
- * @param fs The FileSystem + * @param fs The FileSystem * @param reference The reference - * @param status The reference FileStatus + * @param status The reference FileStatus * @return HDFS blocks distribution */ private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs, - final Reference reference, final FileStatus status) throws IOException { + final Reference reference, final FileStatus status) throws IOException { if (status == null) { return null; } @@ -633,8 +625,7 @@ public boolean equals(Object that) { if (initialPath == null && o.initialPath != null) { return false; } - if (initialPath != o.initialPath && initialPath != null - && !initialPath.equals(o.initialPath)) { + if (initialPath != o.initialPath && initialPath != null && !initialPath.equals(o.initialPath)) { return false; } if (reference != null && o.reference == null) { @@ -643,8 +634,7 @@ public boolean equals(Object that) { if (reference == null && o.reference != null) { return false; } - if (reference != o.reference && reference != null - && !reference.equals(o.reference)) { + if (reference != o.reference && reference != null && !reference.equals(o.reference)) { return false; } @@ -702,7 +692,7 @@ void initHDFSBlocksDistribution() throws IOException { } StoreFileReader preStoreFileReaderOpen(ReaderContext context, CacheConfig cacheConf) - throws IOException { + throws IOException { StoreFileReader reader = null; if (this.coprocessorHost != null) { reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), @@ -712,7 +702,7 @@ StoreFileReader preStoreFileReaderOpen(ReaderContext context, CacheConfig cacheC } StoreFileReader postStoreFileReaderOpen(ReaderContext context, CacheConfig cacheConf, - StoreFileReader reader) throws IOException { + StoreFileReader reader) throws IOException { StoreFileReader res = reader; if (this.coprocessorHost != null) { res = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java index a40b209c6ebb..f276f5d76a4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Optional; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.yetus.audience.InterfaceAudience; @@ -51,7 +49,7 @@ public interface StoreFileManager { * @param storeFiles The files to load. */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void loadFiles(List storeFiles); /** @@ -59,16 +57,16 @@ public interface StoreFileManager { * @param sfs New store files. 
*/ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void insertNewFiles(Collection sfs); /** * Adds only the new compaction results into the structure. * @param compactedFiles The input files for the compaction. - * @param results The resulting files for the compaction. + * @param results The resulting files for the compaction. */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void addCompactionResults(Collection compactedFiles, Collection results); /** @@ -76,7 +74,7 @@ public interface StoreFileManager { * @param compactedFiles the list of compacted files */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void removeCompactedFiles(Collection compactedFiles); /** @@ -86,24 +84,23 @@ public interface StoreFileManager { ImmutableCollection clearFiles(); /** - * Clears all the compacted files and returns them. This method is expected to be - * accessed single threaded. + * Clears all the compacted files and returns them. This method is expected to be accessed single + * threaded. * @return The files compacted previously. */ Collection clearCompactedFiles(); /** - * Gets the snapshot of the store files currently in use. Can be used for things like metrics - * and checks; should not assume anything about relations between store files in the list. + * Gets the snapshot of the store files currently in use. Can be used for things like metrics and + * checks; should not assume anything about relations between store files in the list. * @return The list of StoreFiles. */ Collection getStorefiles(); /** - * List of compacted files inside this store that needs to be excluded in reads - * because further new reads will be using only the newly created files out of compaction. - * These compacted files will be deleted/cleared once all the existing readers on these - * compacted files are done. + * List of compacted files inside this store that needs to be excluded in reads because further + * new reads will be using only the newly created files out of compaction. These compacted files + * will be deleted/cleared once all the existing readers on these compacted files are done. * @return the list of compacted files */ Collection getCompactedfiles(); @@ -123,34 +120,33 @@ public interface StoreFileManager { /** * Gets the store files to scan for a Scan or Get request. * @param startRow Start row of the request. - * @param stopRow Stop row of the request. + * @param stopRow Stop row of the request. * @return The list of files that are to be read for this request. */ Collection getFilesForScan(byte[] startRow, boolean includeStartRow, byte[] stopRow, - boolean includeStopRow); + boolean includeStopRow); /** * Gets initial, full list of candidate store files to check for row-key-before. * @param targetKey The key that is the basis of the search. 
- * @return The files that may have the key less than or equal to targetKey, in reverse - * order of new-ness, and preference for target key. + * @return The files that may have the key less than or equal to targetKey, in reverse order of + * new-ness, and preference for target key. */ Iterator getCandidateFilesForRowKeyBefore(KeyValue targetKey); /** * Updates the candidate list for finding row key before. Based on the list of candidates - * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate, - * may trim and reorder the list to remove the files where a better candidate cannot be found. - * @param candidateFiles The candidate files not yet checked for better candidates - return - * value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)}, - * with some files already removed. - * @param targetKey The key to search for. - * @param candidate The current best candidate found. + * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate, may + * trim and reorder the list to remove the files where a better candidate cannot be found. + * @param candidateFiles The candidate files not yet checked for better candidates - return value + * from {@link #getCandidateFilesForRowKeyBefore(KeyValue)}, with some files + * already removed. + * @param targetKey The key to search for. + * @param candidate The current best candidate found. * @return The list to replace candidateFiles. */ Iterator updateCandidateFilesForRowKeyBefore(Iterator candidateFiles, - KeyValue targetKey, Cell candidate); - + KeyValue targetKey, Cell candidate); /** * Gets the split point for the split of this set of store files (approx. middle). @@ -164,7 +160,7 @@ Iterator updateCandidateFilesForRowKeyBefore(Iterator ca int getStoreCompactionPriority(); /** - * @param maxTs Maximum expired timestamp. + * @param maxTs Maximum expired timestamp. * @param filesCompacting Files that are currently compacting. * @return The files which don't have any necessary data according to TTL and other criteria. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 32ee47e21f1c..8454f4ee79e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,14 +28,13 @@ import java.util.Optional; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockType; @@ -84,7 +83,7 @@ public class StoreFileReader { private final ReaderContext context; private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderContext context, - Configuration conf) { + Configuration conf) { this.reader = reader; bloomFilterType = BloomType.NONE; this.refCount = refCount; @@ -93,7 +92,7 @@ private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderConte } public StoreFileReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, - AtomicInteger refCount, Configuration conf) throws IOException { + AtomicInteger refCount, Configuration conf) throws IOException { this(HFile.createReader(context, fileInfo, cacheConf, conf), refCount, context, conf); } @@ -130,24 +129,24 @@ public CellComparator getComparator() { /** * Get a scanner to scan over this StoreFile. - * @param cacheBlocks should this scanner cache blocks? - * @param pread use pread (for highly concurrent small readers) - * @param isCompaction is scanner being used for compaction? - * @param scannerOrder Order of this scanner relative to other scanners. See - * {@link KeyValueScanner#getScannerOrder()}. + * @param cacheBlocks should this scanner cache blocks? + * @param pread use pread (for highly concurrent small readers) + * @param isCompaction is scanner being used for compaction? + * @param scannerOrder Order of this scanner relative to other scanners. See + * {@link KeyValueScanner#getScannerOrder()}. * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column, - * otherwise {@code false}. This is a hint for optimization. + * otherwise {@code false}. This is a hint for optimization. * @return a scanner */ public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { - return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), - !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { + return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); } /** - * Return the ref count associated with the reader whenever a scanner associated with the - * reader is opened. + * Return the ref count associated with the reader whenever a scanner associated with the reader + * is opened. */ int getRefCount() { return refCount.get(); @@ -178,11 +177,11 @@ void readCompleted() { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. 
Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level concepts. - * + * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner + * class/interface which is the preferred way to scan a store with higher level + * concepts. * @param cacheBlocks should we cache the blocks? - * @param pread use pread (for concurrent small readers) + * @param pread use pread (for concurrent small readers) * @return the underlying HFileScanner * @see HBASE-15296 */ @@ -193,21 +192,15 @@ public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks - * should we cache the blocks? - * @param pread - * use pread (for concurrent small readers) - * @param isCompaction - * is scanner being used for compaction? + * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner + * class/interface which is the preferred way to scan a store with higher level + * concepts. n * should we cache the blocks? n * use pread (for concurrent small + * readers) n * is scanner being used for compaction? * @return the underlying HFileScanner * @see HBASE-15296 */ @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction) { + public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) { return reader.getScanner(conf, cacheBlocks, pread, isCompaction); } @@ -216,33 +209,31 @@ public void close(boolean evictOnClose) throws IOException { } /** - * Check if this storeFile may contain keys within the TimeRange that - * have not expired (i.e. not older than oldestUnexpiredTS). - * @param tr the timeRange to restrict - * @param oldestUnexpiredTS the oldest timestamp that is not expired, as - * determined by the column family's TTL + * Check if this storeFile may contain keys within the TimeRange that have not expired (i.e. not + * older than oldestUnexpiredTS). + * @param tr the timeRange to restrict + * @param oldestUnexpiredTS the oldest timestamp that is not expired, as determined by the column + * family's TTL * @return false if queried keys definitely don't exist in this StoreFile */ boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { - return this.timeRange == null? true: - this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; + return this.timeRange == null + ? true + : this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; } /** - * Checks whether the given scan passes the Bloom filter (if present). Only - * checks Bloom filters for single-row or single-row-column scans. Bloom - * filter checking for multi-gets is implemented as part of the store - * scanner system (see {@link StoreFileScanner#seek(Cell)} and uses - * the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} - * and {@link #passesGeneralRowColBloomFilter(Cell)}. - * - * @param scan the scan specification. Used to determine the row, and to - * check whether this is a single-row ("get") scan. - * @param columns the set of columns. Only used for row-column Bloom - * filters. 
- * @return true if the scan with the given column set passes the Bloom - * filter, or if the Bloom filter is not applicable for the scan. - * False if the Bloom filter is applicable and the scan fails it. + * Checks whether the given scan passes the Bloom filter (if present). Only checks Bloom filters + * for single-row or single-row-column scans. Bloom filter checking for multi-gets is implemented + * as part of the store scanner system (see {@link StoreFileScanner#seek(Cell)} and uses the + * lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} and + * {@link #passesGeneralRowColBloomFilter(Cell)}. + * @param scan the scan specification. Used to determine the row, and to check whether this is + * a single-row ("get") scan. + * @param columns the set of columns. Only used for row-column Bloom filters. + * @return true if the scan with the given column set passes the Bloom filter, or if the Bloom + * filter is not applicable for the scan. False if the Bloom filter is applicable and the + * scan fails it. */ boolean passesBloomFilter(Scan scan, final SortedSet columns) { byte[] row = scan.getStartRow(); @@ -274,8 +265,7 @@ boolean passesBloomFilter(Scan scan, final SortedSet columns) { } } - public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, - int rowLen) { + public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, int rowLen) { // Cache Bloom filter as a local variable in case it is set to null by // another thread on an IO error. BloomFilter bloomFilter = this.deleteFamilyBloomFilter; @@ -295,8 +285,7 @@ public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, } return bloomFilter.contains(row, rowOffset, rowLen, null); } catch (IllegalArgumentException e) { - LOG.error("Bad Delete Family bloom filter data -- proceeding without", - e); + LOG.error("Bad Delete Family bloom filter data -- proceeding without", e); setDeleteFamilyBloomFilterFaulty(); } @@ -304,9 +293,8 @@ public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. * @return True if passes */ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLen) { @@ -318,19 +306,15 @@ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLe // Used in ROW bloom byte[] key = null; if (rowOffset != 0 || rowLen != row.length) { - throw new AssertionError( - "For row-only Bloom filters the row must occupy the whole array"); + throw new AssertionError("For row-only Bloom filters the row must occupy the whole array"); } key = row; return checkGeneralBloomFilter(key, null, bloomFilter); } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * - * @param cell - * the cell to check if present in BloomFilter + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. n * the cell to check if present in BloomFilter * @return True if passes */ public boolean passesGeneralRowColBloomFilter(Cell cell) { @@ -350,9 +334,8 @@ public boolean passesGeneralRowColBloomFilter(Cell cell) { } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * + * A method for checking Bloom filters. 
Called directly from StoreFileScanner in case of a + * multi-column query. * @return True if passes */ private boolean passesGeneralRowPrefixBloomFilter(Scan scan) { @@ -369,7 +352,7 @@ private boolean passesGeneralRowPrefixBloomFilter(Scan scan) { // For non-get scans // Find out the common prefix of startRow and stopRow. int commonLength = Bytes.findCommonPrefix(scan.getStartRow(), scan.getStopRow(), - scan.getStartRow().length, scan.getStopRow().length, 0, 0); + scan.getStartRow().length, scan.getStopRow().length, 0, 0); // startRow and stopRow don't have the common prefix. // Or the common prefix length is less than prefixLength if (commonLength <= 0 || commonLength < prefixLength) { @@ -406,7 +389,7 @@ private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloo // a sufficient condition to return false. boolean keyIsAfterLast = (lastBloomKey != null); // hbase:meta does not have blooms. So we need not have special interpretation - // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom + // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom if (keyIsAfterLast) { if (bloomFilterType == BloomType.ROWCOL) { keyIsAfterLast = (CellComparator.getInstance().compare(kvKey, lastBloomKeyOnlyKV)) > 0; @@ -422,25 +405,24 @@ private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloo // required looking only for a row bloom. Cell rowBloomKey = PrivateCellUtil.createFirstOnRow(kvKey); // hbase:meta does not have blooms. So we need not have special interpretation - // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom - if (keyIsAfterLast - && (CellComparator.getInstance().compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) { + // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom + if ( + keyIsAfterLast + && (CellComparator.getInstance().compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0 + ) { exists = false; } else { - exists = - bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) || - bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL); + exists = bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) + || bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL); } } else { - exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); + exists = !keyIsAfterLast && bloomFilter.contains(key, 0, key.length, bloom); } return exists; } } catch (IOException e) { - LOG.error("Error reading bloom filter data -- proceeding without", - e); + LOG.error("Error reading bloom filter data -- proceeding without", e); setGeneralBloomFilterFaulty(); } catch (IllegalArgumentException e) { LOG.error("Bad bloom filter data -- proceeding without", e); @@ -466,23 +448,25 @@ public boolean passesKeyRangeFilter(Scan scan) { // the file is empty return false; } - if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) && - Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { + if ( + Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) + && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) + ) { return true; } byte[] smallestScanRow = scan.isReversed() ? scan.getStopRow() : scan.getStartRow(); byte[] largestScanRow = scan.isReversed() ? scan.getStartRow() : scan.getStopRow(); - boolean nonOverLapping = (getComparator() - .compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0 && - !Bytes.equals(scan.isReversed() ? 
scan.getStartRow() : scan.getStopRow(), - HConstants.EMPTY_END_ROW)) || - getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, - smallestScanRow.length) < 0; + boolean nonOverLapping = + (getComparator().compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0 + && !Bytes.equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), + HConstants.EMPTY_END_ROW)) + || getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, smallestScanRow.length) + < 0; return !nonOverLapping; } public Map loadFileInfo() throws IOException { - Map fi = reader.getHFileInfo(); + Map fi = reader.getHFileInfo(); byte[] b = fi.get(BLOOM_FILTER_TYPE_KEY); if (b != null) { @@ -490,12 +474,12 @@ public Map loadFileInfo() throws IOException { } byte[] p = fi.get(BLOOM_FILTER_PARAM_KEY); - if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) { + if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) { prefixLength = Bytes.toInt(p); } lastBloomKey = fi.get(LAST_BLOOM_KEY); - if(bloomFilterType == BloomType.ROWCOL) { + if (bloomFilterType == BloomType.ROWCOL) { lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue(lastBloomKey, 0, lastBloomKey.length); } byte[] cnt = fi.get(DELETE_FAMILY_COUNT); @@ -514,48 +498,41 @@ public void loadBloomfilter() { public void loadBloomfilter(BlockType blockType) { try { if (blockType == BlockType.GENERAL_BLOOM_META) { - if (this.generalBloomFilter != null) - return; // Bloom has been loaded + if (this.generalBloomFilter != null) return; // Bloom has been loaded DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); if (bloomMeta != null) { // sanity check for NONE Bloom filter if (bloomFilterType == BloomType.NONE) { - throw new IOException( - "valid bloom filter type not found in FileInfo"); + throw new IOException("valid bloom filter type not found in FileInfo"); } else { - generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, - reader); + generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); if (LOG.isTraceEnabled()) { LOG.trace("Loaded " + bloomFilterType.toString() + " " - + generalBloomFilter.getClass().getSimpleName() - + " metadata for " + reader.getName()); + + generalBloomFilter.getClass().getSimpleName() + " metadata for " + + reader.getName()); } } } } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - if (this.deleteFamilyBloomFilter != null) - return; // Bloom has been loaded + if (this.deleteFamilyBloomFilter != null) return; // Bloom has been loaded DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); if (bloomMeta != null) { - deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( - bloomMeta, reader); - LOG.info("Loaded Delete Family Bloom (" - + deleteFamilyBloomFilter.getClass().getSimpleName() + deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + LOG.info( + "Loaded Delete Family Bloom (" + deleteFamilyBloomFilter.getClass().getSimpleName() + ") metadata for " + reader.getName()); } } else { - throw new RuntimeException("Block Type: " + blockType.toString() - + "is not supported for Bloom filter"); + throw new RuntimeException( + "Block Type: " + blockType.toString() + "is not supported for Bloom filter"); } } catch (IOException e) { - LOG.error("Error reading bloom filter meta for " + blockType - + " -- proceeding without", e); + LOG.error("Error reading bloom filter meta for " + blockType + " -- proceeding without", e); setBloomFilterFaulty(blockType); } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter 
meta " + blockType - + " -- proceeding without", e); + LOG.error("Bad bloom filter meta " + blockType + " -- proceeding without", e); setBloomFilterFaulty(blockType); } } @@ -569,15 +546,12 @@ private void setBloomFilterFaulty(BlockType blockType) { } /** - * The number of Bloom filter entries in this store file, or an estimate - * thereof, if the Bloom filter is not loaded. This always returns an upper - * bound of the number of Bloom filter entries. - * + * The number of Bloom filter entries in this store file, or an estimate thereof, if the Bloom + * filter is not loaded. This always returns an upper bound of the number of Bloom filter entries. * @return an estimate of the number of Bloom filter entries in this file */ public long getFilterEntries() { - return generalBloomFilter != null ? generalBloomFilter.getKeyCount() - : reader.getEntries(); + return generalBloomFilter != null ? generalBloomFilter.getKeyCount() : reader.getEntries(); } public void setGeneralBloomFilterFaulty() { @@ -653,8 +627,7 @@ long getUncompressedDataIndexSize() { } public long getTotalBloomSize() { - if (generalBloomFilter == null) - return 0; + if (generalBloomFilter == null) return 0; return generalBloomFilter.getByteSize(); } @@ -676,7 +649,7 @@ void disableBloomFilterForTesting() { } public long getMaxTimestamp() { - return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP: timeRange.getMax(); + return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP : timeRange.getMax(); } boolean isSkipResetSeqId() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 6e70c5b68de9..ce2a3d6f249a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.FileNotFoundException; @@ -28,23 +26,21 @@ import java.util.Optional; import java.util.PriorityQueue; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * KeyValueScanner adaptor over the Reader. It also provides hooks into - * bloom filter things. + * KeyValueScanner adaptor over the Reader. It also provides hooks into bloom filter things. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX) @InterfaceStability.Evolving @@ -77,16 +73,18 @@ public class StoreFileScanner implements KeyValueScanner { /** * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} - * @param useMVCC If true, scanner will filter out updates with MVCC larger than {@code readPt}. - * @param readPt MVCC value to use to filter out the updates newer than this scanner. - * @param hasMVCC Set to true if underlying store file reader has MVCC info. - * @param scannerOrder Order of the scanner relative to other scanners. See - * {@link KeyValueScanner#getScannerOrder()}. + * @param useMVCC If true, scanner will filter out updates with MVCC larger + * than {@code readPt}. + * @param readPt MVCC value to use to filter out the updates newer than this + * scanner. + * @param hasMVCC Set to true if underlying store file reader has MVCC info. + * @param scannerOrder Order of the scanner relative to other scanners. See + * {@link KeyValueScanner#getScannerOrder()}. * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column, - * otherwise {@code false}. This is a hint for optimization. + * otherwise {@code false}. This is a hint for optimization. */ public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC, - boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { + boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { this.readPt = readPt; this.reader = reader; this.hfs = hfs; @@ -101,8 +99,8 @@ public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVC * Return an array of scanners corresponding to the given set of store files. */ public static List getScannersForStoreFiles(Collection files, - boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean useDropBehind, - long readPt) throws IOException { + boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean useDropBehind, long readPt) + throws IOException { return getScannersForStoreFiles(files, cacheBlocks, usePread, isCompaction, useDropBehind, null, readPt); } @@ -112,15 +110,15 @@ public static List getScannersForStoreFiles(Collection getScannersForStoreFiles(Collection files, - boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop, - ScanQueryMatcher matcher, long readPt) throws IOException { + boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop, + ScanQueryMatcher matcher, long readPt) throws IOException { if (files.isEmpty()) { return Collections.emptyList(); } List scanners = new ArrayList<>(files.size()); boolean canOptimizeForNonNullColumn = matcher != null ? !matcher.hasNullColumnInQuery() : false; PriorityQueue sortedFiles = - new PriorityQueue<>(files.size(), StoreFileComparators.SEQ_ID); + new PriorityQueue<>(files.size(), StoreFileComparators.SEQ_ID); for (HStoreFile file : files) { // The sort function needs metadata so we need to open reader first before sorting the list. 
file.initReader(); @@ -135,7 +133,7 @@ public static List getScannersForStoreFiles(Collection getScannersForStoreFiles(Collection getScannersForCompaction(Collection files, - boolean canUseDropBehind, long readPt) throws IOException { + boolean canUseDropBehind, long readPt) throws IOException { List scanners = new ArrayList<>(files.size()); List sortedFiles = new ArrayList<>(files); Collections.sort(sortedFiles, StoreFileComparators.SEQ_ID); @@ -201,7 +199,7 @@ public Cell next() throws IOException { } } catch (FileNotFoundException e) { throw e; - } catch(IOException e) { + } catch (IOException e) { throw new IOException("Could not iterate " + this, e); } return retKey; @@ -213,7 +211,7 @@ public boolean seek(Cell key) throws IOException { try { try { - if(!seekAtOrAfter(hfs, key)) { + if (!seekAtOrAfter(hfs, key)) { this.cur = null; return false; } @@ -258,8 +256,7 @@ public boolean reseek(Cell key) throws IOException { } catch (FileNotFoundException e) { throw e; } catch (IOException ioe) { - throw new IOException("Could not reseek " + this + " to key " + key, - ioe); + throw new IOException("Could not reseek " + this + " to key " + key, ioe); } } @@ -274,13 +271,12 @@ protected boolean skipKVsNewerThanReadpoint() throws IOException { // We want to ignore all key-values that are newer than our current // readPoint Cell startKV = cur; - while(enforceMVCC - && cur != null - && (cur.getSequenceId() > readPt)) { + while (enforceMVCC && cur != null && (cur.getSequenceId() > readPt)) { boolean hasNext = hfs.next(); setCurrentCell(hfs.getCell()); - if (hasNext && this.stopSkippingKVsIfNextRow - && getComparator().compareRows(cur, startKV) > 0) { + if ( + hasNext && this.stopSkippingKVsIfNextRow && getComparator().compareRows(cur, startKV) > 0 + ) { return false; } } @@ -304,23 +300,18 @@ public void close() { } /** - * - * @param s - * @param k - * @return false if not found or if k is after the end. - * @throws IOException + * nn * @return false if not found or if k is after the end. n */ - public static boolean seekAtOrAfter(HFileScanner s, Cell k) - throws IOException { + public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { int result = s.seekTo(k); - if(result < 0) { + if (result < 0) { if (result == HConstants.INDEX_KEY_MAGIC) { // using faked key return true; } // Passed KV is smaller than first KV in file, work from start of file return s.seekTo(); - } else if(result > 0) { + } else if (result > 0) { // Passed KV is larger than current KV in file, if there is a next // it is the "after", if not then this scanner is done. return s.next(); @@ -329,9 +320,8 @@ public static boolean seekAtOrAfter(HFileScanner s, Cell k) return true; } - static boolean reseekAtOrAfter(HFileScanner s, Cell k) - throws IOException { - //This function is similar to seekAtOrAfter function + static boolean reseekAtOrAfter(HFileScanner s, Cell k) throws IOException { + // This function is similar to seekAtOrAfter function int result = s.reseekTo(k); if (result <= 0) { if (result == HConstants.INDEX_KEY_MAGIC) { @@ -342,7 +332,7 @@ static boolean reseekAtOrAfter(HFileScanner s, Cell k) // than first KV in file, and it is the first time we seek on this file. // So we also need to work from the start of file. if (!s.isSeeked()) { - return s.seekTo(); + return s.seekTo(); } return true; } @@ -360,22 +350,19 @@ public long getScannerOrder() { } /** - * Pretend we have done a seek but don't do it yet, if possible. 
The hope is - * that we find requested columns in more recent files and won't have to seek - * in older files. Creates a fake key/value with the given row/column and the - * highest (most recent) possible timestamp we might get from this file. When - * users of such "lazy scanner" need to know the next KV precisely (e.g. when - * this scanner is at the top of the heap), they run {@link #enforceSeek()}. + * Pretend we have done a seek but don't do it yet, if possible. The hope is that we find + * requested columns in more recent files and won't have to seek in older files. Creates a fake + * key/value with the given row/column and the highest (most recent) possible timestamp we might + * get from this file. When users of such "lazy scanner" need to know the next KV precisely (e.g. + * when this scanner is at the top of the heap), they run {@link #enforceSeek()}. *

    - * Note that this function does guarantee that the current KV of this scanner - * will be advanced to at least the given KV. Because of this, it does have - * to do a real seek in cases when the seek timestamp is older than the - * highest timestamp of the file, e.g. when we are trying to seek to the next - * row/column and use OLDEST_TIMESTAMP in the seek key. + * Note that this function does guarantee that the current KV of this scanner will be advanced to + * at least the given KV. Because of this, it does have to do a real seek in cases when the seek + * timestamp is older than the highest timestamp of the file, e.g. when we are trying to seek to + * the next row/column and use OLDEST_TIMESTAMP in the seek key. */ @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) - throws IOException { + public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { if (kv.getFamilyLength() == 0) { useBloom = false; } @@ -385,9 +372,10 @@ public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) // check ROWCOL Bloom filter first. if (reader.getBloomFilterType() == BloomType.ROWCOL) { haveToSeek = reader.passesGeneralRowColBloomFilter(kv); - } else if (canOptimizeForNonNullColumn - && ((PrivateCellUtil.isDeleteFamily(kv) - || PrivateCellUtil.isDeleteFamilyVersion(kv)))) { + } else if ( + canOptimizeForNonNullColumn + && ((PrivateCellUtil.isDeleteFamily(kv) || PrivateCellUtil.isDeleteFamilyVersion(kv))) + ) { // if there is no such delete family kv in the store file, // then no need to seek. haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), @@ -450,8 +438,7 @@ public boolean realSeekDone() { @Override public void enforceSeek() throws IOException { - if (realSeekDone) - return; + if (realSeekDone) return; if (delayedReseek) { reseek(delayedSeekKV); @@ -487,8 +474,9 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) if (timeRange == null) { timeRange = scan.getTimeRange(); } - return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) && reader - .passesKeyRangeFilter(scan) && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf)); + return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) + && reader.passesKeyRangeFilter(scan) + && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf)); } @Override @@ -521,8 +509,7 @@ public boolean seekToPreviousRow(Cell originalKey) throws IOException { } finally { this.stopSkippingKVsIfNextRow = false; } - if (!resultOfSkipKVs - || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { + if (!resultOfSkipKVs || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { keepSeeking = true; key = firstKeyOfPreviousRow; continue; @@ -537,8 +524,7 @@ public boolean seekToPreviousRow(Cell originalKey) throws IOException { } catch (FileNotFoundException e) { throw e; } catch (IOException ioe) { - throw new IOException("Could not seekToPreviousRow " + this + " to key " - + originalKey, ioe); + throw new IOException("Could not seekToPreviousRow " + this + " to key " + originalKey, ioe); } } @@ -559,8 +545,7 @@ public boolean seekToLastRow() throws IOException { @Override public boolean backwardSeek(Cell key) throws IOException { seek(key); - if (cur == null - || getComparator().compareRows(cur, key) > 0) { + if (cur == null || getComparator().compareRows(cur, key) > 0) { return seekToPreviousRow(key); } return true; diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index dfca882e62cb..de32c270565b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,8 +73,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A StoreFile writer. Use this to read/write HBase Store Files. It is package - * local because it is an implementation detail of the HBase regionserver. + * A StoreFile writer. Use this to read/write HBase Store Files. It is package local because it is + * an implementation detail of the HBase regionserver. */ @InterfaceAudience.Private public class StoreFileWriter implements CellSink, ShipperListener { @@ -95,13 +95,12 @@ public class StoreFileWriter implements CellSink, ShipperListener { /** * Creates an HFile.Writer that also write helpful meta data. - * * @param fs file system to write to * @param path file name to create * @param conf user configuration * @param bloomType bloom filter setting - * @param maxKeys the expected maximum number of keys to be added. Was used - * for Bloom filter size in {@link HFile} format version 1. + * @param maxKeys the expected maximum number of keys to be added. Was used for + * Bloom filter size in {@link HFile} format version 1. * @param favoredNodes an array of favored nodes or possibly null * @param fileContext The HFile context * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file. 
@@ -109,31 +108,28 @@ public class StoreFileWriter implements CellSink, ShipperListener { * @throws IOException problem writing to FS */ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, - BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, - boolean shouldDropCacheBehind, Supplier> compactedFilesSupplier) - throws IOException { + BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, + boolean shouldDropCacheBehind, Supplier> compactedFilesSupplier) + throws IOException { this.compactedFilesSupplier = compactedFilesSupplier; this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); // TODO : Change all writers to be specifically created for compaction context - writer = HFile.getWriterFactory(conf, cacheConf) - .withPath(fs, path) - .withFavoredNodes(favoredNodes) - .withFileContext(fileContext) - .withShouldDropCacheBehind(shouldDropCacheBehind) - .create(); + writer = + HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes) + .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create(); - generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( - conf, cacheConf, bloomType, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, + bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); if (generalBloomFilterWriter != null) { this.bloomType = bloomType; this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf); if (LOG.isTraceEnabled()) { LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: " - + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH? - Bytes.toInt(bloomParam):Bytes.toStringBinary(bloomParam)) - + ", " + generalBloomFilterWriter.getClass().getSimpleName()); + + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH + ? Bytes.toInt(bloomParam) + : Bytes.toStringBinary(bloomParam)) + + ", " + generalBloomFilterWriter.getClass().getSimpleName()); } // init bloom context switch (bloomType) { @@ -151,7 +147,7 @@ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, Cach break; default: throw new IOException( - "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)"); + "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)"); } } else { // Not using Bloom filters. 
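For reference, whether the constructor above gets a general Bloom writer at all is driven by the column family's configured Bloom type. A minimal sketch of setting that at table-creation time (table and family names are made up); note that choosing ROWCOL also suppresses the separate Delete Family Bloom filter, as the hunk just below shows.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableWithBloomType {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection();
        Admin admin = connection.getAdmin()) {
      // The family-level Bloom type (ROW, ROWCOL, ROWPREFIX_FIXED_LENGTH or NONE)
      // decides which general Bloom filter StoreFileWriter builds per HFile.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setBloomFilterType(BloomType.ROWCOL)
              .build())
          .build());
    }
  }
}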
@@ -161,45 +157,43 @@ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, Cach // initialize delete family Bloom filter when there is NO RowCol Bloom // filter if (this.bloomType != BloomType.ROWCOL) { - this.deleteFamilyBloomFilterWriter = BloomFilterFactory - .createDeleteBloomAtWrite(conf, cacheConf, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf, + cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); deleteFamilyBloomContext = new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator()); } else { deleteFamilyBloomFilterWriter = null; } if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) { - LOG.trace("Delete Family Bloom filter type for " + path + ": " + - deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + LOG.trace("Delete Family Bloom filter type for " + path + ": " + + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); } } public long getPos() throws IOException { return ((HFileWriterImpl) writer).getPos(); } + /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction * @throws IOException problem writing to FS */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) - throws IOException { + throws IOException { appendMetadata(maxSequenceId, majorCompaction, Collections.emptySet()); } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction - * @param storeFiles The compacted store files to generate this new file + * @param storeFiles The compacted store files to generate this new file * @throws IOException problem writing to FS */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, - final Collection storeFiles) throws IOException { + final Collection storeFiles) throws IOException { writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); writer.appendFileInfo(COMPACTION_EVENT_KEY, toCompactionEventTrackerBytes(storeFiles)); @@ -213,16 +207,14 @@ public void appendMetadata(final long maxSequenceId, final boolean majorCompacti * recursively. If file A, B, C compacted to new file D, and file D compacted to new file E, will * write A, B, C, D to file E's compacted files. So if file E compacted to new file F, will add E * to F's compacted files first, then add E's compacted files: A, B, C, D to it. And no need to - * add D's compacted file, as D's compacted files has been in E's compacted files, too. - * See HBASE-20724 for more details. - * + * add D's compacted file, as D's compacted files has been in E's compacted files, too. See + * HBASE-20724 for more details. 
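The HBASE-20724 rule spelled out above amounts to a one-level transitive union: a new file records its direct inputs plus whatever those inputs had already recorded. A toy sketch of that bookkeeping (class and method names here are hypothetical, not the HBase types):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class CompactionEventSketch {

  /**
   * For a file produced by compacting the given inputs, record the inputs plus everything
   * the inputs themselves already listed as compacted away. No deeper recursion is needed
   * because each input already carries its own transitive set.
   */
  static Set<String> compactedFilesFor(Set<String> inputs, Map<String, Set<String>> tracked) {
    Set<String> result = new HashSet<>(inputs);
    for (String input : inputs) {
      Set<String> earlier = tracked.get(input);
      if (earlier != null) {
        result.addAll(earlier);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // A, B, C compact to D; later D compacts to E: E should record A, B, C and D.
    Map<String, Set<String>> tracked = new HashMap<>();
    tracked.put("D", new HashSet<>(Arrays.asList("A", "B", "C")));
    System.out.println(compactedFilesFor(new HashSet<>(Arrays.asList("D")), tracked));
  }
}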
* @param storeFiles The compacted store files to generate this new file * @return bytes of CompactionEventTracker */ private byte[] toCompactionEventTrackerBytes(Collection storeFiles) { - Set notArchivedCompactedStoreFiles = - this.compactedFilesSupplier.get().stream().map(sf -> sf.getPath().getName()) - .collect(Collectors.toSet()); + Set notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream() + .map(sf -> sf.getPath().getName()).collect(Collectors.toSet()); Set compactedStoreFiles = new HashSet<>(); for (HStoreFile storeFile : storeFiles) { compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName()); @@ -236,15 +228,14 @@ private byte[] toCompactionEventTrackerBytes(Collection storeFiles) } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction - * @param mobCellsCount The number of mob cells. + * @param mobCellsCount The number of mob cells. * @throws IOException problem writing to FS */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, - final long mobCellsCount) throws IOException { + final long mobCellsCount) throws IOException { writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount)); @@ -271,10 +262,8 @@ public void appendTrackedTimestampsToMetadata() throws IOException { } /** - * Record the earlest Put timestamp. - * - * If the timeRangeTracker is not set, - * update TimeRangeTracker to include the timestamp of this key + * Record the earlest Put timestamp. If the timeRangeTracker is not set, update TimeRangeTracker + * to include the timestamp of this key */ public void trackTimestamps(final Cell cell) { if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { @@ -287,19 +276,15 @@ private void appendGeneralBloomfilter(final Cell cell) throws IOException { if (this.generalBloomFilterWriter != null) { /* * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png - * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp - * - * 3 Types of Filtering: - * 1. Row = Row - * 2. RowCol = Row + Qualifier - * 3. RowPrefixFixedLength = Fixed Length Row Prefix + * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp 3 Types of + * Filtering: 1. Row = Row 2. RowCol = Row + Qualifier 3. RowPrefixFixedLength = Fixed Length + * Row Prefix */ bloomContext.writeBloom(cell); } } - private void appendDeleteFamilyBloomFilter(final Cell cell) - throws IOException { + private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { return; } @@ -342,7 +327,6 @@ public boolean hasGeneralBloom() { /** * For unit testing only. - * * @return the Bloom filter used by this writer. */ BloomFilterWriter getGeneralBloomWriter() { @@ -396,9 +380,9 @@ public void close() throws IOException { // Log final Bloom filter statistics. This needs to be done after close() // because compound Bloom filters might be finalized as part of closing. if (LOG.isTraceEnabled()) { - LOG.trace((hasGeneralBloom ? 
"" : "NO ") + "General Bloom and " + - (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + - getPath()); + LOG.trace( + (hasGeneralBloom ? "" : "NO ") + "General Bloom and " + (hasDeleteFamilyBloom ? "" : "NO ") + + "DeleteFamily" + " was added to HFile " + getPath()); } } @@ -407,7 +391,8 @@ public void appendFileInfo(byte[] key, byte[] value) throws IOException { writer.appendFileInfo(key, value); } - /** For use in testing. + /** + * For use in testing. */ HFile.Writer getHFileWriter() { return writer; @@ -424,8 +409,8 @@ static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOExceptio return new Path(dir, dash.matcher(UUID.randomUUID().toString()).replaceAll("")); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", - justification="Will not overflow") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", + justification = "Will not overflow") public static class Builder { private final Configuration conf; private final CacheConfig cacheConf; @@ -464,9 +449,8 @@ public Builder(Configuration conf, FileSystem fs) { /** * Use either this method or {@link #withFilePath}, but not both. - * @param dir Path to column family directory. The directory is created if - * does not exist. The file is given a unique name within this - * directory. + * @param dir Path to column family directory. The directory is created if does not exist. The + * file is given a unique name within this directory. * @return this (for chained invocation) */ public Builder withOutputDir(Path dir) { @@ -520,8 +504,8 @@ public Builder withShouldDropCacheBehind(boolean shouldDropCacheBehind) { return this; } - public Builder withCompactedFilesSupplier( - Supplier> compactedFilesSupplier) { + public Builder + withCompactedFilesSupplier(Supplier> compactedFilesSupplier) { this.compactedFilesSupplier = compactedFilesSupplier; return this; } @@ -537,14 +521,12 @@ public Builder withWriterCreationTracker(Consumer writerCreationTracker) { } /** - * Create a store file writer. Client is responsible for closing file when - * done. If metadata, add BEFORE closing using - * {@link StoreFileWriter#appendMetadata}. + * Create a store file writer. Client is responsible for closing file when done. If metadata, + * add BEFORE closing using {@link StoreFileWriter#appendMetadata}. */ public StoreFileWriter build() throws IOException { if ((dir == null ? 0 : 1) + (filePath == null ? 
0 : 1) != 1) { - throw new IllegalArgumentException("Either specify parent directory " + - "or file path"); + throw new IllegalArgumentException("Either specify parent directory " + "or file path"); } if (dir == null) { @@ -592,14 +574,8 @@ public StoreFileWriter build() throws IOException { if (writerCreationTracker != null) { writerCreationTracker.accept(filePath); } - return new StoreFileWriter( - fs, - filePath, - conf, - cacheConf, - bloomType, - maxKeyCount, - favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); + return new StoreFileWriter(fs, filePath, conf, cacheConf, bloomType, maxKeyCount, + favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java index e53fdc0de2a6..07db4427641d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; @@ -25,56 +24,43 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A package protected interface for a store flushing. - * A store flush context carries the state required to prepare/flush/commit the store's cache. + * A package protected interface for a store flushing. A store flush context carries the state + * required to prepare/flush/commit the store's cache. */ @InterfaceAudience.Private interface StoreFlushContext { /** - * Prepare for a store flush (create snapshot) - * Requires pausing writes. - * A very short operation. + * Prepare for a store flush (create snapshot) Requires pausing writes. A very short operation. * @return The size of snapshot to flush */ MemStoreSize prepare(); /** - * Flush the cache (create the new store file) - * - * A length operation which doesn't require locking out any function - * of the store. - * + * Flush the cache (create the new store file) A length operation which doesn't require locking + * out any function of the store. * @throws IOException in case the flush fails */ void flushCache(MonitoredTask status) throws IOException; /** - * Commit the flush - add the store file to the store and clear the - * memstore snapshot. - * - * Requires pausing scans. - * - * A very short operation - * - * @return whether compaction is required - * @throws IOException + * Commit the flush - add the store file to the store and clear the memstore snapshot. Requires + * pausing scans. A very short operation + * @return whether compaction is required n */ boolean commit(MonitoredTask status) throws IOException; /** - * Similar to commit, but called in secondary region replicas for replaying the - * flush cache from primary region. Adds the new files to the store, and drops the - * snapshot depending on dropMemstoreSnapshot argument. - * @param fileNames names of the flushed files - * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot - * @throws IOException + * Similar to commit, but called in secondary region replicas for replaying the flush cache from + * primary region. Adds the new files to the store, and drops the snapshot depending on + * dropMemstoreSnapshot argument. 
+ * @param fileNames names of the flushed files + * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot n */ void replayFlush(List fileNames, boolean dropMemstoreSnapshot) throws IOException; /** - * Abort the snapshot preparation. Drops the snapshot if any. - * @throws IOException + * Abort the snapshot preparation. Drops the snapshot if any. n */ void abort() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index c783100ed676..7aae769fda96 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; @@ -49,9 +48,10 @@ public StoreFlusher(Configuration conf, HStore store) { /** * Turns a snapshot of memstore into a set of store files. - * @param snapshot Memstore snapshot. - * @param cacheFlushSeqNum Log cache flush sequence number. - * @param status Task that represents the flush operation and may be updated with status. + * @param snapshot Memstore snapshot. + * @param cacheFlushSeqNum Log cache flush sequence number. + * @param status Task that represents the flush operation and may be updated with + * status. * @param throughputController A controller to avoid flush too fast * @return List of files written. Can be empty; must not be null. */ @@ -59,8 +59,8 @@ public abstract List flushSnapshot(MemStoreSnapshot snapshot, long cacheFl MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker, Consumer writerCreationTracker) throws IOException; - protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, - MonitoredTask status) throws IOException { + protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, MonitoredTask status) + throws IOException { // Write out the log sequence number that corresponds to this output // hfile. Also write current time in metadata as minFlushTime. // The hfile is current up to and including cacheFlushSeqNum. 
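The comment above restates the writer-side contract from StoreFileWriter: metadata such as the flush sequence id must be appended before close(). A hedged sketch of that lifecycle, assuming the Builder setters shown elsewhere in this patch (withOutputDir, withBloomType) plus a withFileContext setter, and using a made-up output directory:

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

// Illustrative only: append cells, track their timestamps, then append metadata
// strictly before close(), mirroring finalizeWriter() above.
final class StoreFileWriterSketch {
  static void writeCells(Configuration conf, FileSystem fs, Iterable<Cell> cells,
      long cacheFlushSeqNum) throws IOException {
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, fs)
        .withOutputDir(new Path("/hbase/data/default/t1/r1/cf")) // hypothetical directory
        .withBloomType(BloomType.NONE)
        .withFileContext(new HFileContextBuilder().build())
        .build();
    try {
      for (Cell cell : cells) {
        writer.append(cell);
        writer.trackTimestamps(cell); // feeds the time-range tracker
      }
      writer.appendMetadata(cacheFlushSeqNum, false); // not a major compaction
      writer.appendTrackedTimestampsToMetadata();     // both must precede close()
    } finally {
      writer.close();
    }
  }
}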
@@ -73,14 +73,10 @@ protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, protected final StoreFileWriter createWriter(MemStoreSnapshot snapshot, boolean alwaysIncludesTag, Consumer writerCreationTracker) throws IOException { return store.getStoreEngine() - .createWriter( - CreateStoreFileWriterParams.create() - .maxKeyCount(snapshot.getCellsCount()) - .compression(store.getColumnFamilyDescriptor().getCompressionType()) - .isCompaction(false) - .includeMVCCReadpoint(true) - .includesTag(alwaysIncludesTag || snapshot.isTagsPresent()) - .shouldDropBehind(false).writerCreationTracker(writerCreationTracker)); + .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(snapshot.getCellsCount()) + .compression(store.getColumnFamilyDescriptor().getCompressionType()).isCompaction(false) + .includeMVCCReadpoint(true).includesTag(alwaysIncludesTag || snapshot.isTagsPresent()) + .shouldDropBehind(false).writerCreationTracker(writerCreationTracker)); } /** @@ -97,7 +93,7 @@ protected final InternalScanner createScanner(List snapshotScan } final long smallestReadPoint = store.getSmallestReadPoint(); InternalScanner scanner = new StoreScanner(store, scanInfo, snapshotScanners, - ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, PrivateConstants.OLDEST_TIMESTAMP); + ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, PrivateConstants.OLDEST_TIMESTAMP); if (store.getCoprocessorHost() != null) { try { @@ -112,24 +108,24 @@ protected final InternalScanner createScanner(List snapshotScan /** * Performs memstore flush, writing data from scanner into sink. - * @param scanner Scanner to get data from. - * @param sink Sink to write data to. Could be StoreFile.Writer. + * @param scanner Scanner to get data from. + * @param sink Sink to write data to. Could be StoreFile.Writer. 
* @param throughputController A controller to avoid flush too fast */ protected void performFlush(InternalScanner scanner, CellSink sink, - ThroughputController throughputController) throws IOException { + ThroughputController throughputController) throws IOException { int compactionKVMax = - conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); + conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); List kvs = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); // no control on system table (such as meta, namespace, etc) flush boolean control = - throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); + throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); if (control) { throughputController.start(flushName); } @@ -148,7 +144,7 @@ protected void performFlush(InternalScanner scanner, CellSink sink, } while (hasMore); } catch (InterruptedException e) { throw new InterruptedIOException( - "Interrupted while control throughput of flushing " + flushName); + "Interrupted while control throughput of flushing " + flushName); } finally { if (control) { throughputController.finish(flushName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 6cc5e4010d3b..1a163aced7f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +60,7 @@ */ @InterfaceAudience.Private public class StoreScanner extends NonReversedNonLazyKeyValueScanner - implements KeyValueScanner, InternalScanner, ChangedReadersObserver { + implements KeyValueScanner, InternalScanner, ChangedReadersObserver { private static final Logger LOG = LoggerFactory.getLogger(StoreScanner.class); // In unit tests, the store could be null protected final HStore store; @@ -94,15 +93,15 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner long mixedReads; // 1) Collects all the KVHeap that are eagerly getting closed during the - // course of a scan + // course of a scan // 2) Collects the unused memstore scanners. If we close the memstore scanners - // before sending data to client, the chunk may be reclaimed by other - // updates and the data will be corrupt. + // before sending data to client, the chunk may be reclaimed by other + // updates and the data will be corrupt. private final List scannersForDelayedClose = new ArrayList<>(); /** - * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not - * KVs skipped via seeking to next row/column. TODO: estimate them? + * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not KVs skipped via + * seeking to next row/column. TODO: estimate them? 
*/ private long kvsScanned = 0; private Cell prevCell = null; @@ -113,7 +112,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** We don't ever expect to change this, the constant is just for clarity. */ static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true; public static final String STORESCANNER_PARALLEL_SEEK_ENABLE = - "hbase.storescanner.parallel.seek.enable"; + "hbase.storescanner.parallel.seek.enable"; /** Used during unit testing to ensure that lazy seek does save seek ops */ private static boolean lazySeekEnabledGlobally = LAZY_SEEK_ENABLED_BY_DEFAULT; @@ -124,7 +123,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * timeout checks. */ public static final String HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK = - "hbase.cells.scanned.per.heartbeat.check"; + "hbase.cells.scanned.per.heartbeat.check"; /** * Default value of {@link #HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK}. @@ -134,9 +133,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** * If the read type is Scan.ReadType.DEFAULT, we will start with pread, and if the kvs we scanned * reaches this limit, we will reopen the scanner with stream. The default value is 4 times of - * block size for this store. - * If configured with a value <0, for all scans with ReadType DEFAULT, we will open scanner with - * stream mode itself. + * block size for this store. If configured with a value <0, for all scans with ReadType DEFAULT, + * we will open scanner with stream mode itself. */ public static final String STORESCANNER_PREAD_MAX_BYTES = "hbase.storescanner.pread.max.bytes"; @@ -162,8 +160,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner private boolean topChanged = false; /** An internal constructor. */ - private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, - int numColumns, long readPt, boolean cacheBlocks, ScanType scanType) { + private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, long readPt, + boolean cacheBlocks, ScanType scanType) { this.readPt = readPt; this.store = store; this.cacheBlocks = cacheBlocks; @@ -179,8 +177,8 @@ private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, // the seek operation. However, we also look the row-column Bloom filter // for multi-row (non-"get") scans because this is not done in // StoreFile.passesBloomFilter(Scan, SortedSet). - this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) - && (store == null || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); + this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) && (store == null + || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); this.maxRowSize = scanInfo.getTableMaxRowSize(); this.preadMaxBytes = scanInfo.getPreadMaxBytes(); if (get) { @@ -223,18 +221,16 @@ private void addCurrentScanners(List scanners) { } /** - * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we - * are not in a compaction. - * - * @param store who we scan - * @param scan the spec - * @param columns which columns we are scanning - * @throws IOException + * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are not in a + * compaction. + * @param store who we scan + * @param scan the spec + * @param columns which columns we are scanning n */ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, - long readPt) throws IOException { - this(store, scan, scanInfo, columns != null ? 
columns.size() : 0, readPt, - scan.getCacheBlocks(), ScanType.USER_SCAN); + long readPt) throws IOException { + this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt, scan.getCacheBlocks(), + ScanType.USER_SCAN); if (columns != null && scan.isRaw()) { throw new DoNotRetryIOException("Cannot specify any column for a raw scan"); } @@ -281,12 +277,12 @@ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet * Opens a scanner across specified StoreFiles/MemStoreSegments. - * @param store who we scan - * @param scanners ancillary scanners + * @param store who we scan + * @param scanners ancillary scanners * @param smallestReadPoint the readPoint that we should use for tracking versions */ public StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException { + ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException { this(store, scanInfo, scanners, scanType, smallestReadPoint, earliestPutTs, null, null); } @@ -294,28 +290,28 @@ public StoreScanner(HStore store, ScanInfo scanInfo, List * Opens a scanner across specified StoreFiles. - * @param store who we scan - * @param scanners ancillary scanners - * @param smallestReadPoint the readPoint that we should use for tracking versions + * @param store who we scan + * @param scanners ancillary scanners + * @param smallestReadPoint the readPoint that we should use for tracking versions * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW. - * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. + * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. */ public StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, - byte[] dropDeletesToRow) throws IOException { + long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) + throws IOException { this(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, - earliestPutTs, dropDeletesFromRow, dropDeletesToRow); + earliestPutTs, dropDeletesFromRow, dropDeletesToRow); } private StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, - byte[] dropDeletesToRow) throws IOException { + ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, + byte[] dropDeletesToRow) throws IOException { this(store, SCAN_FOR_COMPACTION, scanInfo, 0, - store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType); + store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType); assert scanType != ScanType.USER_SCAN; matcher = - CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs, - oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); + CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs, + oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); // Filter the list of scanners using Bloom filters, time range, TTL, etc. 
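A reduced sketch of the selection step referenced in the comment above: each candidate scanner is asked, via the shouldUseScanner() hook shown earlier for StoreFileScanner, whether its file can still matter for this scan (time range, key range, Bloom filter). The real selectScannersFrom() additionally honours InternalScan's memstore-only and files-only flags.

package org.apache.hadoop.hbase.regionserver;

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Scan;

// Illustrative only: keep just the scanners whose backing file or segment
// could still contain cells visible to this scan.
final class SelectScannersSketch {
  static List<KeyValueScanner> select(Scan scan, HStore store, long oldestUnexpiredTS,
      List<KeyValueScanner> allScanners) {
    List<KeyValueScanner> kept = new ArrayList<>(allScanners.size());
    for (KeyValueScanner scanner : allScanners) {
      // For store files this consults passesTimerangeFilter, passesKeyRangeFilter
      // and passesBloomFilter, as shown in StoreFileScanner.shouldUseScanner above.
      if (scanner.shouldUseScanner(scan, store, oldestUnexpiredTS)) {
        kept.add(scanner);
      }
    }
    return kept;
  }
}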
scanners = selectScannersFrom(store, scanners); @@ -328,7 +324,7 @@ private StoreScanner(HStore store, ScanInfo scanInfo, List scanners) - throws IOException { + throws IOException { // Seek all scanners to the initial key seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled); addCurrentScanners(scanners); @@ -337,7 +333,7 @@ private void seekAllScanner(ScanInfo scanInfo, List s // For mob compaction only as we do not have a Store instance when doing mob compaction. public StoreScanner(ScanInfo scanInfo, ScanType scanType, - List scanners) throws IOException { + List scanners) throws IOException { this(null, SCAN_FOR_COMPACTION, scanInfo, 0, Long.MAX_VALUE, false, scanType); assert scanType != ScanType.USER_SCAN; this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, 0L, @@ -347,13 +343,13 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, // Used to instantiate a scanner for user scan in test StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, - List scanners, ScanType scanType) throws IOException { + List scanners, ScanType scanType) throws IOException { // 0 is passed as readpoint because the test bypasses Store this(null, scan, scanInfo, columns != null ? columns.size() : 0, 0L, scan.getCacheBlocks(), - scanType); + scanType); if (scanType == ScanType.USER_SCAN) { this.matcher = - UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); + UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); } else { this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); @@ -363,21 +359,21 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, // Used to instantiate a scanner for user scan in test StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, - List scanners) throws IOException { + List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store this(null, scan, scanInfo, columns != null ? columns.size() : 0, 0L, scan.getCacheBlocks(), - ScanType.USER_SCAN); + ScanType.USER_SCAN); this.matcher = - UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); + UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); seekAllScanner(scanInfo, scanners); } // Used to instantiate a scanner for compaction in test StoreScanner(ScanInfo scanInfo, int maxVersions, ScanType scanType, - List scanners) throws IOException { + List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store - this(null, maxVersions > 0 ? new Scan().readVersions(maxVersions) - : SCAN_FOR_COMPACTION, scanInfo, 0, 0L, false, scanType); + this(null, maxVersions > 0 ? 
new Scan().readVersions(maxVersions) : SCAN_FOR_COMPACTION, + scanInfo, 0, 0L, false, scanType); this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); seekAllScanner(scanInfo, scanners); @@ -386,17 +382,13 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, boolean isScanUsePread() { return this.scanUsePread; } + /** - * Seek the specified scanners with the given key - * @param scanners - * @param seekKey - * @param isLazy true if using lazy seek - * @param isParallelSeek true if using parallel seek - * @throws IOException + * Seek the specified scanners with the given key nn * @param isLazy true if using lazy seek + * @param isParallelSeek true if using parallel seek n */ - protected void seekScanners(List scanners, - Cell seekKey, boolean isLazy, boolean isParallelSeek) - throws IOException { + protected void seekScanners(List scanners, Cell seekKey, + boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the next matching Row). // Always check bloom filter to optimize the top row seek for delete @@ -410,8 +402,8 @@ protected void seekScanners(List scanners, long totalScannersSoughtBytes = 0; for (KeyValueScanner scanner : scanners) { if (matcher.isUserScan() && totalScannersSoughtBytes >= maxRowSize) { - throw new RowTooBigException("Max row size allowed: " + maxRowSize - + ", but row is bigger than that"); + throw new RowTooBigException( + "Max row size allowed: " + maxRowSize + ", but row is bigger than that"); } scanner.seek(seekKey); Cell c = scanner.peek(); @@ -425,14 +417,14 @@ protected void seekScanners(List scanners, } } - protected void resetKVHeap(List scanners, - CellComparator comparator) throws IOException { + protected void resetKVHeap(List scanners, CellComparator comparator) + throws IOException { // Combine all seeked scanners with a heap heap = newKVHeap(scanners, comparator); } protected KeyValueHeap newKVHeap(List scanners, - CellComparator comparator) throws IOException { + CellComparator comparator) throws IOException { return new KeyValueHeap(scanners, comparator); } @@ -442,7 +434,7 @@ protected KeyValueHeap newKVHeap(List scanners, * Will be overridden by testcase so declared as protected. */ protected List selectScannersFrom(HStore store, - List allScanners) { + List allScanners) { boolean memOnly; boolean filesOnly; if (scan instanceof InternalScan) { @@ -538,10 +530,8 @@ public boolean seek(Cell key) throws IOException { } /** - * Get the next row of values from this Store. - * @param outResult - * @param scannerContext - * @return true if there are more rows, false if scanner is done + * Get the next row of values from this Store. nn * @return true if there are more rows, false if + * scanner is done */ @Override public boolean next(List outResult, ScannerContext scannerContext) throws IOException { @@ -592,8 +582,10 @@ public boolean next(List outResult, ScannerContext scannerContext) throws // Or if the preadMaxBytes is reached and we may want to return so we can switch to stream // in // the shipped method below. 
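The comment above concerns the DEFAULT read type, where the store starts with pread and switches to stream once hbase.storescanner.pread.max.bytes worth of cells has been read. A client that wants a predictable mode can pin it on the Scan (row keys below are placeholders):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class PinnedReadTypeExample {
  public static void main(String[] args) {
    // STREAM avoids the mid-scan switch; PREAD forces positional reads throughout.
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-000"))
        .withStopRow(Bytes.toBytes("row-999"))
        .setReadType(Scan.ReadType.STREAM);
    System.out.println(scan);
  }
}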
- if (kvsScanned % cellsPerHeartbeatCheck == 0 - || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes)) { + if ( + kvsScanned % cellsPerHeartbeatCheck == 0 + || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes) + ) { if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { return scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues(); } @@ -643,8 +635,8 @@ public boolean next(List outResult, ScannerContext scannerContext) throws totalBytesRead += cellSize; /** - * Increment the metric if all the cells are from memstore. - * If not we will account it for mixed reads + * Increment the metric if all the cells are from memstore. If not we will account it + * for mixed reads */ onlyFromMemstore = onlyFromMemstore && heap.isLatestCellFromMemstore(); // Update the progress of the scanner context @@ -653,10 +645,10 @@ public boolean next(List outResult, ScannerContext scannerContext) throws if (matcher.isUserScan() && totalBytesRead > maxRowSize) { String message = "Max row size allowed: " + maxRowSize - + ", but the row is bigger than that, the row info: " - + CellUtil.toString(cell, false) + ", already have process row cells = " - + outResult.size() + ", it belong to region = " - + store.getHRegion().getRegionInfo().getRegionNameAsString(); + + ", but the row is bigger than that, the row info: " + + CellUtil.toString(cell, false) + ", already have process row cells = " + + outResult.size() + ", it belong to region = " + + store.getHRegion().getRegionInfo().getRegionNameAsString(); LOG.warn(message); throw new RowTooBigException(message); } @@ -740,8 +732,9 @@ public boolean next(List outResult, ScannerContext scannerContext) throws Cell nextKV = matcher.getNextKeyHint(cell); if (nextKV != null) { int difference = comparator.compare(nextKV, cell); - if (((!scan.isReversed() && difference > 0) - || (scan.isReversed() && difference < 0))) { + if ( + ((!scan.isReversed() && difference > 0) || (scan.isReversed() && difference < 0)) + ) { seekAsDirection(nextKV); NextState stateAfterSeekByHint = needToReturn(outResult); if (stateAfterSeekByHint != null) { @@ -793,15 +786,13 @@ private void updateMetricsStore(boolean memstoreRead) { } /** - * If the top cell won't be flushed into disk, the new top cell may be - * changed after #reopenAfterFlush. Because the older top cell only exist - * in the memstore scanner but the memstore scanner is replaced by hfile - * scanner after #reopenAfterFlush. If the row of top cell is changed, - * we should return the current cells. Otherwise, we may return - * the cells across different rows. + * If the top cell won't be flushed into disk, the new top cell may be changed after + * #reopenAfterFlush. Because the older top cell only exist in the memstore scanner but the + * memstore scanner is replaced by hfile scanner after #reopenAfterFlush. If the row of top cell + * is changed, we should return the current cells. Otherwise, we may return the cells across + * different rows. * @param outResult the cells which are visible for user scan - * @return null is the top cell doesn't change. Otherwise, the NextState - * to return + * @return null is the top cell doesn't change. Otherwise, the NextState to return */ private NextState needToReturn(List outResult) { if (!outResult.isEmpty() && topChanged) { @@ -829,30 +820,31 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException { /** * See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109). 
- * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, - * or seek to an arbitrary seek key. This method decides whether a seek is the most efficient - * _actual_ way to get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, - * SKIP inside the current, loaded block). - * It does this by looking at the next indexed key of the current HFile. This key - * is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible key - * on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work with - * the current Cell but compare as though it were a seek key; see down in - * matcher.compareKeyForNextRow, etc). If the compare gets us onto the - * next block we *_SEEK, otherwise we just SKIP to the next requested cell. - * - *

    Other notes: + * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, or seek to an + * arbitrary seek key. This method decides whether a seek is the most efficient _actual_ way to + * get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, SKIP inside the + * current, loaded block). It does this by looking at the next indexed key of the current HFile. + * This key is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible + * key on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work + * with the current Cell but compare as though it were a seek key; see down in + * matcher.compareKeyForNextRow, etc). If the compare gets us onto the next block we *_SEEK, + * otherwise we just SKIP to the next requested cell. + *

    + * Other notes: *

      *
    • Rows can straddle block boundaries
    • *
    • Versions of columns can straddle block boundaries (i.e. column C1 at T1 might be in a * different block than column C1 at T2)
    • - *
    • We want to SKIP if the chance is high that we'll find the desired Cell after a - * few SKIPs...
    • - *
    • We want to SEEK when the chance is high that we'll be able to seek - * past many Cells, especially if we know we need to go to the next block.
    • + *
    • We want to SKIP if the chance is high that we'll find the desired Cell after a few + * SKIPs...
    • + *
    • We want to SEEK when the chance is high that we'll be able to seek past many Cells, + * especially if we know we need to go to the next block.
    • *
    - *

    A good proxy (best effort) to determine whether SKIP is better than SEEK is whether - * we'll likely end up seeking to the next block (or past the next block) to get our next column. + *

    + * A good proxy (best effort) to determine whether SKIP is better than SEEK is whether we'll + * likely end up seeking to the next block (or past the next block) to get our next column. * Example: + * *

        * |    BLOCK 1              |     BLOCK 2                   |
        * |  r1/c1, r1/c2, r1/c3    |    r1/c4, r1/c5, r2/c1        |
    @@ -867,6 +859,7 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException {
        *                                            |              |
        *                                    Next Index Key        SEEK_NEXT_COL
        * 
    + * * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4 * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we only * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at @@ -883,9 +876,11 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException { Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); - if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && - (nextIndexedKey == previousIndexedKey || - matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) { + if ( + nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + && (nextIndexedKey == previousIndexedKey + || matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) + ) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -908,9 +903,11 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); - if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && - (nextIndexedKey == previousIndexedKey || - matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) { + if ( + nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + && (nextIndexedKey == previousIndexedKey + || matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) + ) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -945,7 +942,7 @@ private static void clearAndClose(List scanners) { // Implementation of ChangedReadersObserver @Override public void updateReaders(List sfs, List memStoreScanners) - throws IOException { + throws IOException { if (CollectionUtils.isEmpty(sfs) && CollectionUtils.isEmpty(memStoreScanners)) { return; } @@ -1008,7 +1005,7 @@ protected final boolean reopenAfterFlush() throws IOException { flushLock.lock(); try { List allScanners = - new ArrayList<>(flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size()); + new ArrayList<>(flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size()); allScanners.addAll(flushedstoreFileScanners); allScanners.addAll(memStoreScannersAfterFlush); scanners = selectScannersFrom(store, allScanners); @@ -1022,7 +1019,7 @@ protected final boolean reopenAfterFlush() throws IOException { // Seek the new scanners to the last key seekScanners(scanners, lastTop, false, parallelSeekEnabled); // remove the older memstore scanner - for (int i = currentScanners.size() - 1; i >=0; i--) { + for (int i = currentScanners.size() - 1; i >= 0; i--) { if (!currentScanners.get(i).isFileScanner()) { scannersForDelayedClose.add(currentScanners.remove(i)); } else { @@ -1036,8 +1033,8 @@ protected final boolean reopenAfterFlush() throws IOException { resetKVHeap(this.currentScanners, store.getComparator()); resetQueryMatcher(lastTop); if (heap.peek() == null || store.getComparator().compareRows(lastTop, this.heap.peek()) != 0) { - LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() + - ",and after = " + heap.peek()); + LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() + + ",and after = " + heap.peek()); topChanged = true; } else { topChanged = false; @@ -1061,17 +1058,13 @@ private void resetQueryMatcher(Cell lastTopKey) { } /** - * Check whether scan as expected order - * @param prevKV - * @param kv - * @param comparator - * @throws IOException + * Check 
whether scan as expected order nnnn */ - protected void checkScanOrder(Cell prevKV, Cell kv, - CellComparator comparator) throws IOException { + protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) + throws IOException { // Check that the heap gives us KVs in an increasing order. - assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 : "Key " - + prevKV + " followed by a smaller key " + kv + " in cf " + store; + assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 + : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; } protected boolean seekToNextRow(Cell c) throws IOException { @@ -1079,13 +1072,10 @@ protected boolean seekToNextRow(Cell c) throws IOException { } /** - * Do a reseek in a normal StoreScanner(scan forward) - * @param kv - * @return true if scanner has values left, false if end of scanner - * @throws IOException + * Do a reseek in a normal StoreScanner(scan forward) n * @return true if scanner has values left, + * false if end of scanner n */ - protected boolean seekAsDirection(Cell kv) - throws IOException { + protected boolean seekAsDirection(Cell kv) throws IOException { return reseek(kv); } @@ -1101,12 +1091,14 @@ public boolean reseek(Cell kv) throws IOException { } void trySwitchToStreamRead() { - if (readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || - heap.peek() == null || bytesRead < preadMaxBytes) { + if ( + readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || heap.peek() == null + || bytesRead < preadMaxBytes + ) { return; } LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, - this.store.getColumnFamilyName()); + this.store.getColumnFamilyName()); scanUsePread = false; Cell lastTop = heap.peek(); List memstoreScanners = new ArrayList<>(); @@ -1125,9 +1117,9 @@ void trySwitchToStreamRead() { try { // We must have a store instance here so no null check // recreate the scanners on the current file scanners - fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, - matcher, scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), - scan.includeStopRow(), readPt, false); + fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, matcher, + scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), scan.includeStopRow(), + readPt, false); if (fileScanners == null) { return; } @@ -1169,23 +1161,20 @@ protected final boolean checkFlushed() { return false; } - /** * Seek storefiles in parallel to optimize IO latency as much as possible * @param scanners the list {@link KeyValueScanner}s to be read from - * @param kv the KeyValue on which the operation is being requested - * @throws IOException + * @param kv the KeyValue on which the operation is being requested n */ - private void parallelSeek(final List - scanners, final Cell kv) throws IOException { + private void parallelSeek(final List scanners, final Cell kv) + throws IOException { if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); CountDownLatch latch = new CountDownLatch(storeFileScannerCount); List handlers = new ArrayList<>(storeFileScannerCount); for (KeyValueScanner scanner : scanners) { if (scanner instanceof StoreFileScanner) { - ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, - this.readPt, latch); + ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, this.readPt, latch); executor.submit(seekHandler); handlers.add(seekHandler); 
} else { @@ -1197,7 +1186,7 @@ private void parallelSeek(final List try { latch.await(); } catch (InterruptedException ie) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); + throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } for (ParallelSeekHandler handler : handlers) { @@ -1214,8 +1203,7 @@ private void parallelSeek(final List List getAllScannersForTesting() { List allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); - if (current != null) - allScanners.add(current); + if (current != null) allScanners.add(current); for (KeyValueScanner scanner : heap.getHeap()) allScanners.add(scanner); return allScanners; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 10a9330f8326..98bb68f31fb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -86,7 +85,7 @@ public static long getLowestTimestamp(Collection candidates) throws */ static Optional getLargestFile(Collection candidates) { return candidates.stream().filter(f -> f.getReader() != null) - .max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length())); + .max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length())); } /** @@ -96,7 +95,7 @@ static Optional getLargestFile(Collection candidates) { */ public static OptionalLong getMaxMemStoreTSInList(Collection sfs) { return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS) - .max(); + .max(); } /** @@ -108,12 +107,12 @@ public static OptionalLong getMaxSequenceIdInList(Collection sfs) { /** * Gets the approximate mid-point of the given file that is optimal for use in splitting it. - * @param file the store file + * @param file the store file * @param comparator Comparator used to compare KVs. * @return The split point row, or null if splitting is not possible, or reader is null. */ static Optional getFileSplitPoint(HStoreFile file, CellComparator comparator) - throws IOException { + throws IOException { StoreFileReader reader = file.getReader(); if (reader == null) { LOG.warn("Storefile " + file + " Reader is null; cannot get split point"); @@ -130,8 +129,9 @@ static Optional getFileSplitPoint(HStoreFile file, CellComparator compar Cell firstKey = reader.getFirstKey().get(); Cell lastKey = reader.getLastKey().get(); // if the midkey is the same as the first or last keys, we cannot (ever) split this region. - if (comparator.compareRows(midKey, firstKey) == 0 || - comparator.compareRows(midKey, lastKey) == 0) { + if ( + comparator.compareRows(midKey, firstKey) == 0 || comparator.compareRows(midKey, lastKey) == 0 + ) { if (LOG.isDebugEnabled()) { LOG.debug("cannot split {} because midkey is the same as first or last row", file); } @@ -144,10 +144,11 @@ static Optional getFileSplitPoint(HStoreFile file, CellComparator compar * Gets the mid point of the largest file passed in as split point. 
*/ static Optional getSplitPoint(Collection storefiles, - CellComparator comparator) throws IOException { + CellComparator comparator) throws IOException { Optional largestFile = StoreUtils.getLargestFile(storefiles); - return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) - : Optional.empty(); + return largestFile.isPresent() + ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) + : Optional.empty(); } /** @@ -166,16 +167,15 @@ public static ChecksumType getChecksumType(Configuration conf) { * @return The bytesPerChecksum that is set in the configuration */ public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, HFile.DEFAULT_BYTES_PER_CHECKSUM); } public static Configuration createStoreConfiguration(Configuration conf, TableDescriptor td, - ColumnFamilyDescriptor cfd) { + ColumnFamilyDescriptor cfd) { // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. return new CompoundConfiguration().add(conf).addBytesMap(td.getValues()) - .addStringMap(cfd.getConfiguration()).addBytesMap(cfd.getValues()); + .addStringMap(cfd.getConfiguration()).addBytesMap(cfd.getValues()); } public static List toStoreFileInfo(Collection storefiles) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 18f7e185eede..40108e346d1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,31 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; - import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A chore for refreshing the store files for secondary regions hosted in the region server. - * - * This chore should run periodically with a shorter interval than HFile TTL - * ("hbase.master.hfilecleaner.ttl", default 5 minutes). - * It ensures that if we cannot refresh files longer than that amount, the region - * will stop serving read requests because the referenced files might have been deleted (by the - * primary region). + * A chore for refreshing the store files for secondary regions hosted in the region server. This + * chore should run periodically with a shorter interval than HFile TTL + * ("hbase.master.hfilecleaner.ttl", default 5 minutes). 
It ensures that if we cannot refresh files + * longer than that amount, the region will stop serving read requests because the referenced files + * might have been deleted (by the primary region). */ @InterfaceAudience.Private public class StorefileRefresherChore extends ScheduledChore { @@ -49,35 +45,35 @@ public class StorefileRefresherChore extends ScheduledChore { /** * The period (in milliseconds) for refreshing the store files for the secondary regions. */ - public static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD - = "hbase.regionserver.storefile.refresh.period"; - static final int DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD = 0; //disabled by default + public static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD = + "hbase.regionserver.storefile.refresh.period"; + static final int DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD = 0; // disabled by default /** - * Whether all storefiles should be refreshed, as opposed to just hbase:meta's - * Meta region doesn't have WAL replication for replicas enabled yet + * Whether all storefiles should be refreshed, as opposed to just hbase:meta's Meta region doesn't + * have WAL replication for replicas enabled yet */ - public static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD - = "hbase.regionserver.meta.storefile.refresh.period"; + public static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD = + "hbase.regionserver.meta.storefile.refresh.period"; private HRegionServer regionServer; private long hfileTtl; private int period; private boolean onlyMetaRefresh = true; - //ts of last time regions store files are refreshed + // ts of last time regions store files are refreshed private Map lastRefreshTimes; // encodedName -> long public StorefileRefresherChore(int period, boolean onlyMetaRefresh, HRegionServer regionServer, - Stoppable stoppable) { + Stoppable stoppable) { super("StorefileRefresherChore", stoppable, period); this.period = period; this.regionServer = regionServer; - this.hfileTtl = this.regionServer.getConfiguration().getLong( - TimeToLiveHFileCleaner.TTL_CONF_KEY, TimeToLiveHFileCleaner.DEFAULT_TTL); + this.hfileTtl = this.regionServer.getConfiguration() + .getLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, TimeToLiveHFileCleaner.DEFAULT_TTL); this.onlyMetaRefresh = onlyMetaRefresh; if (period > hfileTtl / 2) { - throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + - " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); + throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + + " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); } lastRefreshTimes = new HashMap<>(); } @@ -108,14 +104,15 @@ protected void chore() { LOG.warn("Exception while trying to refresh store files for region:" + r.getRegionInfo() + ", exception:" + StringUtils.stringifyException(ex)); - // Store files have a TTL in the archive directory. If we fail to refresh for that long, we stop serving reads + // Store files have a TTL in the archive directory. 
If we fail to refresh for that long, we + // stop serving reads if (isRegionStale(encodedName, time)) { - ((HRegion)r).setReadsEnabled(false); // stop serving reads + ((HRegion) r).setReadsEnabled(false); // stop serving reads } continue; } lastRefreshTimes.put(encodedName, time); - ((HRegion)r).setReadsEnabled(true); // restart serving reads + ((HRegion) r).setReadsEnabled(true); // restart serving reads } // remove closed regions diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index fc0598d89ac0..386f64166ef4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,15 +23,14 @@ import java.util.Collection; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; /** * Base class for cell sink that separates the provided cells into multiple files for stripe @@ -94,11 +93,13 @@ protected void preCloseWriter(StoreFileWriter writer) throws IOException { * @param cell The cell whose row has to be checked. */ protected void sanityCheckLeft(byte[] left, Cell cell) throws IOException { - if (!Arrays.equals(StripeStoreFileManager.OPEN_KEY, left) - && comparator.compareRows(cell, left, 0, left.length) < 0) { + if ( + !Arrays.equals(StripeStoreFileManager.OPEN_KEY, left) + && comparator.compareRows(cell, left, 0, left.length) < 0 + ) { String error = - "The first row is lower than the left boundary of [" + Bytes.toString(left) + "]: [" - + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; + "The first row is lower than the left boundary of [" + Bytes.toString(left) + "]: [" + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; LOG.error(error); throw new IOException(error); } @@ -109,12 +110,13 @@ protected void sanityCheckLeft(byte[] left, Cell cell) throws IOException { * @param right The right boundary of the writer. 
*/ protected void sanityCheckRight(byte[] right, Cell cell) throws IOException { - if (!Arrays.equals(StripeStoreFileManager.OPEN_KEY, right) - && comparator.compareRows(cell, right, 0, right.length) >= 0) { - String error = - "The last row is higher or equal than the right boundary of [" + Bytes.toString(right) - + "]: [" - + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; + if ( + !Arrays.equals(StripeStoreFileManager.OPEN_KEY, right) + && comparator.compareRows(cell, right, 0, right.length) >= 0 + ) { + String error = "The last row is higher or equal than the right boundary of [" + + Bytes.toString(right) + "]: [" + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; LOG.error(error); throw new IOException(error); } @@ -136,12 +138,13 @@ public static class BoundaryMultiWriter extends StripeMultiFileWriter { /** * @param targetBoundaries The boundaries on which writers/files are separated. - * @param majorRangeFrom Major range is the range for which at least one file should be written - * (because all files are included in compaction). majorRangeFrom is the left boundary. - * @param majorRangeTo The right boundary of majorRange (see majorRangeFrom). + * @param majorRangeFrom Major range is the range for which at least one file should be + * written (because all files are included in compaction). + * majorRangeFrom is the left boundary. + * @param majorRangeTo The right boundary of majorRange (see majorRangeFrom). */ public BoundaryMultiWriter(CellComparator comparator, List targetBoundaries, - byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { + byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { super(comparator); this.boundaries = targetBoundaries; this.existingWriters = new ArrayList<>(this.boundaries.size() - 1); @@ -149,16 +152,16 @@ public BoundaryMultiWriter(CellComparator comparator, List targetBoundar // must match some target boundaries, let's find them. assert (majorRangeFrom == null) == (majorRangeTo == null); if (majorRangeFrom != null) { - majorRangeFromIndex = - Arrays.equals(majorRangeFrom, StripeStoreFileManager.OPEN_KEY) ? 0 : Collections - .binarySearch(boundaries, majorRangeFrom, Bytes.BYTES_COMPARATOR); - majorRangeToIndex = - Arrays.equals(majorRangeTo, StripeStoreFileManager.OPEN_KEY) ? boundaries.size() - : Collections.binarySearch(boundaries, majorRangeTo, Bytes.BYTES_COMPARATOR); + majorRangeFromIndex = Arrays.equals(majorRangeFrom, StripeStoreFileManager.OPEN_KEY) + ? 0 + : Collections.binarySearch(boundaries, majorRangeFrom, Bytes.BYTES_COMPARATOR); + majorRangeToIndex = Arrays.equals(majorRangeTo, StripeStoreFileManager.OPEN_KEY) + ? boundaries.size() + : Collections.binarySearch(boundaries, majorRangeTo, Bytes.BYTES_COMPARATOR); if (this.majorRangeFromIndex < 0 || this.majorRangeToIndex < 0) { throw new IOException("Major range does not match writer boundaries: [" - + Bytes.toString(majorRangeFrom) + "] [" + Bytes.toString(majorRangeTo) + "]; from " - + majorRangeFromIndex + " to " + majorRangeToIndex); + + Bytes.toString(majorRangeFrom) + "] [" + Bytes.toString(majorRangeTo) + "]; from " + + majorRangeFromIndex + " to " + majorRangeToIndex); } } } @@ -223,9 +226,9 @@ private void createEmptyWriter() throws IOException { boolean needEmptyFile = isInMajorRange || isLastWriter; existingWriters.add(needEmptyFile ? writerFactory.createWriter() : null); hasAnyWriter |= needEmptyFile; - currentWriterEndKey = - (existingWriters.size() + 1 == boundaries.size()) ? 
null : boundaries.get(existingWriters - .size() + 1); + currentWriterEndKey = (existingWriters.size() + 1 == boundaries.size()) + ? null + : boundaries.get(existingWriters.size() + 1); } private void checkCanCreateWriter() throws IOException { @@ -233,7 +236,7 @@ private void checkCanCreateWriter() throws IOException { assert existingWriters.size() <= maxWriterCount; if (existingWriters.size() >= maxWriterCount) { throw new IOException("Cannot create any more writers (created " + existingWriters.size() - + " out of " + maxWriterCount + " - row might be out of range of all valid writers"); + + " out of " + maxWriterCount + " - row might be out of range of all valid writers"); } } @@ -241,14 +244,14 @@ private void stopUsingCurrentWriter() { if (currentWriter != null) { if (LOG.isDebugEnabled()) { LOG.debug("Stopping to use a writer after [" + Bytes.toString(currentWriterEndKey) - + "] row; wrote out " + cellsInCurrentWriter + " kvs"); + + "] row; wrote out " + cellsInCurrentWriter + " kvs"); } cellsInCurrentWriter = 0; } currentWriter = null; - currentWriterEndKey = - (existingWriters.size() + 1 == boundaries.size()) ? null : boundaries.get(existingWriters - .size() + 1); + currentWriterEndKey = (existingWriters.size() + 1 == boundaries.size()) + ? null + : boundaries.get(existingWriters.size() + 1); } } @@ -272,12 +275,12 @@ public static class SizeMultiWriter extends StripeMultiFileWriter { /** * @param targetCount The maximum count of writers that can be created. - * @param targetKvs The number of KVs to read from source before starting each new writer. - * @param left The left boundary of the first writer. - * @param right The right boundary of the last writer. + * @param targetKvs The number of KVs to read from source before starting each new writer. + * @param left The left boundary of the first writer. + * @param right The right boundary of the last writer. */ public SizeMultiWriter(CellComparator comparator, int targetCount, long targetKvs, byte[] left, - byte[] right) { + byte[] right) { super(comparator); this.targetCount = targetCount; this.targetCells = targetKvs; @@ -297,12 +300,13 @@ public void append(Cell cell) throws IOException { // First append ever, do a sanity check. sanityCheckLeft(left, cell); doCreateWriter = true; - } else if (lastRowInCurrentWriter != null - && !PrivateCellUtil.matchingRows(cell, lastRowInCurrentWriter, 0, - lastRowInCurrentWriter.length)) { + } else if ( + lastRowInCurrentWriter != null && !PrivateCellUtil.matchingRows(cell, + lastRowInCurrentWriter, 0, lastRowInCurrentWriter.length) + ) { if (LOG.isDebugEnabled()) { LOG.debug("Stopping to use a writer after [" + Bytes.toString(lastRowInCurrentWriter) - + "] row; wrote out " + cellsInCurrentWriter + " kvs"); + + "] row; wrote out " + cellsInCurrentWriter + " kvs"); } lastRowInCurrentWriter = null; cellsInCurrentWriter = 0; @@ -325,20 +329,21 @@ public void append(Cell cell) throws IOException { ++cellsInCurrentWriter; cellsSeen = cellsInCurrentWriter; if (this.sourceScanner != null) { - cellsSeen = - Math.max(cellsSeen, this.sourceScanner.getEstimatedNumberOfKvsScanned() - - cellsSeenInPrevious); + cellsSeen = Math.max(cellsSeen, + this.sourceScanner.getEstimatedNumberOfKvsScanned() - cellsSeenInPrevious); } // If we are not already waiting for opportunity to close, start waiting if we can // create any more writers and if the current one is too big. 
- if (lastRowInCurrentWriter == null && existingWriters.size() < targetCount - && cellsSeen >= targetCells) { + if ( + lastRowInCurrentWriter == null && existingWriters.size() < targetCount + && cellsSeen >= targetCells + ) { lastRowInCurrentWriter = CellUtil.cloneRow(cell); // make a copy if (LOG.isDebugEnabled()) { LOG.debug("Preparing to start a new writer after [" - + Bytes.toString(lastRowInCurrentWriter) + "] row; observed " + cellsSeen - + " kvs and wrote out " + cellsInCurrentWriter + " kvs"); + + Bytes.toString(lastRowInCurrentWriter) + "] row; observed " + cellsSeen + + " kvs and wrote out " + cellsInCurrentWriter + " kvs"); } } } @@ -346,11 +351,11 @@ public void append(Cell cell) throws IOException { @Override protected void preCommitWritersInternal() throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("Stopping with " - + cellsInCurrentWriter - + " kvs in last writer" - + ((this.sourceScanner == null) ? "" : ("; observed estimated " - + this.sourceScanner.getEstimatedNumberOfKvsScanned() + " KVs total"))); + LOG.debug("Stopping with " + cellsInCurrentWriter + " kvs in last writer" + + ((this.sourceScanner == null) + ? "" + : ("; observed estimated " + this.sourceScanner.getEstimatedNumberOfKvsScanned() + + " KVs total"))); } if (lastCell != null) { sanityCheckRight(right, lastCell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java index 61deb0b93ce3..b354eda2e79b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +17,15 @@ */ package org.apache.hadoop.hbase.regionserver; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; /** - * Configuration class for stripe store and compactions. - * See {@link StripeStoreFileManager} for general documentation. - * See getters for the description of each setting. + * Configuration class for stripe store and compactions. See {@link StripeStoreFileManager} for + * general documentation. See getters for the description of each setting. */ @InterfaceAudience.Private public class StripeStoreConfig { @@ -38,33 +36,42 @@ public class StripeStoreConfig { /** The minimum number of files to compact within a stripe; same as for regular compaction. */ public static final String MIN_FILES_KEY = "hbase.store.stripe.compaction.minFiles"; - /** The minimum number of files to compact when compacting L0; same as minFiles for regular + /** + * The minimum number of files to compact when compacting L0; same as minFiles for regular * compaction. Given that L0 causes unnecessary overwriting of the data, should be higher than - * regular minFiles. */ + * regular minFiles. 
+ */ public static final String MIN_FILES_L0_KEY = "hbase.store.stripe.compaction.minFilesL0"; - /** The size the stripe should achieve to be considered for splitting into multiple stripes. - Stripe will be split when it can be fully compacted, and it is above this size. */ + /** + * The size the stripe should achieve to be considered for splitting into multiple stripes. Stripe + * will be split when it can be fully compacted, and it is above this size. + */ public static final String SIZE_TO_SPLIT_KEY = "hbase.store.stripe.sizeToSplit"; - /** The target count of new stripes to produce when splitting a stripe. A floating point - number, default is 2. Values less than 1 will be converted to 1/x. Non-whole numbers will - produce unbalanced splits, which may be good for some cases. In this case the "smaller" of - the new stripes will always be the rightmost one. If the stripe is bigger than sizeToSplit - when splitting, this will be adjusted by a whole increment. */ + /** + * The target count of new stripes to produce when splitting a stripe. A floating point number, + * default is 2. Values less than 1 will be converted to 1/x. Non-whole numbers will produce + * unbalanced splits, which may be good for some cases. In this case the "smaller" of the new + * stripes will always be the rightmost one. If the stripe is bigger than sizeToSplit when + * splitting, this will be adjusted by a whole increment. + */ public static final String SPLIT_PARTS_KEY = "hbase.store.stripe.splitPartCount"; - /** The initial stripe count to create. If the row distribution is roughly the same over time, - it's good to set this to a count of stripes that is expected to be achieved in most regions, - to get this count from the outset and prevent unnecessary splitting. */ + /** + * The initial stripe count to create. If the row distribution is roughly the same over time, it's + * good to set this to a count of stripes that is expected to be achieved in most regions, to get + * this count from the outset and prevent unnecessary splitting. + */ public static final String INITIAL_STRIPE_COUNT_KEY = "hbase.store.stripe.initialStripeCount"; /** Whether to flush memstore to L0 files, or directly to stripes. */ public static final String FLUSH_TO_L0_KEY = "hbase.store.stripe.compaction.flushToL0"; - /** When splitting region, the maximum size imbalance to allow in an attempt to split at a - stripe boundary, so that no files go to both regions. Most users won't need to change that. */ + /** + * When splitting region, the maximum size imbalance to allow in an attempt to split at a stripe + * boundary, so that no files go to both regions. Most users won't need to change that. + */ public static final String MAX_REGION_SPLIT_IMBALANCE_KEY = - "hbase.store.stripe.region.split.max.imbalance"; - + "hbase.store.stripe.region.split.max.imbalance"; private final float maxRegionSplitImbalance; private final int level0CompactMinFiles; @@ -78,6 +85,7 @@ public class StripeStoreConfig { private final long splitPartSize; // derived from sizeToSplitAt and splitPartCount private static final double EPSILON = 0.001; // good enough for this, not a real epsilon. 
+ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4); this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false); @@ -85,7 +93,7 @@ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1); this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles)); this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY, - config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10)); + config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10)); this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true); float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true); @@ -100,7 +108,7 @@ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { if (flushSize == 0) { flushSize = 128 * 1024 * 1024; } - long defaultSplitSize = (long)(flushSize * getLevel0MinFiles() * 4 * splitPartCount); + long defaultSplitSize = (long) (flushSize * getLevel0MinFiles() * 4 * splitPartCount); this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize); int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1); if (initialCount == 0) { @@ -108,15 +116,15 @@ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { initialCount = 1; } this.initialCount = initialCount; - this.splitPartSize = (long)(this.sizeToSplitAt / this.splitPartCount); + this.splitPartSize = (long) (this.sizeToSplitAt / this.splitPartCount); } - private static float getFloat( - Configuration config, String key, float defaultValue, boolean moreThanOne) { + private static float getFloat(Configuration config, String key, float defaultValue, + boolean moreThanOne) { float value = config.getFloat(key, defaultValue); if (value < EPSILON) { - LOG.warn(String.format( - "%s is set to 0 or negative; using default value of %f", key, defaultValue)); + LOG.warn( + String.format("%s is set to 0 or negative; using default value of %f", key, defaultValue)); value = defaultValue; } else if ((value > 1f) != moreThanOne) { value = 1f / value; @@ -157,8 +165,8 @@ public float getSplitCount() { } /** - * @return the desired size of the target stripe when splitting, in bytes. - * Derived from {@link #getSplitSize()} and {@link #getSplitCount()}. + * @return the desired size of the target stripe when splitting, in bytes. Derived from + * {@link #getSplitSize()} and {@link #getSplitCount()}. */ public long getSplitPartSize() { return splitPartSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index bfb3f649ff27..348c4e61be09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,8 +40,8 @@ * The storage engine that implements the stripe-based store/compaction scheme. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class StripeStoreEngine extends StoreEngine { +public class StripeStoreEngine extends + StoreEngine { private static final Logger LOG = LoggerFactory.getLogger(StripeStoreEngine.class); private StripeStoreConfig config; @@ -56,13 +56,13 @@ public CompactionContext createCompaction() { } @Override - protected void createComponents( - Configuration conf, HStore store, CellComparator comparator) throws IOException { + protected void createComponents(Configuration conf, HStore store, CellComparator comparator) + throws IOException { this.config = new StripeStoreConfig(conf, store); this.compactionPolicy = new StripeCompactionPolicy(conf, store, config); this.storeFileManager = new StripeStoreFileManager(comparator, conf, this.config); - this.storeFlusher = new StripeStoreFlusher( - conf, store, this.compactionPolicy, this.storeFileManager); + this.storeFlusher = + new StripeStoreFlusher(conf, store, this.compactionPolicy, this.storeFileManager); this.compactor = new StripeCompactor(conf, store); } @@ -79,11 +79,12 @@ public List preSelect(List filesCompacting) { @Override public boolean select(List filesCompacting, boolean isUserCompaction, - boolean mayUseOffPeak, boolean forceMajor) throws IOException { - this.stripeRequest = compactionPolicy.selectCompaction( - storeFileManager, filesCompacting, mayUseOffPeak); + boolean mayUseOffPeak, boolean forceMajor) throws IOException { + this.stripeRequest = + compactionPolicy.selectCompaction(storeFileManager, filesCompacting, mayUseOffPeak); this.request = (this.stripeRequest == null) - ? new CompactionRequestImpl(new ArrayList<>()) : this.stripeRequest.getRequest(); + ? new CompactionRequestImpl(new ArrayList<>()) + : this.stripeRequest.getRequest(); return this.stripeRequest != null; } @@ -100,7 +101,7 @@ public void forceSelect(CompactionRequestImpl request) { @Override public List compact(ThroughputController throughputController, User user) - throws IOException { + throws IOException { Preconditions.checkArgument(this.stripeRequest != null, "Cannot compact without selection"); return this.stripeRequest.execute(compactor, throughputController, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index 1c3ac683dcd8..f494ab7ee822 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; @@ -44,25 +42,23 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * Stripe implementation of {@link StoreFileManager}. - * Not thread safe - relies on external locking (in HStore). 
Collections that this class - * returns are immutable or unique to the call, so they should be safe. - * Stripe store splits the key space of the region into non-overlapping stripes, as well as - * some recent files that have all the keys (level 0). Each stripe contains a set of files. - * When L0 is compacted, it's split into the files corresponding to existing stripe boundaries, - * that can thus be added to stripes. - * When scan or get happens, it only has to read the files from the corresponding stripes. - * See {@link StripeCompactionPolicy} on how the stripes are determined; this class doesn't care. - * - * This class should work together with {@link StripeCompactionPolicy} and - * {@link org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor}. - * With regard to how they work, we make at least the following (reasonable) assumptions: - * - Compaction produces one file per new stripe (if any); that is easy to change. - * - Compaction has one contiguous set of stripes both in and out, except if L0 is involved. + * Stripe implementation of {@link StoreFileManager}. Not thread safe - relies on external locking + * (in HStore). Collections that this class returns are immutable or unique to the call, so they + * should be safe. Stripe store splits the key space of the region into non-overlapping stripes, as + * well as some recent files that have all the keys (level 0). Each stripe contains a set of files. + * When L0 is compacted, it's split into the files corresponding to existing stripe boundaries, that + * can thus be added to stripes. When scan or get happens, it only has to read the files from the + * corresponding stripes. See {@link StripeCompactionPolicy} on how the stripes are determined; this + * class doesn't care. This class should work together with {@link StripeCompactionPolicy} and + * {@link org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor}. With regard to how they + * work, we make at least the following (reasonable) assumptions: - Compaction produces one file per + * new stripe (if any); that is easy to change. - Compaction has one contiguous set of stripes both + * in and out, except if L0 is involved. */ @InterfaceAudience.Private public class StripeStoreFileManager @@ -84,8 +80,8 @@ public class StripeStoreFileManager final static byte[] INVALID_KEY = null; /** - * The state class. Used solely to replace results atomically during - * compactions and avoid complicated error handling. + * The state class. Used solely to replace results atomically during compactions and avoid + * complicated error handling. */ private static class State { /** @@ -96,9 +92,9 @@ private static class State { public byte[][] stripeEndRows = new byte[0][]; /** - * Files by stripe. Each element of the list corresponds to stripeEndRow element with the - * same index, except the last one. Inside each list, the files are in reverse order by - * seqNum. Note that the length of this is one higher than that of stripeEndKeys. + * Files by stripe. Each element of the list corresponds to stripeEndRow element with the same + * index, except the last one. Inside each list, the files are in reverse order by seqNum. Note + * that the length of this is one higher than that of stripeEndKeys. */ public ArrayList> stripeFiles = new ArrayList<>(); /** Level 0. The files are in reverse order by seqNum. 
*/ @@ -108,14 +104,17 @@ private static class State { public ImmutableList allFilesCached = ImmutableList.of(); private ImmutableList allCompactedFilesCached = ImmutableList.of(); } + private State state = null; /** Cached file metadata (or overrides as the case may be) */ private HashMap fileStarts = new HashMap<>(); private HashMap fileEnds = new HashMap<>(); - /** Normally invalid key is null, but in the map null is the result for "no key"; so use - * the following constant value in these maps instead. Note that this is a constant and - * we use it to compare by reference when we read from the map. */ + /** + * Normally invalid key is null, but in the map null is the result for "no key"; so use the + * following constant value in these maps instead. Note that this is a constant and we use it to + * compare by reference when we read from the map. + */ private static final byte[] INVALID_KEY_IN_MAP = new byte[0]; private final CellComparator cellComparator; @@ -123,12 +122,12 @@ private static class State { private final int blockingFileCount; - public StripeStoreFileManager( - CellComparator kvComparator, Configuration conf, StripeStoreConfig config) { + public StripeStoreFileManager(CellComparator kvComparator, Configuration conf, + StripeStoreConfig config) { this.cellComparator = kvComparator; this.config = config; - this.blockingFileCount = conf.getInt( - HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); + this.blockingFileCount = + conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); } @Override @@ -179,8 +178,10 @@ public int getStorefileCount() { return state.allFilesCached.size(); } - /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} - * for details on this methods. */ + /** + * See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} for details on this + * methods. + */ @Override public Iterator getCandidateFilesForRowKeyBefore(final KeyValue targetKey) { KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists(); @@ -195,14 +196,16 @@ public Iterator getCandidateFilesForRowKeyBefore(final KeyValue targ return result.iterator(); } - /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and - * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} - * for details on this methods. */ + /** + * See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and + * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} for + * details on this methods. + */ @Override public Iterator updateCandidateFilesForRowKeyBefore( - Iterator candidateFiles, final KeyValue targetKey, final Cell candidate) { + Iterator candidateFiles, final KeyValue targetKey, final Cell candidate) { KeyBeforeConcatenatedLists.Iterator original = - (KeyBeforeConcatenatedLists.Iterator)candidateFiles; + (KeyBeforeConcatenatedLists.Iterator) candidateFiles; assert original != null; ArrayList> components = original.getComponents(); for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) { @@ -211,8 +214,7 @@ public Iterator updateCandidateFilesForRowKeyBefore( // Entries are ordered as such: L0, then stripes in reverse order. We never remove // level 0; we remove the stripe, and all subsequent ones, as soon as we find the // first one that cannot possibly have better candidates. 
- if (!isInvalid(endKey) && !isOpen(endKey) - && (nonOpenRowCompare(targetKey, endKey) >= 0)) { + if (!isInvalid(endKey) && !isOpen(endKey) && (nonOpenRowCompare(targetKey, endKey) >= 0)) { original.removeComponents(firstIrrelevant); break; } @@ -221,10 +223,9 @@ public Iterator updateCandidateFilesForRowKeyBefore( } /** - * Override of getSplitPoint that determines the split point as the boundary between two - * stripes, unless it causes significant imbalance between split sides' sizes. In that - * case, the split boundary will be chosen from the middle of one of the stripes to - * minimize imbalance. + * Override of getSplitPoint that determines the split point as the boundary between two stripes, + * unless it causes significant imbalance between split sides' sizes. In that case, the split + * boundary will be chosen from the middle of one of the stripes to minimize imbalance. * @return The split point, or null if no split is possible. */ @Override @@ -250,13 +251,14 @@ public Optional getSplitPoint() throws IOException { } } if (leftSize == 0 || rightSize == 0) { - String errMsg = String.format("Cannot split on a boundary - left index %d size %d, " - + "right index %d size %d", leftIndex, leftSize, rightIndex, rightSize); + String errMsg = String.format( + "Cannot split on a boundary - left index %d size %d, " + "right index %d size %d", + leftIndex, leftSize, rightIndex, rightSize); debugDumpState(errMsg); LOG.warn(errMsg); return getSplitPointFromAllFiles(); } - double ratio = (double)rightSize / leftSize; + double ratio = (double) rightSize / leftSize; if (ratio < 1) { ratio = 1 / ratio; } @@ -270,16 +272,16 @@ public Optional getSplitPoint() throws IOException { // See if we can achieve better ratio if we split the bigger side in half. boolean isRightLarger = rightSize >= leftSize; double newRatio = isRightLarger - ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) - : getMidStripeSplitRatio(rightSize, leftSize, lastLeftSize); + ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) + : getMidStripeSplitRatio(rightSize, leftSize, lastLeftSize); if (newRatio < 1) { newRatio = 1 / newRatio; } if (newRatio >= ratio) { return Optional.of(state.stripeEndRows[leftIndex]); } - LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split " - + newRatio + " configured ratio " + config.getMaxSplitImbalance()); + LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split " + newRatio + + " configured ratio " + config.getMaxSplitImbalance()); // OK, we may get better ratio, get it. return StoreUtils.getSplitPoint(state.stripeFiles.get(isRightLarger ? rightIndex : leftIndex), cellComparator); @@ -293,12 +295,12 @@ private Optional getSplitPointFromAllFiles() throws IOException { } private double getMidStripeSplitRatio(long smallerSize, long largerSize, long lastLargerSize) { - return (double)(largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f); + return (double) (largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f); } @Override public Collection getFilesForScan(byte[] startRow, boolean includeStartRow, - byte[] stopRow, boolean includeStopRow) { + byte[] stopRow, boolean includeStopRow) { if (state.stripeFiles.isEmpty()) { return state.level0Files; // There's just L0. } @@ -323,8 +325,8 @@ public Collection getFilesForScan(byte[] startRow, boolean includeSt public void addCompactionResults(Collection compactedFiles, Collection results) { // See class comment for the assumptions we make here. 
- LOG.debug("Attempting to merge compaction results: " + compactedFiles.size() + - " files replaced by " + results.size()); + LOG.debug("Attempting to merge compaction results: " + compactedFiles.size() + + " files replaced by " + results.size()); // In order to be able to fail in the middle of the operation, we'll operate on lazy // copies and apply the result at the end. CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(false); @@ -367,7 +369,7 @@ public int getStoreCompactionPriority() { // many files we have, so do an approximate mapping to normal priority range; L0 counts // for all stripes. int l0 = state.level0Files.size(), sc = state.stripeFiles.size(); - int priority = (int)Math.ceil(((double)(this.blockingFileCount - fc + l0) / sc) - l0); + int priority = (int) Math.ceil(((double) (this.blockingFileCount - fc + l0) / sc) - l0); return (priority <= HStore.PRIORITY_USER) ? (HStore.PRIORITY_USER + 1) : priority; } @@ -385,10 +387,9 @@ private long getStripeFilesSize(int stripeIndex) { } /** - * Loads initial store files that were picked up from some physical location pertaining to - * this store (presumably). Unlike adding files after compaction, assumes empty initial - * sets, and is forgiving with regard to stripe constraints - at worst, many/all files will - * go to level 0. + * Loads initial store files that were picked up from some physical location pertaining to this + * store (presumably). Unlike adding files after compaction, assumes empty initial sets, and is + * forgiving with regard to stripe constraints - at worst, many/all files will go to level 0. * @param storeFiles Store files to add. */ private void loadUnclassifiedStoreFiles(List storeFiles) { @@ -403,8 +404,7 @@ private void loadUnclassifiedStoreFiles(List storeFiles) { if (isInvalid(startRow) || isInvalid(endRow)) { insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0. ensureLevel0Metadata(sf); - } else if (!isOpen(startRow) && !isOpen(endRow) && - nonOpenRowCompare(startRow, endRow) >= 0) { + } else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) { LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row [" + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0"); insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also. 
@@ -424,7 +424,7 @@ private void loadUnclassifiedStoreFiles(List storeFiles) { boolean hasOverlaps = false; byte[] expectedStartRow = null; // first stripe can start wherever Iterator>> entryIter = - candidateStripes.entrySet().iterator(); + candidateStripes.entrySet().iterator(); while (entryIter.hasNext()) { Map.Entry> entry = entryIter.next(); ArrayList files = entry.getValue(); @@ -437,8 +437,8 @@ private void loadUnclassifiedStoreFiles(List storeFiles) { } else if (!rowEquals(expectedStartRow, startRow)) { hasOverlaps = true; LOG.warn("Store file doesn't fit into the tentative stripes - expected to start at [" - + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow) - + "], to L0 it goes"); + + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow) + + "], to L0 it goes"); HStoreFile badSf = files.remove(i); insertFileIntoStripe(level0Files, badSf); ensureLevel0Metadata(badSf); @@ -463,8 +463,8 @@ private void loadUnclassifiedStoreFiles(List storeFiles) { boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey()); if (!isOpen) { LOG.warn("The range of the loaded files does not cover full key space: from [" - + Bytes.toString(startOf(firstFile)) + "], to [" - + Bytes.toString(candidateStripes.lastKey()) + "]"); + + Bytes.toString(startOf(firstFile)) + "], to [" + + Bytes.toString(candidateStripes.lastKey()) + "]"); if (!hasOverlaps) { ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true); ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false); @@ -517,23 +517,17 @@ private void debugDumpState(String string) { if (!LOG.isDebugEnabled()) return; StringBuilder sb = new StringBuilder(); sb.append("\n" + string + "; current stripe state is as such:"); - sb.append("\n level 0 with ") - .append(state.level0Files.size()) - .append( - " files: " - + TraditionalBinaryPrefix.long2String( - StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";"); + sb.append("\n level 0 with ").append(state.level0Files.size()) + .append(" files: " + TraditionalBinaryPrefix + .long2String(StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";"); for (int i = 0; i < state.stripeFiles.size(); ++i) { String endRow = (i == state.stripeEndRows.length) - ? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]"; - sb.append("\n stripe ending in ") - .append(endRow) - .append(" with ") - .append(state.stripeFiles.get(i).size()) - .append( - " files: " - + TraditionalBinaryPrefix.long2String( - StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";"); + ? "(end)" + : "[" + Bytes.toString(state.stripeEndRows[i]) + "]"; + sb.append("\n stripe ending in ").append(endRow).append(" with ") + .append(state.stripeFiles.get(i).size()) + .append(" files: " + TraditionalBinaryPrefix.long2String( + StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";"); } sb.append("\n").append(state.stripeFiles.size()).append(" stripes total."); sb.append("\n").append(getStorefileCount()).append(" files total."); @@ -606,25 +600,25 @@ private final int findStripeForRow(byte[] row, boolean isStart) { @Override public final byte[] getStartRow(int stripeIndex) { - return (stripeIndex == 0 ? OPEN_KEY : state.stripeEndRows[stripeIndex - 1]); + return (stripeIndex == 0 ? OPEN_KEY : state.stripeEndRows[stripeIndex - 1]); } @Override public final byte[] getEndRow(int stripeIndex) { return (stripeIndex == state.stripeEndRows.length - ? 
OPEN_KEY : state.stripeEndRows[stripeIndex]); + ? OPEN_KEY + : state.stripeEndRows[stripeIndex]); } - private byte[] startOf(HStoreFile sf) { byte[] result = fileStarts.get(sf); // result and INVALID_KEY_IN_MAP are compared _only_ by reference on purpose here as the latter // serves only as a marker and is not to be confused with other empty byte arrays. // See Javadoc of INVALID_KEY_IN_MAP for more information - return (result == null) - ? sf.getMetadataValue(STRIPE_START_KEY) - : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; + return (result == null) ? sf.getMetadataValue(STRIPE_START_KEY) + : result == INVALID_KEY_IN_MAP ? INVALID_KEY + : result; } private byte[] endOf(HStoreFile sf) { @@ -633,22 +627,24 @@ private byte[] endOf(HStoreFile sf) { // result and INVALID_KEY_IN_MAP are compared _only_ by reference on purpose here as the latter // serves only as a marker and is not to be confused with other empty byte arrays. // See Javadoc of INVALID_KEY_IN_MAP for more information - return (result == null) - ? sf.getMetadataValue(STRIPE_END_KEY) - : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; + return (result == null) ? sf.getMetadataValue(STRIPE_END_KEY) + : result == INVALID_KEY_IN_MAP ? INVALID_KEY + : result; } /** * Inserts a file in the correct place (by seqnum) in a stripe copy. * @param stripe Stripe copy to insert into. - * @param sf File to insert. + * @param sf File to insert. */ private static void insertFileIntoStripe(ArrayList stripe, HStoreFile sf) { // The only operation for which sorting of the files matters is KeyBefore. Therefore, // we will store the file in reverse order by seqNum from the outset. - for (int insertBefore = 0; ; ++insertBefore) { - if (insertBefore == stripe.size() - || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)) { + for (int insertBefore = 0;; ++insertBefore) { + if ( + insertBefore == stripe.size() + || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0) + ) { stripe.add(insertBefore, sf); break; } @@ -656,13 +652,12 @@ private static void insertFileIntoStripe(ArrayList stripe, HStoreFil } /** - * An extension of ConcatenatedLists that has several peculiar properties. - * First, one can cut the tail of the logical list by removing last several sub-lists. - * Second, items can be removed thru iterator. - * Third, if the sub-lists are immutable, they are replaced with mutable copies when needed. - * On average KeyBefore operation will contain half the stripes as potential candidates, - * but will quickly cut down on them as it finds something in the more likely ones; thus, - * the above allow us to avoid unnecessary copying of a bunch of lists. + * An extension of ConcatenatedLists that has several peculiar properties. First, one can cut the + * tail of the logical list by removing last several sub-lists. Second, items can be removed thru + * iterator. Third, if the sub-lists are immutable, they are replaced with mutable copies when + * needed. On average KeyBefore operation will contain half the stripes as potential candidates, + * but will quickly cut down on them as it finds something in the more likely ones; thus, the + * above allow us to avoid unnecessary copying of a bunch of lists. */ private static class KeyBeforeConcatenatedLists extends ConcatenatedLists { @Override @@ -706,9 +701,9 @@ public void remove() { } /** - * Non-static helper class for merging compaction or flush results. 
- * Since we want to merge them atomically (more or less), it operates on lazy copies, - * then creates a new state object and puts it in place. + * Non-static helper class for merging compaction or flush results. Since we want to merge them + * atomically (more or less), it operates on lazy copies, then creates a new state object and puts + * it in place. */ private class CompactionOrFlushMergeCopy { private ArrayList> stripeFiles = null; @@ -759,14 +754,16 @@ private State createNewState(boolean delCompactedFiles) { // Stripe count should be the same unless the end rows changed. assert oldState.stripeFiles.size() == this.stripeFiles.size() || this.stripeEndRows != null; State newState = new State(); - newState.level0Files = (this.level0Files == null) ? oldState.level0Files - : ImmutableList.copyOf(this.level0Files); - newState.stripeEndRows = (this.stripeEndRows == null) ? oldState.stripeEndRows - : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]); + newState.level0Files = + (this.level0Files == null) ? oldState.level0Files : ImmutableList.copyOf(this.level0Files); + newState.stripeEndRows = (this.stripeEndRows == null) + ? oldState.stripeEndRows + : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]); newState.stripeFiles = new ArrayList<>(this.stripeFiles.size()); for (List newStripe : this.stripeFiles) { newState.stripeFiles.add(newStripe instanceof ImmutableList - ? (ImmutableList)newStripe : ImmutableList.copyOf(newStripe)); + ? (ImmutableList) newStripe + : ImmutableList.copyOf(newStripe)); } List newAllFiles = new ArrayList<>(oldState.allFilesCached); @@ -813,7 +810,7 @@ private final ArrayList getStripeCopy(int index) { result = new ArrayList<>(stripeCopy); this.stripeFiles.set(index, result); } else { - result = (ArrayList)stripeCopy; + result = (ArrayList) stripeCopy; } return result; } @@ -861,8 +858,8 @@ private TreeMap processResults() { HStoreFile oldSf = newStripes.put(endRow, sf); if (oldSf != null) { throw new IllegalStateException( - "Compactor has produced multiple files for the stripe ending in [" + - Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + oldSf.getPath()); + "Compactor has produced multiple files for the stripe ending in [" + + Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + oldSf.getPath()); } } return newStripes; @@ -881,8 +878,8 @@ private void removeCompactedFiles() { int stripeIndex = findStripeIndexByEndRow(oldEndRow); if (stripeIndex < 0) { throw new IllegalStateException( - "An allegedly compacted file [" + oldFile + "] does not belong" + - " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])"); + "An allegedly compacted file [" + oldFile + "] does not belong" + + " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])"); } source = getStripeCopy(stripeIndex); } @@ -893,14 +890,15 @@ private void removeCompactedFiles() { } /** - * See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with - * new candidate stripes/removes old stripes; produces new set of stripe end rows. - * @param newStripes New stripes - files by end row. + * See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with new + * candidate stripes/removes old stripes; produces new set of stripe end rows. + * @param newStripes New stripes - files by end row. */ private void processNewCandidateStripes(TreeMap newStripes) { // Validate that the removed and added aggregate ranges still make for a full key space. 
boolean hasStripes = !this.stripeFiles.isEmpty(); - this.stripeEndRows = new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); + this.stripeEndRows = + new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); int removeFrom = 0; byte[] firstStartRow = startOf(newStripes.firstEntry().getValue()); byte[] lastEndRow = newStripes.lastKey(); @@ -938,13 +936,13 @@ private void processNewCandidateStripes(TreeMap newStripes) if (isFlush) { long newSize = StripeCompactionPolicy.getTotalFileSize(newStripes.values()); LOG.warn("Stripes were created by a flush, but results of size " + newSize - + " cannot be added because the stripes have changed"); + + " cannot be added because the stripes have changed"); canAddNewStripes = false; filesForL0 = newStripes.values(); } else { long oldSize = StripeCompactionPolicy.getTotalFileSize(conflictingFiles); LOG.info(conflictingFiles.size() + " conflicting files (likely created by a flush) " - + " of size " + oldSize + " are moved to L0 due to concurrent stripe change"); + + " of size " + oldSize + " are moved to L0 due to concurrent stripe change"); filesForL0 = conflictingFiles; } if (filesForL0 != null) { @@ -980,8 +978,8 @@ private void processNewCandidateStripes(TreeMap newStripes) assert !isOpen(previousEndRow); byte[] startRow = startOf(newStripe.getValue()); if (!rowEquals(previousEndRow, startRow)) { - throw new IllegalStateException("The new stripes produced by " + - (isFlush ? "flush" : "compaction") + " are not contiguous"); + throw new IllegalStateException("The new stripes produced by " + + (isFlush ? "flush" : "compaction") + " are not contiguous"); } } // Add the new stripe. @@ -1037,7 +1035,7 @@ public Collection getUnneededFiles(long maxTs, List file } private Collection findExpiredFiles(ImmutableList stripe, long maxTs, - List filesCompacting, Collection expiredStoreFiles) { + List filesCompacting, Collection expiredStoreFiles) { // Order by seqnum is reversed. for (int i = 1; i < stripe.size(); ++i) { HStoreFile sf = stripe.get(i); @@ -1045,7 +1043,7 @@ private Collection findExpiredFiles(ImmutableList stripe long fileTs = sf.getReader().getMaxTimestamp(); if (fileTs < maxTs && !filesCompacting.contains(sf)) { LOG.info("Found an expired store file: " + sf.getPath() + " whose maxTimestamp is " - + fileTs + ", which is below " + maxTs); + + fileTs + ", which is below " + maxTs); if (expiredStoreFiles == null) { expiredStoreFiles = new ArrayList<>(); } @@ -1073,9 +1071,8 @@ public double getCompactionPressure() { double max = 0.0; for (ImmutableList stripeFile : stateLocal.stripeFiles) { int stripeFileCount = stripeFile.size(); - double normCount = - (double) (stripeFileCount + delta - config.getStripeCompactMinFiles()) - / (blockingFilePerStripe - config.getStripeCompactMinFiles()); + double normCount = (double) (stripeFileCount + delta - config.getStripeCompactMinFiles()) + / (blockingFilePerStripe - config.getStripeCompactMinFiles()); if (normCount >= 1.0) { // This could happen if stripe is not split evenly. Do not return values that larger than // 1.0 because we have not reached the blocking file count actually. 
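The getCompactionPressure hunk just above likewise only re-wraps the expression: a stripe's pressure is its file count normalised between the minimum compactable count and the per-stripe blocking count, and the original caps values that reach 1.0 before the blocking count is actually hit. A rough sketch with illustrative values (the two config numbers are assumptions, not HBase defaults):

// Sketch of the per-stripe compaction-pressure normalisation; values are illustrative.
public class CompactionPressureExample {
  public static void main(String[] args) {
    int stripeCompactMinFiles = 3;  // assumed minimum files before a stripe compacts
    int blockingFilePerStripe = 10; // assumed per-stripe share of the blocking file count
    int stripeFileCount = 6;        // files currently in the stripe
    int delta = 0;                  // files about to land, e.g. an in-flight flush

    double normCount = (double) (stripeFileCount + delta - stripeCompactMinFiles)
        / (blockingFilePerStripe - stripeCompactMinFiles);
    System.out.println(normCount); // (6 - 3) / (10 - 3) ~ 0.43
  }
}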
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index fb9115e01ecf..6df7bd34e3b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,8 +34,8 @@ import org.slf4j.LoggerFactory; /** - * Stripe implementation of StoreFlusher. Flushes files either into L0 file w/o metadata, or - * into separate striped files, avoiding L0. + * Stripe implementation of StoreFlusher. Flushes files either into L0 file w/o metadata, or into + * separate striped files, avoiding L0. */ @InterfaceAudience.Private public class StripeStoreFlusher extends StoreFlusher { @@ -45,8 +44,8 @@ public class StripeStoreFlusher extends StoreFlusher { private final StripeCompactionPolicy policy; private final StripeCompactionPolicy.StripeInformationProvider stripes; - public StripeStoreFlusher(Configuration conf, HStore store, - StripeCompactionPolicy policy, StripeStoreFileManager stripes) { + public StripeStoreFlusher(Configuration conf, HStore store, StripeCompactionPolicy policy, + StripeStoreFileManager stripes) { super(conf, store); this.policy = policy; this.stripes = stripes; @@ -66,8 +65,8 @@ public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum InternalScanner scanner = createScanner(snapshot.getScanners(), tracker); // Let policy select flush method. - StripeFlushRequest req = this.policy.selectFlush(store.getComparator(), this.stripes, - cellsCount); + StripeFlushRequest req = + this.policy.selectFlush(store.getComparator(), this.stripes, cellsCount); boolean success = false; StripeMultiFileWriter mw = null; @@ -124,7 +123,7 @@ public StripeFlushRequest(CellComparator comparator) { public StripeMultiFileWriter createWriter() throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter(comparator, 1, - Long.MAX_VALUE, OPEN_KEY, OPEN_KEY); + Long.MAX_VALUE, OPEN_KEY, OPEN_KEY); writer.setNoStripeMetadata(); return writer; } @@ -143,7 +142,7 @@ public BoundaryStripeFlushRequest(CellComparator comparator, List target @Override public StripeMultiFileWriter createWriter() throws IOException { return new StripeMultiFileWriter.BoundaryMultiWriter(comparator, targetBoundaries, null, - null); + null); } } @@ -154,8 +153,8 @@ public static class SizeStripeFlushRequest extends StripeFlushRequest { /** * @param targetCount The maximum number of stripes to flush into. - * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than - * total number of kvs, all the overflow data goes into the last stripe. + * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than total + * number of kvs, all the overflow data goes into the last stripe. 
*/ public SizeStripeFlushRequest(CellComparator comparator, int targetCount, long targetKvs) { super(comparator); @@ -166,7 +165,7 @@ public SizeStripeFlushRequest(CellComparator comparator, int targetCount, long t @Override public StripeMultiFileWriter createWriter() throws IOException { return new StripeMultiFileWriter.SizeMultiWriter(comparator, this.targetCount, this.targetKvs, - OPEN_KEY, OPEN_KEY); + OPEN_KEY, OPEN_KEY); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java index e8eaf452d01a..a49df9378d39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java @@ -19,12 +19,11 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; /** - * Accounting of current heap and data sizes. - * Thread-safe. Many threads can do updates against this single instance. + * Accounting of current heap and data sizes. Thread-safe. Many threads can do updates against this + * single instance. * @see NonThreadSafeMemStoreSizing * @see MemStoreSize */ @@ -56,7 +55,7 @@ public MemStoreSize getMemStoreSize() { @Override public long incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, - int cellsCountDelta) { + int cellsCountDelta) { this.offHeapSize.addAndGet(offHeapSizeDelta); this.heapSize.addAndGet(heapSizeDelta); this.cellsCount.addAndGet(cellsCountDelta); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index fdf9db273a69..e3ce20839331 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,14 +35,13 @@ /** * Stores minimum and maximum timestamp values, it is [minimumTimestamp, maximumTimestamp] in - * interval notation. - * Use this class at write-time ONLY. Too much synchronization to use at read time - * Use {@link TimeRange} at read time instead of this. See toTimeRange() to make TimeRange to use. - * MemStores use this class to track minimum and maximum timestamps. The TimeRangeTracker made by - * the MemStore is passed to the StoreFile for it to write out as part a flush in the the file + * interval notation. Use this class at write-time ONLY. Too much synchronization to use at read + * time Use {@link TimeRange} at read time instead of this. See toTimeRange() to make TimeRange to + * use. MemStores use this class to track minimum and maximum timestamps. The TimeRangeTracker made + * by the MemStore is passed to the StoreFile for it to write out as part a flush in the the file * metadata. If no memstore involved -- i.e. a compaction -- then the StoreFile will calculate its - * own TimeRangeTracker as it appends. The StoreFile serialized TimeRangeTracker is used - * at read time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. 
+ * own TimeRangeTracker as it appends. The StoreFile serialized TimeRangeTracker is used at read + * time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. */ @InterfaceAudience.Private public abstract class TimeRangeTracker { @@ -92,13 +90,17 @@ public static TimeRangeTracker create(Type type, long minimumTimestamp, long max } protected abstract void setMax(long ts); + protected abstract void setMin(long ts); + protected abstract boolean compareAndSetMin(long expect, long update); + protected abstract boolean compareAndSetMax(long expect, long update); + /** - * Update the current TimestampRange to include the timestamp from cell. - * If the Key is of type DeleteColumn or DeleteFamily, it includes the - * entire time range from 0 to timestamp of the key. + * Update the current TimestampRange to include the timestamp from cell. If the Key + * is of type DeleteColumn or DeleteFamily, it includes the entire time range from 0 to timestamp + * of the key. * @param cell the Cell to include */ public void includeTimestamp(final Cell cell) { @@ -112,8 +114,8 @@ public void includeTimestamp(final Cell cell) { * If required, update the current TimestampRange to include timestamp * @param timestamp the timestamp value to include */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MT_CORRECTNESS", + justification = "Intentional") void includeTimestamp(final long timestamp) { long initialMinTimestamp = getMin(); if (timestamp < initialMinTimestamp) { @@ -128,14 +130,14 @@ void includeTimestamp(final long timestamp) { } // When it reaches here, there are two possibilities: - // 1). timestamp >= curMinTimestamp, someone already sets the minimumTimestamp. In this case, - // it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP to see - // if it needs to update minimumTimestamp. Someone may already set both - // minimumTimestamp/minimumTimestamp to the same value(curMinTimestamp), - // need to check if maximumTimestamp needs to be updated. - // 2). timestamp < curMinTimestamp, it sets the minimumTimestamp successfully. - // In this case,it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP - // to see if it needs to set maximumTimestamp. + // 1). timestamp >= curMinTimestamp, someone already sets the minimumTimestamp. In this case, + // it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP to see + // if it needs to update minimumTimestamp. Someone may already set both + // minimumTimestamp/minimumTimestamp to the same value(curMinTimestamp), + // need to check if maximumTimestamp needs to be updated. + // 2). timestamp < curMinTimestamp, it sets the minimumTimestamp successfully. + // In this case,it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP + // to see if it needs to set maximumTimestamp. if (initialMinTimestamp != INITIAL_MIN_TIMESTAMP) { // Someone already sets minimumTimestamp and timestamp is less than minimumTimestamp. // In this case, no need to set maximumTimestamp as it will be set to at least @@ -185,8 +187,7 @@ public String toString() { /** * @param data the serialization data. It can't be null! * @return An instance of NonSyncTimeRangeTracker filled w/ the content of serialized - * NonSyncTimeRangeTracker in timeRangeTrackerBytes. - * @throws IOException + * NonSyncTimeRangeTracker in timeRangeTrackerBytes. 
n */ public static TimeRangeTracker parseFrom(final byte[] data) throws IOException { return parseFrom(data, Type.NON_SYNC); @@ -207,11 +208,11 @@ public static TimeRangeTracker parseFrom(final byte[] data, Type type) throws IO } /** - * This method used to serialize TimeRangeTracker (TRT) by protobuf while this breaks the - * forward compatibility on HFile.(See HBASE-21008) In previous hbase version ( < 2.0.0 ) we use - * DataOutput to serialize TRT, these old versions don't have capability to deserialize TRT - * which is serialized by protobuf. So we need to revert the change of serializing - * TimeRangeTracker back to DataOutput. For more information, please check HBASE-21012. + * This method used to serialize TimeRangeTracker (TRT) by protobuf while this breaks the forward + * compatibility on HFile.(See HBASE-21008) In previous hbase version ( < 2.0.0 ) we use + * DataOutput to serialize TRT, these old versions don't have capability to deserialize TRT which + * is serialized by protobuf. So we need to revert the change of serializing TimeRangeTracker back + * to DataOutput. For more information, please check HBASE-21012. * @param tracker TimeRangeTracker needed to be serialized. * @return byte array filled with serialized TimeRangeTracker. * @throws IOException if something goes wrong in writeLong. @@ -242,7 +243,7 @@ TimeRange toTimeRange() { return TimeRange.between(min, max); } - //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. + // In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class NonSyncTimeRangeTracker extends TimeRangeTracker { private long minimumTimestamp = INITIAL_MIN_TIMESTAMP; private long maximumTimestamp = INITIAL_MAX_TIMESTAMP; @@ -299,7 +300,7 @@ public long getMax() { } } - //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. + // In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class SyncTimeRangeTracker extends TimeRangeTracker { private final AtomicLong minimumTimestamp = new AtomicLong(INITIAL_MIN_TIMESTAMP); private final AtomicLong maximumTimestamp = new AtomicLong(INITIAL_MAX_TIMESTAMP); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java index d5be356f93f9..2d80fad37a1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,13 +22,10 @@ /** * A list of segment managers coupled with the version of the memstore (version at the time it was - * created). - * This structure helps to guarantee that the compaction pipeline updates after the compaction is - * updated in a consistent (atomic) way. - * Specifically, swapping some of the elements in a compaction pipeline with a new compacted - * element is permitted only if the pipeline version is the same as the version attached to the - * elements. - * + * created). This structure helps to guarantee that the compaction pipeline updates after the + * compaction is updated in a consistent (atomic) way. 
Specifically, swapping some of the elements + * in a compaction pipeline with a new compacted element is permitted only if the pipeline version + * is the same as the version attached to the elements. */ @InterfaceAudience.Private public class VersionedSegmentsList { @@ -70,9 +66,9 @@ public int getNumOfSegments() { for (ImmutableSegment s : storeSegments) { double segmentUniques = s.getNumUniqueKeys(); - if(segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) { + if (segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) { segmentCells = s.getCellsCount(); - if(segmentCells > maxCells) { + if (segmentCells > maxCells) { maxCells = segmentCells; est = segmentUniques / segmentCells; } @@ -80,7 +76,7 @@ public int getNumOfSegments() { // else ignore this segment specifically since if the unique number is unknown counting // cells can be expensive } - if(maxCells == 0) { + if (maxCells == 0) { return 1.0; } return est; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java index 23d16934b65c..f5a662ffe14f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ */ @InterfaceAudience.Private public abstract class AbstractMultiOutputCompactor - extends Compactor { + extends Compactor { private static final Logger LOG = LoggerFactory.getLogger(AbstractMultiOutputCompactor.class); @@ -52,15 +52,15 @@ protected final void initMultiWriter(AbstractMultiFileWriter writer, InternalSca WriterFactory writerFactory = new WriterFactory() { @Override public StoreFileWriter createWriter() throws IOException { - return AbstractMultiOutputCompactor.this - .createWriter(fd, shouldDropBehind, major, writerCreationTracker); + return AbstractMultiOutputCompactor.this.createWriter(fd, shouldDropBehind, major, + writerCreationTracker); } @Override public StoreFileWriter createWriterWithStoragePolicy(String fileStoragePolicy) throws IOException { - return AbstractMultiOutputCompactor.this - .createWriter(fd, shouldDropBehind, fileStoragePolicy, major, writerCreationTracker); + return AbstractMultiOutputCompactor.this.createWriter(fd, shouldDropBehind, + fileStoragePolicy, major, writerCreationTracker); } }; // Prepare multi-writer, and perform the compaction using scanner and writer. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java index ea711c037729..cc26068190eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -42,7 +44,6 @@ public CloseChecker(Configuration conf, long currentTime) { /** * Check periodically to see if a system stop is requested every written bytes reach size limit. - * * @return if true, system stop. */ public boolean isSizeLimit(Store store, long bytesWritten) { @@ -61,7 +62,6 @@ public boolean isSizeLimit(Store store, long bytesWritten) { /** * Check periodically to see if a system stop is requested every time. - * * @return if true, system stop. */ public boolean isTimeLimit(Store store, long now) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index 75966b9e7467..eea5e219d298 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,31 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; /** *

    - * Compaction configuration for a particular instance of HStore. - * Takes into account both global settings and ones set on the column family/store. - * Control knobs for default compaction algorithm: + * Compaction configuration for a particular instance of HStore. Takes into account both global + * settings and ones set on the column family/store. Control knobs for default compaction algorithm: *

    *

    - * maxCompactSize - upper bound on file size to be included in minor compactions - * minCompactSize - lower bound below which compaction is selected without ratio test - * minFilesToCompact - lower bound on number of files in any minor compaction - * maxFilesToCompact - upper bound on number of files in any minor compaction - * compactionRatio - Ratio used for compaction - * minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195) + * maxCompactSize - upper bound on file size to be included in minor compactions minCompactSize - + * lower bound below which compaction is selected without ratio test minFilesToCompact - lower bound + * on number of files in any minor compaction maxFilesToCompact - upper bound on number of files in + * any minor compaction compactionRatio - Ratio used for compaction minLocalityToForceCompact - + * Locality threshold for a store file to major compact (HBASE-11195) *

    * Set parameter as "hbase.hstore.compaction.<attribute>" */ @@ -62,14 +58,14 @@ public class CompactionConfiguration { public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY = "hbase.hstore.compaction.max.size"; public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY = - "hbase.hstore.compaction.max.size.offpeak"; + "hbase.hstore.compaction.max.size.offpeak"; public static final String HBASE_HSTORE_OFFPEAK_END_HOUR = "hbase.offpeak.end.hour"; public static final String HBASE_HSTORE_OFFPEAK_START_HOUR = "hbase.offpeak.start.hour"; public static final String HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT = - "hbase.hstore.min.locality.to.skip.major.compact"; + "hbase.hstore.min.locality.to.skip.major.compact"; public static final String HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT = - "hbase.hfile.compaction.discharger.thread.count"; + "hbase.hfile.compaction.discharger.thread.count"; /* * The epoch time length for the windows we no longer compact @@ -83,14 +79,16 @@ public class CompactionConfiguration { public static final String DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY = "hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction"; - private static final Class - DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS = ExploringCompactionPolicy.class; + private static final Class< + ? extends RatioBasedCompactionPolicy> DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS = + ExploringCompactionPolicy.class; public static final String DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY = "hbase.hstore.compaction.date.tiered.window.factory.class"; - private static final Class - DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS = ExponentialCompactionWindowFactory.class; + private static final Class< + ? 
extends CompactionWindowFactory> DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS = + ExponentialCompactionWindowFactory.class; public static final String DATE_TIERED_STORAGE_POLICY_ENABLE_KEY = "hbase.hstore.compaction.date.tiered.storage.policy.enable"; @@ -139,34 +137,34 @@ public class CompactionConfiguration { this.storeConfigInfo = storeConfigInfo; maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE); - offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, - maxCompactSize); - minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, - storeConfigInfo.getMemStoreFlushSize()); + offPeakMaxCompactSize = + conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, maxCompactSize); + minCompactSize = + conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, storeConfigInfo.getMemStoreFlushSize()); minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY, - conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY_OLD, 3))); + conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY_OLD, 3))); maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10); compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F); throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle", - 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); - majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, - HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); - majorCompactionJitter = conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER, - HConstants.DEFAULT_MAJOR_COMPACTION_JITTER); + 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); + majorCompactionPeriod = + conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); + majorCompactionJitter = + conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER, HConstants.DEFAULT_MAJOR_COMPACTION_JITTER); minLocalityToForceCompact = conf.getFloat(HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT, 0f); dateTieredMaxStoreFileAgeMillis = conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE); dateTieredIncomingWindowMin = conf.getInt(DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 6); - compactionPolicyForDateTieredWindow = conf.get( - COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, - DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); - dateTieredSingleOutputForMinorCompaction = conf - .getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); - this.dateTieredCompactionWindowFactory = conf.get( - DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, - DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName()); + compactionPolicyForDateTieredWindow = + conf.get(COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, + DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); + dateTieredSingleOutputForMinorCompaction = + conf.getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); + this.dateTieredCompactionWindowFactory = + conf.get(DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, + DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName()); // for Heterogeneous Storage dateTieredStoragePolicyEnable = conf.getBoolean(DATE_TIERED_STORAGE_POLICY_ENABLE_KEY, false); hotWindowAgeMillis = conf.getLong(DATE_TIERED_HOT_WINDOW_AGE_MILLIS_KEY, 86400000L); @@ -181,32 +179,20 @@ public class CompactionConfiguration { public String toString() { return String.format( "size 
[minCompactSize:%s, maxCompactSize:%s, offPeakMaxCompactSize:%s);" - + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" - + " ratio %f; off-peak ratio %f; throttle point %d;" - + " major period %d, major jitter %f, min locality to compact %f;" - + " tiered compaction: max_age %d, incoming window min %d," - + " compaction policy for tiered window %s, single output for minor %b," - + " compaction window factory %s," - + " region %s columnFamilyName %s", - StringUtils.byteDesc(minCompactSize), - StringUtils.byteDesc(maxCompactSize), - StringUtils.byteDesc(offPeakMaxCompactSize), - minFilesToCompact, - maxFilesToCompact, - compactionRatio, - offPeakCompactionRatio, - throttlePoint, - majorCompactionPeriod, - majorCompactionJitter, - minLocalityToForceCompact, - dateTieredMaxStoreFileAgeMillis, - dateTieredIncomingWindowMin, - compactionPolicyForDateTieredWindow, - dateTieredSingleOutputForMinorCompaction, - dateTieredCompactionWindowFactory, + + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" + + " ratio %f; off-peak ratio %f; throttle point %d;" + + " major period %d, major jitter %f, min locality to compact %f;" + + " tiered compaction: max_age %d, incoming window min %d," + + " compaction policy for tiered window %s, single output for minor %b," + + " compaction window factory %s," + " region %s columnFamilyName %s", + StringUtils.byteDesc(minCompactSize), StringUtils.byteDesc(maxCompactSize), + StringUtils.byteDesc(offPeakMaxCompactSize), minFilesToCompact, maxFilesToCompact, + compactionRatio, offPeakCompactionRatio, throttlePoint, majorCompactionPeriod, + majorCompactionJitter, minLocalityToForceCompact, dateTieredMaxStoreFileAgeMillis, + dateTieredIncomingWindowMin, compactionPolicyForDateTieredWindow, + dateTieredSingleOutputForMinorCompaction, dateTieredCompactionWindowFactory, RegionInfo.prettyPrint(storeConfigInfo.getRegionInfo().getEncodedName()), - storeConfigInfo.getColumnFamilyName() - ); + storeConfigInfo.getColumnFamilyName()); } /** @@ -267,16 +253,16 @@ public long getThrottlePoint() { } /** - * @return Major compaction period from compaction. - * Major compactions are selected periodically according to this parameter plus jitter + * @return Major compaction period from compaction. Major compactions are selected periodically + * according to this parameter plus jitter */ public long getMajorCompactionPeriod() { return majorCompactionPeriod; } /** - * @return Major the jitter fraction, the fraction within which the major compaction - * period is randomly chosen from the majorCompactionPeriod in each store. + * @return Major the jitter fraction, the fraction within which the major compaction period is + * randomly chosen from the majorCompactionPeriod in each store. */ public float getMajorCompactionJitter() { return majorCompactionJitter; @@ -284,8 +270,8 @@ public float getMajorCompactionJitter() { /** * @return Block locality ratio, the ratio at which we will include old regions with a single - * store file for major compaction. Used to improve block locality for regions that - * haven't had writes in a while but are still being read. + * store file for major compaction. Used to improve block locality for regions that + * haven't had writes in a while but are still being read. 
*/ public float getMinLocalityToForceCompact() { return minLocalityToForceCompact; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java index 9aa383c4e66f..df65777ab5d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,27 +19,24 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; - /** - * This class holds all "physical" details necessary to run a compaction, - * and abstracts away the details specific to a particular compaction. - * It also has compaction request with all the logical details. - * Hence, this class is basically the compaction. + * This class holds all "physical" details necessary to run a compaction, and abstracts away the + * details specific to a particular compaction. It also has compaction request with all the logical + * details. Hence, this class is basically the compaction. */ @InterfaceAudience.Private public abstract class CompactionContext { protected CompactionRequestImpl request = null; /** - * Called before coprocessor preCompactSelection and should filter the candidates - * for coprocessor; i.e. exclude the files that definitely cannot be compacted at this time. + * Called before coprocessor preCompactSelection and should filter the candidates for coprocessor; + * i.e. exclude the files that definitely cannot be compacted at this time. * @param filesCompacting files currently compacting * @return the list of files that can theoretically be compacted. */ @@ -48,14 +44,14 @@ public abstract class CompactionContext { /** * Called to select files for compaction. Must fill in the request field if successful. - * @param filesCompacting Files currently being compacted by other compactions. + * @param filesCompacting Files currently being compacted by other compactions. * @param isUserCompaction Whether this is a user compaction. - * @param mayUseOffPeak Whether the underlying policy may assume it's off-peak hours. - * @param forceMajor Whether to force major compaction. + * @param mayUseOffPeak Whether the underlying policy may assume it's off-peak hours. + * @param forceMajor Whether to force major compaction. * @return Whether the selection succeeded. Selection may be empty and lead to no compaction. */ public abstract boolean select(List filesCompacting, boolean isUserCompaction, - boolean mayUseOffPeak, boolean forceMajor) throws IOException; + boolean mayUseOffPeak, boolean forceMajor) throws IOException; /** * Forces external selection to be applied for this compaction. 
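The CompactionConfiguration hunks a little earlier reflow the javadoc and constructor but leave the knobs and their defaults untouched. As a quick illustration of how those hbase.hstore.compaction.* keys are read, here is a small sketch using plain Hadoop Configuration; the key strings follow the naming the class documents and the defaults match the ones visible in the hunk, but treat the whole block as illustrative rather than as an HBase API:

import org.apache.hadoop.conf.Configuration;

// Demonstrates the Configuration lookups the CompactionConfiguration constructor performs.
public class CompactionKnobsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    long maxCompactSize = conf.getLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);
    long offPeakMaxCompactSize =
        conf.getLong("hbase.hstore.compaction.max.size.offpeak", maxCompactSize);
    int maxFilesToCompact = conf.getInt("hbase.hstore.compaction.max", 10);
    float compactionRatio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);
    float offPeakCompactionRatio = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    System.out.println(maxCompactSize + " / " + offPeakMaxCompactSize + " / "
        + maxFilesToCompact + " / " + compactionRatio + " / " + offPeakCompactionRatio);
  }
}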
@@ -66,7 +62,7 @@ public void forceSelect(CompactionRequestImpl request) { } public abstract List compact(ThroughputController throughputController, User user) - throws IOException; + throws IOException; public CompactionRequestImpl getRequest() { assert hasSelection(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java index dfff2f980fbb..5feaf15b631f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java index 755b9d39cb2e..772e21f0424c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.yetus.audience.InterfaceAudience; /** - * A compaction policy determines how to select files for compaction, - * how to compact them, and how to generate the compacted files. + * A compaction policy determines how to select files for compaction, how to compact them, and how + * to generate the compacted files. */ @InterfaceAudience.Private public abstract class CompactionPolicy { @@ -46,7 +43,7 @@ public CompactionPolicy(Configuration conf, StoreConfigInformation storeConfigIn * @return True if we should run a major compaction. */ public abstract boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException; + throws IOException; /** * @param compactionSize Total size of some compaction @@ -55,8 +52,8 @@ public abstract boolean shouldPerformMajorCompaction(Collection file public abstract boolean throttleCompaction(long compactionSize); /** - * Inform the policy that some configuration has been change, - * so cached value should be updated it any. + * Inform the policy that some configuration has been change, so cached value should be updated it + * any. 
*/ public void setConf(Configuration conf) { this.comConf = new CompactionConfiguration(conf, this.storeConfigInfo); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java index 2ccdd150cd21..72634bbf2cef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import org.apache.yetus.audience.InterfaceAudience; @@ -24,13 +22,10 @@ import org.slf4j.LoggerFactory; /** - * This class holds information relevant for tracking the progress of a - * compaction. - * - *

    The metrics tracked allow one to calculate the percent completion of the - * compaction based on the number of Key/Value pairs already compacted vs. - * total amount scheduled to be compacted. - * + * This class holds information relevant for tracking the progress of a compaction. + *

    + * The metrics tracked allow one to calculate the percent completion of the compaction based on the + * number of Key/Value pairs already compacted vs. total amount scheduled to be compacted. */ @InterfaceAudience.Private public class CompactionProgress { @@ -43,18 +38,19 @@ public class CompactionProgress { /** the total size of data processed by the currently running compaction, in bytes */ public long totalCompactedSize = 0; - /** Constructor + /** + * Constructor * @param totalCompactingKVs the total Key/Value pairs to be compacted */ public CompactionProgress(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; } - /** getter for calculated percent complete - * @return float + /** + * getter for calculated percent complete n */ public float getProgressPct() { - return (float)currentCompactedKVs / getTotalCompactingKVs(); + return (float) currentCompactedKVs / getTotalCompactingKVs(); } /** @@ -65,8 +61,8 @@ public void cancel() { } /** - * Marks the compaction as complete by setting total to current KV count; - * Total KV count is an estimate, so there might be a discrepancy otherwise. + * Marks the compaction as complete by setting total to current KV count; Total KV count is an + * estimate, so there might be a discrepancy otherwise. */ public void complete() { this.totalCompactingKVs = this.currentCompactedKVs; @@ -77,8 +73,8 @@ public void complete() { */ public long getTotalCompactingKVs() { if (totalCompactingKVs < currentCompactedKVs) { - LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", - totalCompactingKVs, currentCompactedKVs); + LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", totalCompactingKVs, + currentCompactedKVs); return currentCompactedKVs; } return totalCompactingKVs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 73f36837f9ec..723cefb73d78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import java.util.Collection; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.yetus.audience.InterfaceAudience; -import java.util.Collection; - /** * Coprocessors use this interface to get details about compaction. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java index 5d8285aecdb8..a553ba279971 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +39,13 @@ public class CompactionRequestImpl implements CompactionRequest { // was this compaction promoted to an off-peak private boolean isOffPeak = false; - private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR } + + private enum DisplayCompactionType { + MINOR, + ALL_FILES, + MAJOR + } + private DisplayCompactionType isMajor = DisplayCompactionType.MINOR; private int priority = NO_PRIORITY; private Collection filesToCompact; @@ -87,7 +92,7 @@ public long getSize() { @Override public boolean isAllFiles() { return this.isMajor == DisplayCompactionType.MAJOR - || this.isMajor == DisplayCompactionType.ALL_FILES; + || this.isMajor == DisplayCompactionType.ALL_FILES; } @Override @@ -123,12 +128,13 @@ public long getSelectionTime() { /** * Specify if this compaction should be a major compaction based on the state of the store * @param isMajor true if the system determines that this compaction should be a major - * compaction + * compaction */ public void setIsMajor(boolean isMajor, boolean isAllFiles) { assert isAllFiles || !isMajor; - this.isMajor = !isAllFiles ? DisplayCompactionType.MINOR - : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); + this.isMajor = !isAllFiles + ? DisplayCompactionType.MINOR + : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); } public void setTracker(CompactionLifeCycleTracker tracker) { @@ -236,14 +242,14 @@ public boolean equals(Object obj) { @Override public String toString() { String fsList = filesToCompact.stream().filter(f -> f.getReader() != null) - .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) - .collect(Collectors.joining(", ")); - - return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + - this.getFiles().size() + ", fileSize=" + - TraditionalBinaryPrefix.long2String(totalSize, "", 1) + - ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + - selectionTime; + .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) + .collect(Collectors.joining(", ")); + + return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + + this.getFiles().size() + ", fileSize=" + + TraditionalBinaryPrefix.long2String(totalSize, "", 1) + + ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + + selectionTime; } /** @@ -251,6 +257,6 @@ public String toString() { */ private void recalculateSize() { this.totalSize = filesToCompact.stream().map(HStoreFile::getReader) - .mapToLong(r -> r != null ? r.length() : 0L).sum(); + .mapToLong(r -> r != null ? r.length() : 0L).sum(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java index 31a7ca7ea4ed..b55c173dda94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; - import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; -import edu.umd.cs.findbugs.annotations.Nullable; - /** * Request a compaction. */ @@ -36,19 +34,19 @@ public interface CompactionRequester { * Request compaction on all the stores of the given region. */ void requestCompaction(HRegion region, String why, int priority, - CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; + CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; /** * Request compaction on the given store. */ void requestCompaction(HRegion region, HStore store, String why, int priority, - CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; + CompactionLifeCycleTracker tracker, @Nullable User user) throws IOException; /** * Request system compaction on the given store. */ void requestSystemCompaction(HRegion region, HStore store, String why, - boolean giveUpIfRequestedOrCompacting) throws IOException; + boolean giveUpIfRequestedOrCompacting) throws IOException; /** * on/off compaction diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java index ad0cfb4cb396..d71a9c0593ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java index bd5c85c5770c..9689464f1ba8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 80cc14be74f7..e66a3e05a42c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,10 +72,9 @@ * A compactor is a compaction algorithm associated a given policy. 
Base class also contains * reusable parts for implementing compactors (what is common and what isn't is evolving). *

    - * Compactions might be concurrent against a given store and the Compactor is shared among - * them. Do not put mutable state into class fields. All Compactor class fields should be - * final or effectively final. - * 'keepSeqIdPeriod' is an exception to this rule because unit tests may set it. + * Compactions might be concurrent against a given store and the Compactor is shared among them. Do + * not put mutable state into class fields. All Compactor class fields should be final or + * effectively final. 'keepSeqIdPeriod' is an exception to this rule because unit tests may set it. */ @InterfaceAudience.Private public abstract class Compactor { @@ -91,10 +90,10 @@ public abstract class Compactor { protected int keepSeqIdPeriod; // Configs that drive whether we drop page cache behind compactions - protected static final String MAJOR_COMPACTION_DROP_CACHE = - "hbase.regionserver.majorcompaction.pagecache.drop"; + protected static final String MAJOR_COMPACTION_DROP_CACHE = + "hbase.regionserver.majorcompaction.pagecache.drop"; protected static final String MINOR_COMPACTION_DROP_CACHE = - "hbase.regionserver.minorcompaction.pagecache.drop"; + "hbase.regionserver.minorcompaction.pagecache.drop"; protected final boolean dropCacheMajor; protected final boolean dropCacheMinor; @@ -110,12 +109,15 @@ public abstract class Compactor { this.store = store; this.compactionKVMax = this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); - this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ? - Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType(); - this.minorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ? - Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType(); - this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, - HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD); + this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null) + ? Compression.Algorithm.NONE + : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType(); + this.minorCompactionCompression = (store.getColumnFamilyDescriptor() == null) + ? Compression.Algorithm.NONE + : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType(); + this.keepSeqIdPeriod = + Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, HConstants.MIN_KEEP_SEQID_PERIOD), + HConstants.MIN_KEEP_SEQID_PERIOD); this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true); this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true); } @@ -137,7 +139,7 @@ protected static class FileDetails { public long maxSeqId = 0; /** Latest memstore read point found in any of the involved files */ public long maxMVCCReadpoint = 0; - /** Max tags length**/ + /** Max tags length **/ public int maxTagsLength = 0; /** Min SeqId to keep during a major compaction **/ public long minSeqIdToKeep = 0; @@ -148,21 +150,21 @@ protected static class FileDetails { /** * Extracts some details about the files to compact that are commonly needed by compactors. * @param filesToCompact Files. - * @param allFiles Whether all files are included for compaction + * @param allFiles Whether all files are included for compaction * @parma major If major compaction * @return The result. 
*/ - private FileDetails getFileDetails( - Collection filesToCompact, boolean allFiles, boolean major) throws IOException { + private FileDetails getFileDetails(Collection filesToCompact, boolean allFiles, + boolean major) throws IOException { FileDetails fd = new FileDetails(); - long oldestHFileTimestampToKeepMVCC = EnvironmentEdgeManager.currentTime() - - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); + long oldestHFileTimestampToKeepMVCC = + EnvironmentEdgeManager.currentTime() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); for (HStoreFile file : filesToCompact) { - if(allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) { + if (allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) { // when isAllFiles is true, all files are compacted so we can calculate the smallest // MVCC value to keep - if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) { + if (fd.minSeqIdToKeep < file.getMaxMemStoreTS()) { fd.minSeqIdToKeep = file.getMaxMemStoreTS(); } } @@ -189,8 +191,7 @@ private FileDetails getFileDetails( // SeqId number. if (r.isBulkLoaded()) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID()); - } - else { + } else { tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (tmp != null) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp)); @@ -215,17 +216,16 @@ private FileDetails getFileDetails( } } tmp = fileInfo.get(TIMERANGE_KEY); - fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax(); - LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, " - + "encoding={}, compression={}, seqNum={}{}", - (file.getPath() == null? null: file.getPath().getName()), - keyCount, - r.getBloomFilterType().toString(), - TraditionalBinaryPrefix.long2String(r.length(), "", 1), - r.getHFileReader().getDataBlockEncoding(), - major ? majorCompactionCompression : minorCompactionCompression, - seqNum, - (allFiles? ", earliestPutTs=" + earliestPutTs: "")); + fd.latestPutTs = + tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax(); + LOG.debug( + "Compacting {}, keycount={}, bloomtype={}, size={}, " + + "encoding={}, compression={}, seqNum={}{}", + (file.getPath() == null ? null : file.getPath().getName()), keyCount, + r.getBloomFilterType().toString(), TraditionalBinaryPrefix.long2String(r.length(), "", 1), + r.getHFileReader().getDataBlockEncoding(), + major ? majorCompactionCompression : minorCompactionCompression, seqNum, + (allFiles ? ", earliestPutTs=" + earliestPutTs : "")); } return fd; } @@ -236,7 +236,7 @@ private FileDetails getFileDetails( * @return Scanners. 
*/ private List createFileScanners(Collection filesToCompact, - long smallestReadPoint, boolean useDropBehind) throws IOException { + long smallestReadPoint, boolean useDropBehind) throws IOException { return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind, smallestReadPoint); } @@ -249,8 +249,8 @@ protected interface InternalScannerFactory { ScanType getScanType(CompactionRequestImpl request); - InternalScanner createScanner(ScanInfo scanInfo, List scanners, ScanType scanType, - FileDetails fd, long smallestReadPoint) throws IOException; + InternalScanner createScanner(ScanInfo scanInfo, List scanners, + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException; } protected final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() { @@ -262,7 +262,7 @@ public ScanType getScanType(CompactionRequestImpl request) { @Override public InternalScanner createScanner(ScanInfo scanInfo, List scanners, - ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { return Compactor.this.createScanner(store, scanInfo, scanners, scanType, smallestReadPoint, fd.earliestPutTs); } @@ -296,13 +296,12 @@ protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropB String fileStoragePolicy, boolean major, Consumer writerCreationTracker) throws IOException { return store.getStoreEngine() - .createWriter( - createParams(fd, shouldDropBehind, major, writerCreationTracker) - .fileStoragePolicy(fileStoragePolicy)); + .createWriter(createParams(fd, shouldDropBehind, major, writerCreationTracker) + .fileStoragePolicy(fileStoragePolicy)); } private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, - User user) throws IOException { + User user) throws IOException { if (store.getCoprocessorHost() == null) { return store.getScanInfo(); } @@ -312,13 +311,13 @@ private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType s /** * Calls coprocessor, if any, to create scanners - after normal scanner creation. - * @param request Compaction request. + * @param request Compaction request. * @param scanType Scan type. - * @param scanner The default scanner created for compaction. + * @param scanner The default scanner created for compaction. * @return Scanner scanner to use (usually the default); null if compaction should not proceed. */ private InternalScanner postCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, - InternalScanner scanner, User user) throws IOException { + InternalScanner scanner, User user) throws IOException { if (store.getCoprocessorHost() == null) { return scanner; } @@ -327,8 +326,8 @@ private InternalScanner postCompactScannerOpen(CompactionRequestImpl request, Sc } protected final List compact(final CompactionRequestImpl request, - InternalScannerFactory scannerFactory, CellSinkFactory sinkFactory, - ThroughputController throughputController, User user) throws IOException { + InternalScannerFactory scannerFactory, CellSinkFactory sinkFactory, + ThroughputController throughputController, User user) throws IOException { FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles(), request.isMajor()); // Find the smallest read point across all the Scanners. 
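A minimal, self-contained sketch (hypothetical Scanner/Writer stand-ins, not the real HBase InternalScanner/CellSink types) of the template shape that the compact() method in the hunks above follows: pull batches from the scanner, hand cells to the sink writer, and commit only when the loop finished cleanly, otherwise abort:

// Sketch only; all types below are illustrative stand-ins, not HBase classes.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class CompactFlowSketch {

  interface Scanner extends AutoCloseable {
    /** Fills {@code out} with the next batch of cells; returns false when exhausted. */
    boolean next(List<String> out) throws IOException;

    @Override
    void close() throws IOException;
  }

  interface Writer {
    void append(String cell) throws IOException;

    List<String> commit() throws IOException;   // analogous role to commitWriter()

    void abort() throws IOException;            // analogous role to abortWriter()
  }

  static List<String> compact(Scanner scanner, Writer writer) throws IOException {
    boolean finished = false;
    List<String> batch = new ArrayList<>();
    try {
      boolean hasMore;
      do {
        hasMore = scanner.next(batch);  // bounded batches, in the spirit of compactionKVMax
        for (String cell : batch) {
          writer.append(cell);          // hand each cell to the sink
        }
        batch.clear();
      } while (hasMore);
      finished = true;
    } finally {
      scanner.close();
      if (!finished) {
        writer.abort();                 // never commit a partially written sink
      }
    }
    return writer.commit();             // reached only when the loop completed cleanly
  }
}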
@@ -360,17 +359,13 @@ protected final List compact(final CompactionRequestImpl request, smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint); cleanSeqId = true; } - writer = sinkFactory.createWriter( - scanner, - fd, - dropCache, - request.isMajor(), + writer = sinkFactory.createWriter(scanner, fd, dropCache, request.isMajor(), request.getWriterCreationTracker()); finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId, throughputController, request.isAllFiles(), request.getFiles().size(), progress); if (!finished) { throw new InterruptedIOException("Aborting compaction of store " + store + " in region " - + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted."); + + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted."); } } finally { // createScanner may fail when seeking hfiles encounter Exception, e.g. even only one hfile @@ -400,26 +395,26 @@ protected final List compact(final CompactionRequestImpl request, } protected abstract List commitWriter(T writer, FileDetails fd, - CompactionRequestImpl request) throws IOException; + CompactionRequestImpl request) throws IOException; protected abstract void abortWriter(T writer) throws IOException; /** * Performs the compaction. - * @param fd FileDetails of cell sink writer - * @param scanner Where to read from. - * @param writer Where to write to. - * @param smallestReadPoint Smallest read point. - * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= - * smallestReadPoint - * @param major Is a major compaction. + * @param fd FileDetails of cell sink writer + * @param scanner Where to read from. + * @param writer Where to write to. + * @param smallestReadPoint Smallest read point. + * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= + * smallestReadPoint + * @param major Is a major compaction. * @param numofFilesToCompact the number of files to compact - * @param progress Progress reporter. + * @param progress Progress reporter. * @return Whether compaction ended; false if it was interrupted for some reason. */ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, - long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, - boolean major, int numofFilesToCompact, CompactionProgress progress) throws IOException { + long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, + boolean major, int numofFilesToCompact, CompactionProgress progress) throws IOException { assert writer instanceof ShipperListener; long bytesWrittenProgressForLog = 0; long bytesWrittenProgressForShippedCall = 0; @@ -436,12 +431,12 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel long now = 0; boolean hasMore; ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); throughputController.start(compactionName); KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? 
(KeyValueScanner) scanner : null; long shippedCallSizeLimit = - (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize(); + (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize(); try { do { hasMore = scanner.next(cells, scannerContext); @@ -519,7 +514,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel } catch (InterruptedException e) { progress.cancel(); throw new InterruptedIOException( - "Interrupted while control throughput of compacting " + compactionName); + "Interrupted while control throughput of compacting " + compactionName); } finally { // Clone last cell in the final because writer will append last cell when committing. If // don't clone here and once the scanner get closed, then the memory of last cell will be @@ -532,33 +527,33 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel } /** - * @param store store - * @param scanners Store file scanners. - * @param scanType Scan type. + * @param store store + * @param scanners Store file scanners. + * @param scanType Scan type. * @param smallestReadPoint Smallest MVCC read point. - * @param earliestPutTs Earliest put across all files. + * @param earliestPutTs Earliest put across all files. * @return A compaction scanner. */ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo, - List scanners, ScanType scanType, long smallestReadPoint, - long earliestPutTs) throws IOException { + List scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) + throws IOException { return new StoreScanner(store, scanInfo, scanners, scanType, smallestReadPoint, earliestPutTs); } /** - * @param store The store. - * @param scanners Store file scanners. - * @param smallestReadPoint Smallest MVCC read point. - * @param earliestPutTs Earliest put across all files. + * @param store The store. + * @param scanners Store file scanners. + * @param smallestReadPoint Smallest MVCC read point. + * @param earliestPutTs Earliest put across all files. * @param dropDeletesFromRow Drop deletes starting with this row, inclusive. Can be null. - * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null. + * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null. * @return A compaction scanner. 
*/ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo, - List scanners, long smallestReadPoint, long earliestPutTs, - byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException { + List scanners, long smallestReadPoint, long earliestPutTs, + byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException { return new StoreScanner(store, scanInfo, scanners, smallestReadPoint, earliestPutTs, - dropDeletesFromRow, dropDeletesToRow); + dropDeletesFromRow, dropDeletesToRow); } /** @@ -569,7 +564,7 @@ public CompactionProgress getProgress() { long totalCompactingKVs = 0; long currentCompactedKVs = 0; long totalCompactedSize = 0; - for (CompactionProgress progress: progressSet) { + for (CompactionProgress progress : progressSet) { totalCompactingKVs += progress.totalCompactingKVs; currentCompactedKVs += progress.currentCompactedKVs; totalCompactedSize += progress.totalCompactedSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java index ebbaa4560472..8c2a65395fbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java @@ -68,7 +68,7 @@ public static int getCurrentHour() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") static void advanceTick() { tick = nextTick(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index f60e97db4836..c083628f60b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -72,24 +71,24 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { private final CompactionWindowFactory windowFactory; public DateTieredCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) - throws IOException { + throws IOException { super(conf, storeConfigInfo); try { - compactionPolicyPerWindow = ReflectionUtils.instantiateWithCustomCtor( - comConf.getCompactionPolicyForDateTieredWindow(), - new Class[] { Configuration.class, StoreConfigInformation.class }, - new Object[] { conf, storeConfigInfo }); + compactionPolicyPerWindow = + ReflectionUtils.instantiateWithCustomCtor(comConf.getCompactionPolicyForDateTieredWindow(), + new Class[] { Configuration.class, StoreConfigInformation.class }, + new Object[] { conf, storeConfigInfo }); } catch (Exception e) { throw new IOException("Unable to load configured compaction policy '" - + comConf.getCompactionPolicyForDateTieredWindow() + "'", e); + + comConf.getCompactionPolicyForDateTieredWindow() + "'", e); } try { - windowFactory = ReflectionUtils.instantiateWithCustomCtor( - comConf.getDateTieredCompactionWindowFactory(), - new Class[] { CompactionConfiguration.class }, new Object[] { comConf }); + windowFactory = + ReflectionUtils.instantiateWithCustomCtor(comConf.getDateTieredCompactionWindowFactory(), + new Class[] { CompactionConfiguration.class }, new Object[] { comConf }); } catch (Exception e) { throw new IOException("Unable to load configured window factory '" - + comConf.getDateTieredCompactionWindowFactory() + "'", e); + + comConf.getDateTieredCompactionWindowFactory() + "'", e); } } @@ -99,7 +98,7 @@ public DateTieredCompactionPolicy(Configuration conf, StoreConfigInformation sto @Override @InterfaceAudience.Private public boolean needsCompaction(Collection storeFiles, - List filesCompacting) { + List filesCompacting) { ArrayList candidates = new ArrayList<>(storeFiles); try { return !selectMinorCompaction(candidates, false, true).getFiles().isEmpty(); @@ -111,7 +110,7 @@ public boolean needsCompaction(Collection storeFiles, @Override public boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException { + throws IOException { long mcTime = getNextMajorCompactTime(filesToCompact); if (filesToCompact == null || mcTime == 0) { if (LOG.isDebugEnabled()) { @@ -125,8 +124,8 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac long now = EnvironmentEdgeManager.currentTime(); if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) { if (LOG.isDebugEnabled()) { - LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " + - now + " mcTime: " + mcTime); + LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " + + now + " mcTime: " + mcTime); } return false; } @@ -136,12 +135,11 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac List boundaries = getCompactBoundariesForMajor(filesToCompact, now); boolean[] filesInWindow = new boolean[boundaries.size()]; - for (HStoreFile file: filesToCompact) { + for (HStoreFile file : filesToCompact) { OptionalLong minTimestamp = file.getMinimumTimestamp(); long oldest = minTimestamp.isPresent() ? 
now - minTimestamp.getAsLong() : Long.MIN_VALUE; if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) { - LOG.debug("Major compaction triggered on store " + this - + "; for TTL maintenance"); + LOG.debug("Major compaction triggered on store " + this + "; for TTL maintenance"); return true; } if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) { @@ -152,19 +150,19 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac } int lowerWindowIndex = - Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE)); + Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE)); int upperWindowIndex = - Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE)); + Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE)); // Handle boundary conditions and negative values of binarySearch lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex; upperWindowIndex = (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex; if (lowerWindowIndex != upperWindowIndex) { - LOG.debug("Major compaction triggered on store " + this + "; because file " - + file.getPath() + " has data with timestamps cross window boundaries"); + LOG.debug("Major compaction triggered on store " + this + "; because file " + file.getPath() + + " has data with timestamps cross window boundaries"); return true; } else if (filesInWindow[upperWindowIndex]) { - LOG.debug("Major compaction triggered on store " + this + - "; because there are more than one file in some windows"); + LOG.debug("Major compaction triggered on store " + this + + "; because there are more than one file in some windows"); return true; } else { filesInWindow[upperWindowIndex] = true; @@ -173,23 +171,24 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac } float blockLocalityIndex = hdfsBlocksDistribution - .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); + .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) { LOG.debug("Major compaction triggered on store " + this - + "; to make hdfs blocks local, current blockLocalityIndex is " - + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + + " (min " + comConf.getMinLocalityToForceCompact() + ")"); return true; } - LOG.debug("Skipping major compaction of " + this + - ", because the files are already major compacted"); + LOG.debug( + "Skipping major compaction of " + this + ", because the files are already major compacted"); return false; } @Override protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { - CompactionRequestImpl result = tryingMajor ? selectMajorCompaction(candidateSelection) + CompactionRequestImpl result = tryingMajor + ? 
selectMajorCompaction(candidateSelection) : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck); if (LOG.isDebugEnabled()) { LOG.debug("Generated compaction request: " + result); @@ -201,8 +200,7 @@ public CompactionRequestImpl selectMajorCompaction(ArrayList candida long now = EnvironmentEdgeManager.currentTime(); List boundaries = getCompactBoundariesForMajor(candidateSelection, now); Map boundariesPolicies = getBoundariesStoragePolicyForMajor(boundaries, now); - return new DateTieredCompactionRequest(candidateSelection, - boundaries, boundariesPolicies); + return new DateTieredCompactionRequest(candidateSelection, boundaries, boundariesPolicies); } /** @@ -214,18 +212,18 @@ public CompactionRequestImpl selectMajorCompaction(ArrayList candida * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id. */ public CompactionRequestImpl selectMinorCompaction(ArrayList candidateSelection, - boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { + boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { long now = EnvironmentEdgeManager.currentTime(); long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now); List> storefileMaxTimestampPairs = - Lists.newArrayListWithCapacity(candidateSelection.size()); + Lists.newArrayListWithCapacity(candidateSelection.size()); long maxTimestampSeen = Long.MIN_VALUE; for (HStoreFile storeFile : candidateSelection) { // if there is out-of-order data, // we put them in the same window as the last file in increasing order maxTimestampSeen = - Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE)); + Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE)); storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen)); } Collections.reverse(storefileMaxTimestampPairs); @@ -233,7 +231,7 @@ public CompactionRequestImpl selectMinorCompaction(ArrayList candida CompactionWindow window = getIncomingWindow(now); int minThreshold = comConf.getDateTieredIncomingWindowMin(); PeekingIterator> it = - Iterators.peekingIterator(storefileMaxTimestampPairs.iterator()); + Iterators.peekingIterator(storefileMaxTimestampPairs.iterator()); while (it.hasNext()) { if (window.compareToTimestamp(oldestToCompact) < 0) { break; @@ -268,27 +266,28 @@ public CompactionRequestImpl selectMinorCompaction(ArrayList candida } private DateTieredCompactionRequest generateCompactionRequest(ArrayList storeFiles, - CompactionWindow window, boolean mayUseOffPeak, boolean mayBeStuck, int minThreshold, - long now) throws IOException { + CompactionWindow window, boolean mayUseOffPeak, boolean mayBeStuck, int minThreshold, long now) + throws IOException { // The files has to be in ascending order for ratio-based compaction to work right // and removeExcessFile to exclude youngest files. Collections.reverse(storeFiles); // Compact everything in the window if have more files than comConf.maxBlockingFiles compactionPolicyPerWindow.setMinThreshold(minThreshold); - ArrayList storeFileSelection = mayBeStuck ? storeFiles + ArrayList storeFileSelection = mayBeStuck + ? storeFiles : compactionPolicyPerWindow.applyCompactionPolicy(storeFiles, mayUseOffPeak, false); if (storeFileSelection != null && !storeFileSelection.isEmpty()) { // If there is any file in the window excluded from compaction, // only one file will be output from compaction. 
- boolean singleOutput = storeFiles.size() != storeFileSelection.size() || - comConf.useDateTieredSingleOutputForMinorCompaction(); + boolean singleOutput = storeFiles.size() != storeFileSelection.size() + || comConf.useDateTieredSingleOutputForMinorCompaction(); List boundaries = getCompactionBoundariesForMinor(window, singleOutput); // we want to generate policy to boundaries for minor compaction Map boundaryPolicyMap = getBoundariesStoragePolicyForMinor(singleOutput, window, now); - DateTieredCompactionRequest result = new DateTieredCompactionRequest(storeFileSelection, - boundaries, boundaryPolicyMap); + DateTieredCompactionRequest result = + new DateTieredCompactionRequest(storeFileSelection, boundaries, boundaryPolicyMap); return result; } return null; @@ -298,15 +297,14 @@ private DateTieredCompactionRequest generateCompactionRequest(ArrayList getCompactBoundariesForMajor(Collection filesToCompact, long now) { - long minTimestamp = - filesToCompact.stream().mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min() - .orElse(Long.MAX_VALUE); + long minTimestamp = filesToCompact.stream() + .mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min().orElse(Long.MAX_VALUE); List boundaries = new ArrayList<>(); // Add startMillis of all windows between now and min timestamp - for (CompactionWindow window = getIncomingWindow(now); window - .compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) { + for (CompactionWindow window = getIncomingWindow(now); window.compareToTimestamp(minTimestamp) + > 0; window = window.nextEarlierWindow()) { boundaries.add(window.startMillis()); } boundaries.add(Long.MIN_VALUE); @@ -318,7 +316,7 @@ private List getCompactBoundariesForMajor(Collection filesToCo * @return a list of boundaries for multiple compaction output from minTimestamp to maxTimestamp. */ private static List getCompactionBoundariesForMinor(CompactionWindow window, - boolean singleOutput) { + boolean singleOutput) { List boundaries = new ArrayList<>(); boundaries.add(Long.MIN_VALUE); if (!singleOutput) { @@ -336,13 +334,13 @@ private static long getOldestToCompact(long maxAgeMillis, long now) { return LongMath.checkedSubtract(now, maxAgeMillis); } catch (ArithmeticException ae) { LOG.warn("Value for " + CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY + ": " - + maxAgeMillis + ". All the files will be eligible for minor compaction."); + + maxAgeMillis + ". All the files will be eligible for minor compaction."); return Long.MIN_VALUE; } } private Map getBoundariesStoragePolicyForMinor(boolean singleOutput, - CompactionWindow window, long now) { + CompactionWindow window, long now) { Map boundariesPolicy = new HashMap<>(); if (!comConf.isDateTieredStoragePolicyEnable()) { return boundariesPolicy; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java index ddf9a0ce2eff..8311b6d36648 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.yetus.audience.InterfaceAudience; -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", - justification="It is intended to use the same equal method as superclass") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_DOESNT_OVERRIDE_EQUALS", + justification = "It is intended to use the same equal method as superclass") @InterfaceAudience.Private public class DateTieredCompactionRequest extends CompactionRequestImpl { private List boundaries; @@ -33,7 +33,7 @@ public class DateTieredCompactionRequest extends CompactionRequestImpl { private Map boundariesPolicies; public DateTieredCompactionRequest(Collection files, List boundaryList, - Map boundaryPolicyMap) { + Map boundaryPolicyMap) { super(files); boundaries = boundaryList; boundariesPolicies = boundaryPolicyMap; @@ -50,6 +50,6 @@ public Map getBoundariesPolicies() { @Override public String toString() { return super.toString() + " boundaries=" + Arrays.toString(boundaries.toArray()) - + " boundariesPolicies="+boundariesPolicies.toString(); + + " boundariesPolicies=" + boundariesPolicies.toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java index c8c10e16ff19..b5911b0cec46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,16 +51,16 @@ private boolean needEmptyFile(CompactionRequestImpl request) { // maxSeqId if we haven't written out anything. 
OptionalLong maxSeqId = StoreUtils.getMaxSequenceIdInList(request.getFiles()); OptionalLong storeMaxSeqId = store.getMaxSequenceId(); - return maxSeqId.isPresent() && storeMaxSeqId.isPresent() && - maxSeqId.getAsLong() == storeMaxSeqId.getAsLong(); + return maxSeqId.isPresent() && storeMaxSeqId.isPresent() + && maxSeqId.getAsLong() == storeMaxSeqId.getAsLong(); } public List compact(final CompactionRequestImpl request, final List lowerBoundaries, - final Map lowerBoundariesPolicies, - ThroughputController throughputController, User user) throws IOException { + final Map lowerBoundariesPolicies, ThroughputController throughputController, + User user) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Executing compaction with " + lowerBoundaries.size() - + "windows, lower boundaries: " + lowerBoundaries); + + "windows, lower boundaries: " + lowerBoundaries); } return compact(request, defaultScannerFactory, @@ -70,21 +70,17 @@ public List compact(final CompactionRequestImpl request, final List public DateTieredMultiFileWriter createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind, boolean major, Consumer writerCreationTracker) throws IOException { - DateTieredMultiFileWriter writer = new DateTieredMultiFileWriter( - lowerBoundaries, - lowerBoundariesPolicies, - needEmptyFile(request)); + DateTieredMultiFileWriter writer = new DateTieredMultiFileWriter(lowerBoundaries, + lowerBoundariesPolicies, needEmptyFile(request)); initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker); return writer; } - }, - throughputController, - user); + }, throughputController, user); } @Override protected List commitWriter(DateTieredMultiFileWriter writer, FileDetails fd, - CompactionRequestImpl request) throws IOException { + CompactionRequestImpl request) throws IOException { List pathList = writer.commitWriters(fd.maxSeqId, request.isAllFiles(), request.getFiles()); return pathList; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 0e91d8870b6a..eb803c3e2a88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,8 +51,8 @@ public DefaultCompactor(Configuration conf, HStore store) { public StoreFileWriter createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind, boolean major, Consumer writerCreationTracker) throws IOException { - return DefaultCompactor.this - .createWriter(fd, shouldDropBehind, major, writerCreationTracker); + return DefaultCompactor.this.createWriter(fd, shouldDropBehind, major, + writerCreationTracker); } }; @@ -60,13 +60,13 @@ public StoreFileWriter createWriter(InternalScanner scanner, FileDetails fd, * Do a minor/major compaction on an explicit set of storefiles from a Store. 
*/ public List compact(final CompactionRequestImpl request, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, User user) throws IOException { return compact(request, defaultScannerFactory, writerFactory, throughputController, user); } @Override protected List commitWriter(StoreFileWriter writer, FileDetails fd, - CompactionRequestImpl request) throws IOException { + CompactionRequestImpl request) throws IOException { List newFiles = Lists.newArrayList(writer.getPath()); writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles()); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index 76bf1d7ac47d..2b54081642f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; @@ -30,10 +28,8 @@ import org.slf4j.LoggerFactory; /** - * Class to pick which files if any to compact together. - * - * This class will search all possibilities for different and if it gets stuck it will choose - * the smallest set of files to compact. + * Class to pick which files if any to compact together. This class will search all possibilities + * for different and if it gets stuck it will choose the smallest set of files to compact. */ @InterfaceAudience.Private public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { @@ -41,25 +37,25 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { /** * Constructor for ExploringCompactionPolicy. - * @param conf The configuration object + * @param conf The configuration object * @param storeConfigInfo An object to provide info about the store. */ public ExploringCompactionPolicy(final Configuration conf, - final StoreConfigInformation storeConfigInfo) { + final StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); } @Override protected final ArrayList applyCompactionPolicy(ArrayList candidates, - boolean mayUseOffPeak, boolean mightBeStuck) throws IOException { + boolean mayUseOffPeak, boolean mightBeStuck) throws IOException { return new ArrayList<>(applyCompactionPolicy(candidates, mightBeStuck, mayUseOffPeak, comConf.getMinFilesToCompact(), comConf.getMaxFilesToCompact())); } public List applyCompactionPolicy(List candidates, boolean mightBeStuck, - boolean mayUseOffPeak, int minFiles, int maxFiles) { - final double currentRatio = mayUseOffPeak - ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); + boolean mayUseOffPeak, int minFiles, int maxFiles) { + final double currentRatio = + mayUseOffPeak ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); // Start off choosing nothing. 
List bestSelection = new ArrayList<>(0); @@ -71,8 +67,7 @@ public List applyCompactionPolicy(List candidates, boole // Consider every starting place. for (int start = 0; start < candidates.size(); start++) { // Consider every different sub list permutation in between start and end with min files. - for (int currentEnd = start + minFiles - 1; - currentEnd < candidates.size(); currentEnd++) { + for (int currentEnd = start + minFiles - 1; currentEnd < candidates.size(); currentEnd++) { List potentialMatchFiles = candidates.subList(start, currentEnd + 1); // Sanity checks @@ -87,7 +82,7 @@ public List applyCompactionPolicy(List candidates, boole // have to be read if this set of files is compacted. long size = getTotalStoreSize(potentialMatchFiles); - // Store the smallest set of files. This stored set of files will be used + // Store the smallest set of files. This stored set of files will be used // if it looks like the algorithm is stuck. if (mightBeStuck && size < smallestSize) { smallest = potentialMatchFiles; @@ -99,8 +94,9 @@ public List applyCompactionPolicy(List candidates, boole } ++opts; - if (size >= comConf.getMinCompactSize() - && !filesInRatio(potentialMatchFiles, currentRatio)) { + if ( + size >= comConf.getMinCompactSize() && !filesInRatio(potentialMatchFiles, currentRatio) + ) { continue; } @@ -113,25 +109,26 @@ public List applyCompactionPolicy(List candidates, boole } } if (bestSelection.isEmpty() && mightBeStuck) { - LOG.debug("Exploring compaction algorithm has selected " + smallest.size() - + " files of size "+ smallestSize + " because the store might be stuck"); + LOG.debug("Exploring compaction algorithm has selected " + smallest.size() + " files of size " + + smallestSize + " because the store might be stuck"); return new ArrayList<>(smallest); } - LOG.debug("Exploring compaction algorithm has selected {} files of size {} starting at " + - "candidate #{} after considering {} permutations with {} in ratio", bestSelection.size(), - bestSize, bestStart, opts, optsInRatio); + LOG.debug( + "Exploring compaction algorithm has selected {} files of size {} starting at " + + "candidate #{} after considering {} permutations with {} in ratio", + bestSelection.size(), bestSize, bestStart, opts, optsInRatio); return new ArrayList<>(bestSelection); } /** - * Select at least one file in the candidates list to compact, through choosing files - * from the head to the index that the accumulation length larger the max compaction size. - * This method is a supplementary of the selectSimpleCompaction() method, aims to make sure - * at least one file can be selected to compact, for compactions like L0 files, which need to - * compact all files and as soon as possible. + * Select at least one file in the candidates list to compact, through choosing files from the + * head to the index that the accumulation length larger the max compaction size. This method is a + * supplementary of the selectSimpleCompaction() method, aims to make sure at least one file can + * be selected to compact, for compactions like L0 files, which need to compact all files and as + * soon as possible. 
*/ public List selectCompactFiles(final List candidates, int maxFiles, - boolean isOffpeak) { + boolean isOffpeak) { long selectedSize = 0L; for (int end = 0; end < Math.min(candidates.size(), maxFiles); end++) { selectedSize += candidates.get(end).getReader().length(); @@ -143,17 +140,17 @@ public List selectCompactFiles(final List candidates, in } private boolean isBetterSelection(List bestSelection, long bestSize, - List selection, long size, boolean mightBeStuck) { + List selection, long size, boolean mightBeStuck) { if (mightBeStuck && bestSize > 0 && size > 0) { // Keep the selection that removes most files for least size. That penaltizes adding // large files to compaction, but not small files, so we don't become totally inefficient // (might want to tweak that in future). Also, given the current order of looking at // permutations, prefer earlier files and smaller selection if the difference is small. final double REPLACE_IF_BETTER_BY = 1.05; - double thresholdQuality = ((double)bestSelection.size() / bestSize) * REPLACE_IF_BETTER_BY; - return thresholdQuality < ((double)selection.size() / size); + double thresholdQuality = ((double) bestSelection.size() / bestSize) * REPLACE_IF_BETTER_BY; + return thresholdQuality < ((double) selection.size() / size); } - // Keep if this gets rid of more files. Or the same number of files for less io. + // Keep if this gets rid of more files. Or the same number of files for less io. return selection.size() > bestSelection.size() || (selection.size() == bestSelection.size() && size < bestSize); } @@ -168,10 +165,9 @@ private long getTotalStoreSize(List potentialMatchFiles) { } /** - * Check that all files satisfy the constraint - * FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio. - * - * @param files List of store files to consider as a compaction candidate. + * Check that all files satisfy the constraint FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) + * ) * Ratio. + * @param files List of store files to consider as a compaction candidate. * @param currentRatio The ratio to use. * @return a boolean if these files satisfy the ratio constraints. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java index 2ec010807ce1..b5e86c589cbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ExponentialCompactionWindowFactory extends CompactionWindowFactory { private static final Logger LOG = - LoggerFactory.getLogger(ExponentialCompactionWindowFactory.class); + LoggerFactory.getLogger(ExponentialCompactionWindowFactory.class); public static final String BASE_WINDOW_MILLIS_KEY = "hbase.hstore.compaction.date.tiered.base.window.millis"; @@ -80,12 +80,14 @@ public int compareToTimestamp(long timestamp) { public Window nextEarlierWindow() { // Don't promote to the next tier if there is not even 1 window at current tier // or if the next window crosses the max age. - if (divPosition % windowsPerTier > 0 - || startMillis() - windowMillis * windowsPerTier < maxTierAgeCutoff) { + if ( + divPosition % windowsPerTier > 0 + || startMillis() - windowMillis * windowsPerTier < maxTierAgeCutoff + ) { return new Window(windowMillis, divPosition - 1, maxTierAgeCutoff); } else { return new Window(windowMillis * windowsPerTier, divPosition / windowsPerTier - 1, - maxTierAgeCutoff); + maxTierAgeCutoff); } } @@ -126,8 +128,8 @@ public ExponentialCompactionWindowFactory(CompactionConfiguration comConf) { Configuration conf = comConf.conf; baseWindowMillis = conf.getLong(BASE_WINDOW_MILLIS_KEY, 3600000 * 6); windowsPerTier = conf.getInt(WINDOWS_PER_TIER_KEY, 4); - maxTierAgeMillis = conf.getLong(MAX_TIER_AGE_MILLIS_KEY, - comConf.getDateTieredMaxStoreFileAgeMillis()); + maxTierAgeMillis = + conf.getLong(MAX_TIER_AGE_MILLIS_KEY, comConf.getDateTieredMaxStoreFileAgeMillis()); LOG.info(toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java index 344b90d5f85b..716f95a8dac8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; @@ -33,22 +31,18 @@ import org.slf4j.LoggerFactory; /** - * - * FIFO compaction policy selects only files which have all cells expired. - * The column family MUST have non-default TTL. One of the use cases for this - * policy is when we need to store raw data which will be post-processed later - * and discarded completely after quite short period of time. Raw time-series vs. - * time-based roll up aggregates and compacted time-series. We collect raw time-series - * and store them into CF with FIFO compaction policy, periodically we run task - * which creates roll up aggregates and compacts time-series, the original raw data - * can be discarded after that. - * + * FIFO compaction policy selects only files which have all cells expired. The column family MUST + * have non-default TTL. 
One of the use cases for this policy is when we need to store raw data + * which will be post-processed later and discarded completely after quite short period of time. Raw + * time-series vs. time-based roll up aggregates and compacted time-series. We collect raw + * time-series and store them into CF with FIFO compaction policy, periodically we run task which + * creates roll up aggregates and compacts time-series, the original raw data can be discarded after + * that. */ @InterfaceAudience.Private public class FIFOCompactionPolicy extends ExploringCompactionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(FIFOCompactionPolicy.class); + private static final Logger LOG = LoggerFactory.getLogger(FIFOCompactionPolicy.class); public FIFOCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); @@ -56,15 +50,15 @@ public FIFOCompactionPolicy(Configuration conf, StoreConfigInformation storeConf @Override public CompactionRequestImpl selectCompaction(Collection candidateFiles, - List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, - boolean forceMajor) throws IOException { - if(forceMajor){ + List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, + boolean forceMajor) throws IOException { + if (forceMajor) { LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag."); } boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate selection to the parent policy."); - return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, + return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor); } @@ -78,7 +72,7 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi public boolean shouldPerformMajorCompaction(Collection filesToCompact) throws IOException { boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate to the parent policy."); return super.shouldPerformMajorCompaction(filesToCompact); } @@ -87,9 +81,9 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac @Override public boolean needsCompaction(Collection storeFiles, - List filesCompacting) { + List filesCompacting) { boolean isAfterSplit = StoreUtils.hasReferences(storeFiles); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate to the parent policy."); return super.needsCompaction(storeFiles, filesCompacting); } @@ -126,7 +120,7 @@ private boolean hasExpiredStores(Collection files) { } private Collection getExpiredStores(Collection files, - Collection filesCompacting) { + Collection filesCompacting) { long currentTime = EnvironmentEdgeManager.currentTime(); Collection expiredStores = new ArrayList<>(); for (HStoreFile sf : files) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java index eecc78057120..dc3319fe22d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.yetus.audience.InterfaceAudience; @@ -27,7 +26,7 @@ */ @InterfaceAudience.Private public class ForbidMajorCompactionChecker - implements BiPredicate { + implements BiPredicate { private static final ForbidMajorCompactionChecker INST = new ForbidMajorCompactionChecker(); @@ -35,7 +34,7 @@ public class ForbidMajorCompactionChecker public boolean test(SyncReplicationState state, SyncReplicationState newState) { // Forbid major compaction when cluster transit sync replication state from S to DA return state == SyncReplicationState.STANDBY - || newState == SyncReplicationState.DOWNGRADE_ACTIVE; + || newState == SyncReplicationState.DOWNGRADE_ACTIVE; } public static ForbidMajorCompactionChecker get() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java index b920de2b57d9..979ec368cc42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java @@ -17,18 +17,25 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; @InterfaceAudience.Private public abstract class OffPeakHours { private static final Logger LOG = LoggerFactory.getLogger(OffPeakHours.class); public static final OffPeakHours DISABLED = new OffPeakHours() { - @Override public boolean isOffPeakHour() { return false; } - @Override public boolean isOffPeakHour(int targetHour) { return false; } + @Override + public boolean isOffPeakHour() { + return false; + } + + @Override + public boolean isOffPeakHour(int targetHour) { + return false; + } }; public static OffPeakHours getInstance(Configuration conf) { @@ -39,18 +46,17 @@ public static OffPeakHours getInstance(Configuration conf) { /** * @param startHour inclusive - * @param endHour exclusive + * @param endHour exclusive */ public static OffPeakHours getInstance(int startHour, int endHour) { if (startHour == -1 && endHour == -1) { return DISABLED; } - if (! isValidHour(startHour) || ! isValidHour(endHour)) { + if (!isValidHour(startHour) || !isValidHour(endHour)) { if (LOG.isWarnEnabled()) { - LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + - startHour + " end = " + endHour + - ". Valid numbers are [0-23]"); + LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + startHour + " end = " + + endHour + ". 
Valid numbers are [0-23]"); } return DISABLED; } @@ -82,7 +88,7 @@ private static class OffPeakHoursImpl extends OffPeakHours { /** * @param startHour inclusive - * @param endHour exclusive + * @param endHour exclusive */ OffPeakHoursImpl(int startHour, int endHour) { this.startHour = startHour; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 425df1bb10ba..932b18d61804 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Collection; import java.util.List; import java.util.OptionalLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.HStore; @@ -37,16 +35,15 @@ import org.slf4j.LoggerFactory; /** - * The default algorithm for selecting files for compaction. - * Combines the compaction configuration and the provisional file selection that - * it's given to produce the list of suitable candidates for compaction. + * The default algorithm for selecting files for compaction. Combines the compaction configuration + * and the provisional file selection that it's given to produce the list of suitable candidates for + * compaction. */ @InterfaceAudience.Private public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { private static final Logger LOG = LoggerFactory.getLogger(RatioBasedCompactionPolicy.class); - public RatioBasedCompactionPolicy(Configuration conf, - StoreConfigInformation storeConfigInfo) { + public RatioBasedCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); } @@ -68,14 +65,14 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) { String regionInfo; if (this.storeConfigInfo != null && this.storeConfigInfo instanceof HStore) { - regionInfo = ((HStore)this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); + regionInfo = ((HStore) this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); } else { regionInfo = this.toString(); } // Major compaction time has elapsed. long cfTTL = HConstants.FOREVER; if (this.storeConfigInfo != null) { - cfTTL = this.storeConfigInfo.getStoreFileTtl(); + cfTTL = this.storeConfigInfo.getStoreFileTtl(); } if (filesToCompact.size() == 1) { // Single file @@ -83,19 +80,18 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac OptionalLong minTimestamp = sf.getMinimumTimestamp(); long oldest = minTimestamp.isPresent() ? 
now - minTimestamp.getAsLong() : Long.MIN_VALUE; if (sf.isMajorCompactionResult() && (cfTTL == Long.MAX_VALUE || oldest < cfTTL)) { - float blockLocalityIndex = - sf.getHDFSBlockDistribution().getBlockLocalityIndex( - DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); + float blockLocalityIndex = sf.getHDFSBlockDistribution() + .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) { LOG.debug("Major compaction triggered on only store " + regionInfo - + "; to make hdfs blocks local, current blockLocalityIndex is " - + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + + " (min " + comConf.getMinLocalityToForceCompact() + ")"); result = true; } else { LOG.debug("Skipping major compaction of " + regionInfo - + " because one (major) compacted file only, oldestTime " + oldest - + "ms is < TTL=" + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex - + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + " because one (major) compacted file only, oldestTime " + oldest + "ms is < TTL=" + + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex + " (min " + + comConf.getMinLocalityToForceCompact() + ")"); } } else if (cfTTL != HConstants.FOREVER && oldest > cfTTL) { LOG.debug("Major compaction triggered on store " + regionInfo @@ -113,48 +109,33 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac } @Override - protected CompactionRequestImpl createCompactionRequest(ArrayList - candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) - throws IOException { + protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, + boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (!tryingMajor) { filterBulk(candidateSelection); candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck); - candidateSelection = checkMinFilesCriteria(candidateSelection, - comConf.getMinFilesToCompact()); + candidateSelection = + checkMinFilesCriteria(candidateSelection, comConf.getMinFilesToCompact()); } return new CompactionRequestImpl(candidateSelection); } /** - * -- Default minor compaction selection algorithm: - * choose CompactSelection from candidates -- - * First exclude bulk-load files if indicated in configuration. - * Start at the oldest file and stop when you find the first file that - * meets compaction criteria: - * (1) a recently-flushed, small file (i.e. <= minCompactSize) - * OR - * (2) within the compactRatio of sum(newer_files) - * Given normal skew, any newer files will also meet this criteria - *

    - * Additional Note: - * If fileSizes.size() >> maxFilesToCompact, we will recurse on - * compact(). Consider the oldest files first to avoid a - * situation where we always compact [end-threshold,end). Then, the - * last file becomes an aggregate of the previous compactions. - * - * normal skew: - * - * older ----> newer (increasing seqID) - * _ - * | | _ - * | | | | _ - * --|-|- |-|- |-|---_-------_------- minCompactSize - * | | | | | | | | _ | | - * | | | | | | | | | | | | - * | | | | | | | | | | | | - * @param candidates pre-filtrate - * @return filtered subset - */ + * -- Default minor compaction selection algorithm: choose CompactSelection from candidates -- + * First exclude bulk-load files if indicated in configuration. Start at the oldest file and stop + * when you find the first file that meets compaction criteria: (1) a recently-flushed, small file + * (i.e. <= minCompactSize) OR (2) within the compactRatio of sum(newer_files) Given normal skew, + * any newer files will also meet this criteria + *
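A minimal, self-contained sketch of the windowing rule this selection note describes (illustrative only, not the HBase method itself; fileSizes runs oldest-to-newest and sumSize[i] sums the window fileSizes[i, i + maxFilesToCompact - 1)):

    // Illustrative: skip old files that are too large relative to the newer files after them.
    static int selectStart(long[] fileSizes, long minCompactSize, double ratio,
        int minFilesToCompact, int maxFilesToCompact) {
      int count = fileSizes.length;
      long[] sumSize = new long[count + 1];               // sumSize[count] == 0
      for (int i = count - 1; i >= 0; i--) {
        int tooFar = i + maxFilesToCompact - 1;
        sumSize[i] = fileSizes[i] + sumSize[i + 1] - (tooFar < count ? fileSizes[tooFar] : 0);
      }
      int start = 0;
      while (count - start >= minFilesToCompact
          && fileSizes[start] > Math.max(minCompactSize, (long) (sumSize[start + 1] * ratio))) {
        ++start;
      }
      return start;                                       // candidates are files [start, count)
    }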

    + * Additional Note: If fileSizes.size() >> maxFilesToCompact, we will recurse on compact(). + * Consider the oldest files first to avoid a situation where we always compact + * [end-threshold,end). Then, the last file becomes an aggregate of the previous compactions. + * normal skew: older ----> newer (increasing seqID) _ | | _ | | | | _ --|-|- |-|- + * |-|---_-------_------- minCompactSize | | | | | | | | _ | | | | | | | | | | | | | | | | | | | | + * | | | | | | + * @param candidates pre-filtrate + * @return filtered subset + */ protected ArrayList applyCompactionPolicy(ArrayList candidates, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (candidates.isEmpty()) { @@ -178,15 +159,14 @@ protected ArrayList applyCompactionPolicy(ArrayList cand fileSizes[i] = file.getReader().length(); // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo int tooFar = i + comConf.getMaxFilesToCompact() - 1; - sumSize[i] = fileSizes[i] - + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) + sumSize[i] = fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0); } - - while (countOfFiles - start >= comConf.getMinFilesToCompact() && - fileSizes[start] > Math.max(comConf.getMinCompactSize(), - (long) (sumSize[start + 1] * ratio))) { + while ( + countOfFiles - start >= comConf.getMinFilesToCompact() && fileSizes[start] + > Math.max(comConf.getMinCompactSize(), (long) (sumSize[start + 1] * ratio)) + ) { ++start; } if (start < countOfFiles) { @@ -205,13 +185,13 @@ protected ArrayList applyCompactionPolicy(ArrayList cand /** * A heuristic method to decide whether to schedule a compaction request - * @param storeFiles files in the store. + * @param storeFiles files in the store. * @param filesCompacting files being scheduled to compact. * @return true to schedule a request. */ @Override public boolean needsCompaction(Collection storeFiles, - List filesCompacting) { + List filesCompacting) { int numCandidates = storeFiles.size() - filesCompacting.size(); return numCandidates >= comConf.getMinFilesToCompact(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java index db469c420ca0..1d039de96fb6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
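The needsCompaction override at the end of the RatioBasedCompactionPolicy hunk above reduces to a simple count check; a standalone sketch (illustrative, not the HBase signature):

    // Illustrative: request a compaction once enough files are not already being compacted.
    static boolean needsCompaction(int storeFileCount, int filesCompactingCount, int minFilesToCompact) {
      int numCandidates = storeFileCount - filesCompactingCount;
      return numCandidates >= minFilesToCompact;
    }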
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver.compactions; @@ -23,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -41,19 +49,19 @@ public SortedCompactionPolicy(Configuration conf, StoreConfigInformation storeCo } public List preSelectCompactionForCoprocessor(Collection candidates, - List filesCompacting) { + List filesCompacting) { return getCurrentEligibleFiles(new ArrayList<>(candidates), filesCompacting); } /** * @param candidateFiles candidate files, ordered from oldest to newest by seqId. We rely on - * DefaultStoreFileManager to sort the files by seqId to guarantee contiguous compaction based - * on seqId for data consistency. + * DefaultStoreFileManager to sort the files by seqId to guarantee + * contiguous compaction based on seqId for data consistency. * @return subset copy of candidate list that meets compaction criteria */ public CompactionRequestImpl selectCompaction(Collection candidateFiles, - List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, - boolean forceMajor) throws IOException { + List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, + boolean forceMajor) throws IOException { // Preliminary compaction subject to filters ArrayList candidateSelection = new ArrayList<>(candidateFiles); // Stuck and not compacting enough (estimate). 
It is not guaranteed that we will be @@ -64,9 +72,9 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi >= storeConfigInfo.getBlockingFileCount(); candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting); - LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " + - filesCompacting.size() + " compacting, " + candidateSelection.size() + - " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking"); + LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " + + filesCompacting.size() + " compacting, " + candidateSelection.size() + " eligible, " + + storeConfigInfo.getBlockingFileCount() + " blocking"); // If we can't have all files, we cannot do major anyway boolean isAllFiles = candidateFiles.size() == candidateSelection.size(); @@ -78,8 +86,8 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi // Try a major compaction if this is a user-requested major compaction, // or if we do not have too many files to compact and this was requested as a major compaction boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction) - || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection)) - && (candidateSelection.size() < comConf.getMaxFilesToCompact())); + || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection)) + && (candidateSelection.size() < comConf.getMaxFilesToCompact())); // Or, if there are any references among the candidates. boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection); @@ -99,8 +107,8 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi } protected abstract CompactionRequestImpl createCompactionRequest( - ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, - boolean mayBeStuck) throws IOException; + ArrayList candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, + boolean mayBeStuck) throws IOException; /** * @param filesToCompact Files to compact. Can be null. @@ -108,11 +116,10 @@ protected abstract CompactionRequestImpl createCompactionRequest( */ @Override public abstract boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException; + throws IOException; /** - * @param filesToCompact - * @return When to run next major compaction + * n * @return When to run next major compaction */ public long getNextMajorCompactTime(Collection filesToCompact) { /** Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}. */ @@ -122,8 +129,8 @@ public long getNextMajorCompactTime(Collection filesToCompact) { } /** - * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, - * that is, +/- 3.5 days (7 days * 0.5). + * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, that + * is, +/- 3.5 days (7 days * 0.5). 
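As a rough illustration of that jitter (a sketch assuming the period and jitter fraction were already read from configuration; the real method may also derive a deterministic seed from the store files so retries compute the same value, a detail omitted here):

    // Illustrative: next major compaction delay = period +/- jitterPct * period.
    static long nextMajorCompactionDelay(long periodMs, double jitterPct, java.util.Random rnd) {
      if (periodMs <= 0 || jitterPct <= 0) {
        return periodMs;
      }
      long jitterMs = Math.round(periodMs * jitterPct);
      // e.g. 7 days with jitterPct 0.5 lands somewhere in [3.5 days, 10.5 days)
      return periodMs - jitterMs + (long) (2 * jitterMs * rnd.nextDouble());
    }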
*/ double jitterPct = comConf.getMajorCompactionJitter(); if (jitterPct <= 0) { @@ -154,10 +161,10 @@ public boolean throttleCompaction(long compactionSize) { } public abstract boolean needsCompaction(Collection storeFiles, - List filesCompacting); + List filesCompacting); protected ArrayList getCurrentEligibleFiles(ArrayList candidateFiles, - final List filesCompacting) { + final List filesCompacting) { // candidates = all storefiles not already in compaction queue if (!filesCompacting.isEmpty()) { // exclude all files older than the newest file we're currently @@ -172,19 +179,20 @@ protected ArrayList getCurrentEligibleFiles(ArrayList ca /** * @param candidates pre-filtrate - * @return filtered subset exclude all files above maxCompactSize - * Also save all references. We MUST compact them + * @return filtered subset exclude all files above maxCompactSize Also save all references. We + * MUST compact them */ protected ArrayList skipLargeFiles(ArrayList candidates, boolean mayUseOffpeak) { int pos = 0; - while (pos < candidates.size() && !candidates.get(pos).isReference() - && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) { + while ( + pos < candidates.size() && !candidates.get(pos).isReference() + && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak)) + ) { ++pos; } if (pos > 0) { - LOG.debug("Some files are too large. Excluding " + pos - + " files from compaction candidates"); + LOG.debug("Some files are too large. Excluding " + pos + " files from compaction candidates"); candidates.subList(0, pos).clear(); } return candidates; @@ -200,16 +208,16 @@ protected void filterBulk(ArrayList candidates) { /** * @param candidates pre-filtrate */ - protected void removeExcessFiles(ArrayList candidates, - boolean isUserCompaction, boolean isMajorCompaction) { + protected void removeExcessFiles(ArrayList candidates, boolean isUserCompaction, + boolean isMajorCompaction) { int excess = candidates.size() - comConf.getMaxFilesToCompact(); if (excess > 0) { if (isMajorCompaction && isUserCompaction) { LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact() - + " files because of a user-requested major compaction"); + + " files because of a user-requested major compaction"); } else { - LOG.debug("Too many admissible files. Excluding " + excess - + " files from compaction candidates"); + LOG.debug( + "Too many admissible files. Excluding " + excess + " files from compaction candidates"); candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear(); } } @@ -220,11 +228,11 @@ protected void removeExcessFiles(ArrayList candidates, * @return filtered subset forget the compactionSelection if we don't have enough files */ protected ArrayList checkMinFilesCriteria(ArrayList candidates, - int minFiles) { + int minFiles) { if (candidates.size() < minFiles) { if (LOG.isDebugEnabled()) { - LOG.debug("Not compacting files because we only have " + candidates.size() + - " files ready for compaction. Need " + minFiles + " to initiate."); + LOG.debug("Not compacting files because we only have " + candidates.size() + + " files ready for compaction. 
Need " + minFiles + " to initiate."); } candidates.clear(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 19c5b24a4f66..575b7c352eaf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,11 @@ package org.apache.hadoop.hbase.regionserver.compactions; import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.OPEN_KEY; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; @@ -41,6 +40,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** @@ -54,15 +54,15 @@ public class StripeCompactionPolicy extends CompactionPolicy { private StripeStoreConfig config; - public StripeCompactionPolicy( - Configuration conf, StoreConfigInformation storeConfigInfo, StripeStoreConfig config) { + public StripeCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo, + StripeStoreConfig config) { super(conf, storeConfigInfo); this.config = config; stripePolicy = new ExploringCompactionPolicy(conf, storeConfigInfo); } public List preSelectFilesForCoprocessor(StripeInformationProvider si, - List filesCompacting) { + List filesCompacting) { // We sincerely hope nobody is messing with us with their coprocessors. // If they do, they are very likely to shoot themselves in the foot. // We'll just exclude all the filesCompacting from the list. @@ -71,20 +71,20 @@ public List preSelectFilesForCoprocessor(StripeInformationProvider s return candidateFiles; } - public StripeCompactionRequest createEmptyRequest( - StripeInformationProvider si, CompactionRequestImpl request) { + public StripeCompactionRequest createEmptyRequest(StripeInformationProvider si, + CompactionRequestImpl request) { // Treat as L0-ish compaction with fixed set of files, and hope for the best. if (si.getStripeCount() > 0) { return new BoundaryStripeCompactionRequest(request, si.getStripeBoundaries()); } - Pair targetKvsAndCount = estimateTargetKvs( - request.getFiles(), this.config.getInitialCount()); - return new SplitStripeCompactionRequest( - request, OPEN_KEY, OPEN_KEY, targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst()); + Pair targetKvsAndCount = + estimateTargetKvs(request.getFiles(), this.config.getInitialCount()); + return new SplitStripeCompactionRequest(request, OPEN_KEY, OPEN_KEY, + targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst()); } public StripeStoreFlusher.StripeFlushRequest selectFlush(CellComparator comparator, - StripeInformationProvider si, int kvCount) { + StripeInformationProvider si, int kvCount) { if (this.config.isUsingL0Flush()) { // L0 is used, return dumb request. 
return new StripeStoreFlusher.StripeFlushRequest(comparator); @@ -93,16 +93,16 @@ public StripeStoreFlusher.StripeFlushRequest selectFlush(CellComparator comparat // No stripes - start with the requisite count, derive KVs per stripe. int initialCount = this.config.getInitialCount(); return new StripeStoreFlusher.SizeStripeFlushRequest(comparator, initialCount, - kvCount / initialCount); + kvCount / initialCount); } // There are stripes - do according to the boundaries. return new StripeStoreFlusher.BoundaryStripeFlushRequest(comparator, si.getStripeBoundaries()); } public StripeCompactionRequest selectCompaction(StripeInformationProvider si, - List filesCompacting, boolean isOffpeak) throws IOException { + List filesCompacting, boolean isOffpeak) throws IOException { // TODO: first cut - no parallel compactions. To have more fine grained control we - // probably need structure more sophisticated than a list. + // probably need structure more sophisticated than a list. if (!filesCompacting.isEmpty()) { LOG.debug("Not selecting compaction: " + filesCompacting.size() + " files compacting"); return null; @@ -118,8 +118,8 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, if (StoreUtils.hasReferences(allFiles)) { LOG.debug("There are references in the store; compacting all files"); long targetKvs = estimateTargetKvs(allFiles, config.getInitialCount()).getFirst(); - SplitStripeCompactionRequest request = new SplitStripeCompactionRequest( - allFiles, OPEN_KEY, OPEN_KEY, targetKvs); + SplitStripeCompactionRequest request = + new SplitStripeCompactionRequest(allFiles, OPEN_KEY, OPEN_KEY, targetKvs); request.setMajorRangeFull(); request.getRequest().setAfterSplit(true); return request; @@ -130,7 +130,7 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, // See if we need to make new stripes. boolean shouldCompactL0 = - this.config.getLevel0MinFiles() <= l0Files.size() || allL0FilesExpired(si); + this.config.getLevel0MinFiles() <= l0Files.size() || allL0FilesExpired(si); if (stripeCount == 0) { if (!shouldCompactL0) { return null; // nothing to do. @@ -142,8 +142,8 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, if (shouldCompactL0) { if (!canDropDeletesNoL0) { // If we need to compact L0, see if we can add something to it, and drop deletes. - StripeCompactionRequest result = selectSingleStripeCompaction( - si, !shouldSelectL0Files(si), canDropDeletesNoL0, isOffpeak); + StripeCompactionRequest result = + selectSingleStripeCompaction(si, !shouldSelectL0Files(si), canDropDeletesNoL0, isOffpeak); if (result != null) { return result; } @@ -165,10 +165,9 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, public boolean needsCompactions(StripeInformationProvider si, List filesCompacting) { // Approximation on whether we need compaction. 
- return filesCompacting.isEmpty() - && (StoreUtils.hasReferences(si.getStorefiles()) - || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) - || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); + return filesCompacting.isEmpty() && (StoreUtils.hasReferences(si.getStorefiles()) + || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) + || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); } @Override @@ -195,7 +194,7 @@ protected boolean needsSingleStripeCompaction(StripeInformationProvider si) { } protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformationProvider si, - boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException { + boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException { ArrayList> stripes = si.getStripes(); int bqIndex = -1; @@ -206,14 +205,16 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation // If we want to compact L0 to drop deletes, we only want whole-stripe compactions. // So, pass includeL0 as 2nd parameter to indicate that. List selection = selectSimpleCompaction(stripes.get(i), - !canDropDeletesWithoutL0 && includeL0, isOffpeak, false); + !canDropDeletesWithoutL0 && includeL0, isOffpeak, false); if (selection.isEmpty()) continue; long size = 0; for (HStoreFile sf : selection) { size += sf.getReader().length(); } - if (bqSelection == null || selection.size() > bqSelection.size() || - (selection.size() == bqSelection.size() && size < bqTotalSize)) { + if ( + bqSelection == null || selection.size() > bqSelection.size() + || (selection.size() == bqSelection.size() && size < bqTotalSize) + ) { bqSelection = selection; bqIndex = i; bqTotalSize = size; @@ -238,13 +239,12 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation Pair kvsAndCount = estimateTargetKvs(filesToCompact, config.getSplitCount()); targetKvs = kvsAndCount.getFirst(); targetCount = kvsAndCount.getSecond(); - splitString = "; the stripe will be split into at most " - + targetCount + " stripes with " + targetKvs + " target KVs"; + splitString = "; the stripe will be split into at most " + targetCount + " stripes with " + + targetKvs + " target KVs"; } - LOG.debug("Found compaction in a stripe with end key [" - + Bytes.toString(si.getEndRow(bqIndex)) + "], with " - + filesToCompact.size() + " files of total size " + bqTotalSize + splitString); + LOG.debug("Found compaction in a stripe with end key [" + Bytes.toString(si.getEndRow(bqIndex)) + + "], with " + filesToCompact.size() + " files of total size " + bqTotalSize + splitString); // See if we can drop deletes. StripeCompactionRequest req; @@ -257,8 +257,8 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation sfs.addSublist(l0Files); req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries()); } else { - req = new SplitStripeCompactionRequest( - filesToCompact, si.getStartRow(bqIndex), si.getEndRow(bqIndex), targetCount, targetKvs); + req = new SplitStripeCompactionRequest(filesToCompact, si.getStartRow(bqIndex), + si.getEndRow(bqIndex), targetCount, targetKvs); } if (hasAllFiles && (canDropDeletesWithoutL0 || includeL0)) { req.setMajorRange(si.getStartRow(bqIndex), si.getEndRow(bqIndex)); @@ -269,17 +269,17 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation /** * Selects the compaction of a single stripe using default policy. 
- * @param sfs Files. + * @param sfs Files. * @param allFilesOnly Whether a compaction of all-or-none files is needed. * @return The resulting selection. */ - private List selectSimpleCompaction( - List sfs, boolean allFilesOnly, boolean isOffpeak, boolean forceCompact) { - int minFilesLocal = Math.max( - allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles()); + private List selectSimpleCompaction(List sfs, boolean allFilesOnly, + boolean isOffpeak, boolean forceCompact) { + int minFilesLocal = + Math.max(allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles()); int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal); - List selected = stripePolicy.applyCompactionPolicy(sfs, false, - isOffpeak, minFilesLocal, maxFilesLocal); + List selected = + stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal); if (forceCompact && (selected == null || selected.isEmpty()) && !sfs.isEmpty()) { return stripePolicy.selectCompactFiles(sfs, maxFilesLocal, isOffpeak); } @@ -287,8 +287,8 @@ private List selectSimpleCompaction( } private boolean shouldSelectL0Files(StripeInformationProvider si) { - return si.getLevel0Files().size() > this.config.getStripeCompactMaxFiles() || - getTotalFileSize(si.getLevel0Files()) > comConf.getMaxCompactSize(); + return si.getLevel0Files().size() > this.config.getStripeCompactMaxFiles() + || getTotalFileSize(si.getLevel0Files()) > comConf.getMaxCompactSize(); } private StripeCompactionRequest selectL0OnlyCompaction(StripeInformationProvider si) { @@ -317,8 +317,8 @@ private StripeCompactionRequest selectL0OnlyCompaction(StripeInformationProvider return request; } - private StripeCompactionRequest selectExpiredMergeCompaction( - StripeInformationProvider si, boolean canDropDeletesNoL0) { + private StripeCompactionRequest selectExpiredMergeCompaction(StripeInformationProvider si, + boolean canDropDeletesNoL0) { long cfTtl = this.storeConfigInfo.getStoreFileTtl(); if (cfTtl == Long.MAX_VALUE) { return null; // minversion might be set, cannot delete old files @@ -362,7 +362,7 @@ private StripeCompactionRequest selectExpiredMergeCompaction( ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addAllSublists(stripes.subList(bestStart, endIndex + 1)); SplitStripeCompactionRequest result = new SplitStripeCompactionRequest(sfs, - si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE); + si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE); if (canDropDeletesNoL0) { result.setMajorRangeFull(); } @@ -395,8 +395,10 @@ private boolean allFilesExpired(final List storeFiles) { long timestampCutoff = EnvironmentEdgeManager.currentTime() - cfTtl; for (HStoreFile storeFile : storeFiles) { // Check store file is not empty and has not expired - if (storeFile.getReader().getMaxTimestamp() >= timestampCutoff - && storeFile.getReader().getEntries() != 0) { + if ( + storeFile.getReader().getMaxTimestamp() >= timestampCutoff + && storeFile.getReader().getEntries() != 0 + ) { return false; } } @@ -435,8 +437,8 @@ private Pair estimateTargetKvs(Collection files, doub ratio = newRatio; splitCount += 1.0; } - long kvCount = (long)(getTotalKvCount(files) / splitCount); - return new Pair<>(kvCount, (int)Math.ceil(splitCount)); + long kvCount = (long) (getTotalKvCount(files) / splitCount); + return new Pair<>(kvCount, (int) Math.ceil(splitCount)); } /** Stripe compaction request wrapper. 
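Only the tail of estimateTargetKvs is visible in the hunk above; a hedged sketch of what that tail computes once a splitCount has been chosen (names here are illustrative):

    // Illustrative: spread the total KV count evenly across the chosen number of splits.
    static long[] targetKvsAndCount(long totalKvCount, double splitCount) {
      long kvPerStripe = (long) (totalKvCount / splitCount);
      int stripeCount = (int) Math.ceil(splitCount);
      return new long[] { kvPerStripe, stripeCount };
    }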
*/ @@ -444,28 +446,29 @@ public abstract static class StripeCompactionRequest { protected CompactionRequestImpl request; protected byte[] majorRangeFromRow = null, majorRangeToRow = null; - public List execute(StripeCompactor compactor, - ThroughputController throughputController) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController) + throws IOException { return execute(compactor, throughputController, null); } + /** - * Executes the request against compactor (essentially, just calls correct overload of - * compact method), to simulate more dynamic dispatch. + * Executes the request against compactor (essentially, just calls correct overload of compact + * method), to simulate more dynamic dispatch. * @param compactor Compactor. * @return result of compact(...) */ public abstract List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException; + ThroughputController throughputController, User user) throws IOException; public StripeCompactionRequest(CompactionRequestImpl request) { this.request = request; } /** - * Sets compaction "major range". Major range is the key range for which all - * the files are included, so they can be treated like major-compacted files. + * Sets compaction "major range". Major range is the key range for which all the files are + * included, so they can be treated like major-compacted files. * @param startRow Left boundary, inclusive. - * @param endRow Right boundary, exclusive. + * @param endRow Right boundary, exclusive. */ public void setMajorRange(byte[] startRow, byte[] endRow) { this.majorRangeFromRow = startRow; @@ -484,40 +487,40 @@ public void setRequest(CompactionRequestImpl request) { } /** - * Request for stripe compactor that will cause it to split the source files into several - * separate files at the provided boundaries. + * Request for stripe compactor that will cause it to split the source files into several separate + * files at the provided boundaries. */ private static class BoundaryStripeCompactionRequest extends StripeCompactionRequest { private final List targetBoundaries; /** - * @param request Original request. + * @param request Original request. * @param targetBoundaries New files should be written with these boundaries. */ public BoundaryStripeCompactionRequest(CompactionRequestImpl request, - List targetBoundaries) { + List targetBoundaries) { super(request); this.targetBoundaries = targetBoundaries; } public BoundaryStripeCompactionRequest(Collection files, - List targetBoundaries) { + List targetBoundaries) { this(new CompactionRequestImpl(files), targetBoundaries); } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + User user) throws IOException { return compactor.compact(this.request, this.targetBoundaries, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } } /** - * Request for stripe compactor that will cause it to split the source files into several - * separate files into based on key-value count, as well as file count limit. - * Most of the files will be roughly the same size. The last file may be smaller or larger - * depending on the interplay of the amount of data and maximum number of files allowed. 
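A toy illustration of the targetCount/targetKvs contract described above, where any overflow beyond targetKvs * targetCount ends up in the last stripe (purely illustrative, not HBase code):

    // Illustrative: per-stripe output sizes for a given total KV count.
    static long[] stripeSizes(long totalKvs, int targetCount, long targetKvs) {
      int fullStripes = (int) Math.min(targetCount - 1, totalKvs / targetKvs);
      long[] sizes = new long[fullStripes + 1];
      for (int i = 0; i < fullStripes; i++) {
        sizes[i] = targetKvs;
      }
      sizes[fullStripes] = totalKvs - fullStripes * targetKvs; // last stripe absorbs the remainder
      return sizes;
    }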
+ * Request for stripe compactor that will cause it to split the source files into several separate + * files into based on key-value count, as well as file count limit. Most of the files will be + * roughly the same size. The last file may be smaller or larger depending on the interplay of the + * amount of data and maximum number of files allowed. */ private static class SplitStripeCompactionRequest extends StripeCompactionRequest { private final byte[] startRow, endRow; @@ -525,15 +528,15 @@ private static class SplitStripeCompactionRequest extends StripeCompactionReques private final long targetKvs; /** - * @param request Original request. - * @param startRow Left boundary of the range to compact, inclusive. - * @param endRow Right boundary of the range to compact, exclusive. + * @param request Original request. + * @param startRow Left boundary of the range to compact, inclusive. + * @param endRow Right boundary of the range to compact, exclusive. * @param targetCount The maximum number of stripe to compact into. - * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than - * total number of kvs, all the overflow data goes into the last stripe. + * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than total + * number of kvs, all the overflow data goes into the last stripe. */ - public SplitStripeCompactionRequest(CompactionRequestImpl request, - byte[] startRow, byte[] endRow, int targetCount, long targetKvs) { + public SplitStripeCompactionRequest(CompactionRequestImpl request, byte[] startRow, + byte[] endRow, int targetCount, long targetKvs) { super(request); this.startRow = startRow; this.endRow = endRow; @@ -541,25 +544,27 @@ public SplitStripeCompactionRequest(CompactionRequestImpl request, this.targetKvs = targetKvs; } - public SplitStripeCompactionRequest( - Collection files, byte[] startRow, byte[] endRow, long targetKvs) { + public SplitStripeCompactionRequest(Collection files, byte[] startRow, + byte[] endRow, long targetKvs) { this(files, startRow, endRow, Integer.MAX_VALUE, targetKvs); } - public SplitStripeCompactionRequest(Collection files, - byte[] startRow, byte[] endRow, int targetCount, long targetKvs) { + public SplitStripeCompactionRequest(Collection files, byte[] startRow, + byte[] endRow, int targetCount, long targetKvs) { this(new CompactionRequestImpl(files), startRow, endRow, targetCount, targetKvs); } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + User user) throws IOException { return compactor.compact(this.request, this.targetCount, this.targetKvs, this.startRow, this.endRow, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } - /** Set major range of the compaction to the entire compaction range. - * See {@link #setMajorRange(byte[], byte[])}. */ + /** + * Set major range of the compaction to the entire compaction range. See + * {@link #setMajorRange(byte[], byte[])}. 
+ */ public void setMajorRangeFull() { setMajorRange(this.startRow, this.endRow); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java index 6413a304d55d..96c317b60ee5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,18 +68,18 @@ public ScanType getScanType(CompactionRequestImpl request) { @Override public InternalScanner createScanner(ScanInfo scanInfo, List scanners, - ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException { return (majorRangeFromRow == null) - ? StripeCompactor.this.createScanner(store, scanInfo, scanners, scanType, - smallestReadPoint, fd.earliestPutTs) - : StripeCompactor.this.createScanner(store, scanInfo, scanners, smallestReadPoint, - fd.earliestPutTs, majorRangeFromRow, majorRangeToRow); + ? StripeCompactor.this.createScanner(store, scanInfo, scanners, scanType, smallestReadPoint, + fd.earliestPutTs) + : StripeCompactor.this.createScanner(store, scanInfo, scanners, smallestReadPoint, + fd.earliestPutTs, majorRangeFromRow, majorRangeToRow); } } public List compact(CompactionRequestImpl request, final List targetBoundaries, - final byte[] majorRangeFromRow, final byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + final byte[] majorRangeFromRow, final byte[] majorRangeToRow, + ThroughputController throughputController, User user) throws IOException { if (LOG.isDebugEnabled()) { StringBuilder sb = new StringBuilder(); sb.append("Executing compaction with " + targetBoundaries.size() + " boundaries:"); @@ -88,9 +88,7 @@ public List compact(CompactionRequestImpl request, final List targ } LOG.debug(sb.toString()); } - return compact( - request, - new StripeInternalScannerFactory(majorRangeFromRow, majorRangeToRow), + return compact(request, new StripeInternalScannerFactory(majorRangeFromRow, majorRangeToRow), new CellSinkFactory() { @Override @@ -98,25 +96,21 @@ public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails f boolean shouldDropBehind, boolean major, Consumer writerCreationTracker) throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.BoundaryMultiWriter( - store.getComparator(), - targetBoundaries, - majorRangeFromRow, - majorRangeToRow); + store.getComparator(), targetBoundaries, majorRangeFromRow, majorRangeToRow); initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker); return writer; } - }, - throughputController, - user); + }, throughputController, user); } - public List compact(CompactionRequestImpl request, final int targetCount, final long targetSize, - final byte[] left, final byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + public List compact(CompactionRequestImpl request, final int targetCount, + final long targetSize, final byte[] left, final byte[] right, byte[] majorRangeFromRow, + byte[] 
majorRangeToRow, ThroughputController throughputController, User user) + throws IOException { if (LOG.isDebugEnabled()) { LOG.debug( "Executing compaction with " + targetSize + " target file size, no more than " + targetCount - + " files, in [" + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] range"); + + " files, in [" + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] range"); } return compact(request, new StripeInternalScannerFactory(majorRangeFromRow, majorRangeToRow), new CellSinkFactory() { @@ -126,22 +120,16 @@ public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails f boolean shouldDropBehind, boolean major, Consumer writerCreationTracker) throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter( - store.getComparator(), - targetCount, - targetSize, - left, - right); + store.getComparator(), targetCount, targetSize, left, right); initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker); return writer; } - }, - throughputController, - user); + }, throughputController, user); } @Override protected List commitWriter(StripeMultiFileWriter writer, FileDetails fd, - CompactionRequestImpl request) throws IOException { + CompactionRequestImpl request) throws IOException { List newFiles = writer.commitWriters(fd.maxSeqId, request.isMajor(), request.getFiles()); assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata."; return newFiles; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index 101c9c3d9f6b..16107d93f090 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -61,7 +61,7 @@ public class AssignRegionHandler extends EventHandler { private final RetryCounter retryCounter; public AssignRegionHandler(HRegionServer server, RegionInfo regionInfo, long openProcId, - @Nullable TableDescriptor tableDesc, long masterSystemTime, EventType eventType) { + @Nullable TableDescriptor tableDesc, long masterSystemTime, EventType eventType) { super(server, eventType); this.regionInfo = regionInfo; this.openProcId = openProcId; @@ -79,8 +79,10 @@ private void cleanUpAndReportFailure(IOException error) throws IOException { error); HRegionServer rs = getServer(); rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); - if (!rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, - HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo))) { + if ( + !rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, + HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo)) + ) { throw new IOException( "Failed to report failed open to master: " + regionInfo.getRegionNameAsString()); } @@ -106,16 +108,15 @@ public void process() throws IOException { if (previous != null) { if (previous) { // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it. 
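The retry handling in the AssignRegionHandler hunk above pivots on the regions-in-transition map; a simplified sketch of that decision, using a plain ConcurrentMap in place of the region server's internal state (illustrative only):

    // Illustrative: TRUE means the region is opening, FALSE means it is closing.
    enum OpenDecision { PROCEED, IGNORE_DUPLICATE_OPEN, RETRY_WITH_BACKOFF }

    static OpenDecision onOpenRequest(
        java.util.concurrent.ConcurrentMap<String, Boolean> regionsInTransition, String encodedName) {
      Boolean previous = regionsInTransition.putIfAbsent(encodedName, Boolean.TRUE);
      if (previous == null) {
        return OpenDecision.PROCEED;                 // we own the open now
      }
      return previous
        ? OpenDecision.IGNORE_DUPLICATE_OPEN         // already opening; likely an RPC retry
        : OpenDecision.RETRY_WITH_BACKOFF;           // a close is in flight; resubmit later
    }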
- LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + - " - ignoring this new request for this region.", regionName); + LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + + " - ignoring this new request for this region.", regionName); } else { // The region is closing. This is possible as we will update the region state to CLOSED when // calling reportRegionStateTransition, so the HMaster will think the region is offline, // before we actually close the region, as reportRegionStateTransition is part of the // closing process. long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.info( - "Receiving OPEN for {} which we are trying to close, try again after {}ms", + LOG.info("Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } @@ -166,12 +167,14 @@ protected void handleException(Throwable t) { } public static AssignRegionHandler create(HRegionServer server, RegionInfo regionInfo, - long openProcId, TableDescriptor tableDesc, long masterSystemTime) { + long openProcId, TableDescriptor tableDesc, long masterSystemTime) { EventType eventType; if (regionInfo.isMetaRegion()) { eventType = EventType.M_RS_OPEN_META; - } else if (regionInfo.getTable().isSystemTable() || - (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS)) { + } else if ( + regionInfo.getTable().isSystemTable() + || (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS) + ) { eventType = EventType.M_RS_OPEN_PRIORITY_REGION; } else { eventType = EventType.M_RS_OPEN_REGION; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java index 38097bafd6e0..a5336430dec7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,10 +30,8 @@ public class CloseMetaHandler extends CloseRegionHandler { // Called when regionserver determines its to go down; not master orchestrated - public CloseMetaHandler(final Server server, - final RegionServerServices rsServices, - final RegionInfo regionInfo, - final boolean abort) { + public CloseMetaHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final boolean abort) { super(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_META, null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java index f9f0e91cc461..2301b9b8b494 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,25 +31,28 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; /** * Handles closing of a region on a region server. *

    * In normal operation, we use {@link UnassignRegionHandler} closing Regions but when shutting down - * the region server and closing out Regions, we use this handler instead; it does not expect to - * be able to communicate the close back to the Master. - *

    Expects that the close *has* been registered in the hosting RegionServer before - * submitting this Handler; i.e. rss.getRegionsInTransitionInRS().putIfAbsent( - * this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); has been called first. - * In here when done, we do the deregister.

    + * the region server and closing out Regions, we use this handler instead; it does not expect to be + * able to communicate the close back to the Master. + *

    + * Expects that the close *has* been registered in the hosting RegionServer before submitting this + * Handler; i.e. rss.getRegionsInTransitionInRS().putIfAbsent( + * this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); has been called first. In here + * when done, we do the deregister. + *

    * @see UnassignRegionHandler */ @InterfaceAudience.Private public class CloseRegionHandler extends EventHandler { - // NOTE on priorities shutting down. There are none for close. There are some - // for open. I think that is right. On shutdown, we want the meta to close - // after the user regions have closed. What + // NOTE on priorities shutting down. There are none for close. There are some + // for open. I think that is right. On shutdown, we want the meta to close + // after the user regions have closed. What // about the case where master tells us to shutdown a catalog region and we // have a running queue of user regions to close? private static final Logger LOG = LoggerFactory.getLogger(CloseRegionHandler.class); @@ -58,7 +60,7 @@ public class CloseRegionHandler extends EventHandler { private final RegionServerServices rsServices; private final RegionInfo regionInfo; - // If true, the hosting server is aborting. Region close process is different + // If true, the hosting server is aborting. Region close process is different // when we are aborting. private final boolean abort; private ServerName destination; @@ -67,17 +69,13 @@ public class CloseRegionHandler extends EventHandler { * This method used internally by the RegionServer to close out regions. * @param abort If the regionserver is aborting. */ - public CloseRegionHandler(final Server server, - final RegionServerServices rsServices, - final RegionInfo regionInfo, final boolean abort, - ServerName destination) { - this(server, rsServices, regionInfo, abort, - EventType.M_RS_CLOSE_REGION, destination); + public CloseRegionHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final boolean abort, ServerName destination) { + this(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_REGION, destination); } - protected CloseRegionHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - boolean abort, EventType eventType, ServerName destination) { + protected CloseRegionHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, boolean abort, EventType eventType, ServerName destination) { super(server, eventType); this.server = server; this.rsServices = rsServices; @@ -95,7 +93,7 @@ public void process() throws IOException { String name = regionInfo.getEncodedName(); LOG.trace("Processing close of {}", name); // Check that this region is being served here - HRegion region = (HRegion)rsServices.getRegion(name); + HRegion region = (HRegion) rsServices.getRegion(name); try { if (region == null) { LOG.warn("Received CLOSE for region {} but currently not serving - ignoring", name); @@ -115,7 +113,7 @@ public void process() throws IOException { rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); - // Done! Region is closed on this RS + // Done! 
Region is closed on this RS LOG.debug("Closed {}", region.getRegionInfo().getRegionNameAsString()); } finally { // Clear any reference in getServer().getRegionsInTransitionInRS() on success or failure, @@ -125,8 +123,9 @@ public void process() throws IOException { } } - @Override protected void handleException(Throwable t) { - server.abort("Unrecoverable exception while closing " + - this.regionInfo.getRegionNameAsString(), t); + @Override + protected void handleException(Throwable t) { + server.abort("Unrecoverable exception while closing " + this.regionInfo.getRegionNameAsString(), + t); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java index 02ed0ef71c59..43051893376e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java index ca5f9e179a9a..1107b60bf156 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,9 +31,8 @@ */ @InterfaceAudience.Private public class OpenMetaHandler extends OpenRegionHandler { - public OpenMetaHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - final TableDescriptor htd, long masterSystemTime) { + public OpenMetaHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, final TableDescriptor htd, long masterSystemTime) { super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java index 1861a2bba332..36f91068341a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.handler; import org.apache.hadoop.hbase.Server; @@ -33,8 +32,8 @@ @InterfaceAudience.Private public class OpenPriorityRegionHandler extends OpenRegionHandler { public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices, - RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { + RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { super(server, rsServices, regionInfo, htd, masterSystemTime, - EventType.M_RS_OPEN_PRIORITY_REGION); + EventType.M_RS_OPEN_PRIORITY_REGION); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index 3ae38864ba1e..bde771f6ab88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.client.RegionInfo; @@ -36,7 +34,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; + /** * Handles opening of a region on a region server. *

    @@ -55,15 +55,14 @@ public class OpenRegionHandler extends EventHandler { private final TableDescriptor htd; private final long masterSystemTime; - public OpenRegionHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - TableDescriptor htd, long masterSystemTime) { + public OpenRegionHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION); } - protected OpenRegionHandler(final Server server, - final RegionServerServices rsServices, final RegionInfo regionInfo, - final TableDescriptor htd, long masterSystemTime, EventType eventType) { + protected OpenRegionHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final TableDescriptor htd, long masterSystemTime, + EventType eventType) { super(server, eventType); this.rsServices = rsServices; this.regionInfo = regionInfo; @@ -93,28 +92,30 @@ public void process() throws IOException { // Check that this region is not already online if (this.rsServices.getRegion(encodedName) != null) { - LOG.error("Region " + encodedName + - " was already online when we started processing the opening. " + - "Marking this new attempt as failed"); + LOG.error( + "Region " + encodedName + " was already online when we started processing the opening. " + + "Marking this new attempt as failed"); return; } // Check that we're still supposed to open the region. - // If fails, just return. Someone stole the region from under us. - if (!isRegionStillOpening()){ + // If fails, just return. Someone stole the region from under us. + if (!isRegionStillOpening()) { LOG.error("Region " + encodedName + " opening cancelled"); return; } - // Open region. After a successful open, failures in subsequent + // Open region. After a successful open, failures in subsequent // processing needs to do a close as part of cleanup. region = openRegion(); if (region == null) { return; } - if (!updateMeta(region, masterSystemTime) || this.server.isStopped() || - this.rsServices.isStopping()) { + if ( + !updateMeta(region, masterSystemTime) || this.server.isStopped() + || this.rsServices.isStopping() + ) { return; } @@ -126,30 +127,30 @@ public void process() throws IOException { this.rsServices.addRegion(region); openSuccessful = true; - // Done! Successful region open + // Done! Successful region open LOG.debug("Opened " + regionName + " on " + this.server.getServerName()); } finally { // Do all clean up here if (!openSuccessful) { doCleanUpOnFailedOpen(region); } - final Boolean current = this.rsServices.getRegionsInTransitionInRS(). - remove(this.regionInfo.getEncodedNameAsBytes()); + final Boolean current = this.rsServices.getRegionsInTransitionInRS() + .remove(this.regionInfo.getEncodedNameAsBytes()); // Let's check if we have met a race condition on open cancellation.... // A better solution would be to not have any race condition. // this.rsServices.getRegionsInTransitionInRS().remove( - // this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); + // this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); // would help. if (openSuccessful) { if (current == null) { // Should NEVER happen, but let's be paranoid. LOG.error("Bad state: we've just opened a region that was NOT in transition. Region=" - + regionName); + + regionName); } else if (Boolean.FALSE.equals(current)) { // Can happen, if we're // really unlucky. 
LOG.error("Race condition: we've finished to open a region, while a close was requested " - + " on region=" + regionName + ". It can be a critical error, as a region that" - + " should be closed is now opened. Closing it now"); + + " on region=" + regionName + ". It can be a critical error, as a region that" + + " should be closed is now opened. Closing it now"); cleanupFailedOpen(region); } } @@ -168,27 +169,28 @@ private void doCleanUpOnFailedOpen(HRegion region) throws IOException { } /** - * Update ZK or META. This can take a while if for example the - * hbase:meta is not available -- if server hosting hbase:meta crashed and we are - * waiting on it to come back -- so run in a thread and keep updating znode - * state meantime so master doesn't timeout our region-in-transition. + * Update ZK or META. This can take a while if for example the hbase:meta is not available -- if + * server hosting hbase:meta crashed and we are waiting on it to come back -- so run in a thread + * and keep updating znode state meantime so master doesn't timeout our region-in-transition. * Caller must cleanup region if this fails. */ private boolean updateMeta(final HRegion r, long masterSystemTime) { if (this.server.isStopped() || this.rsServices.isStopping()) { return false; } - // Object we do wait/notify on. Make it boolean. If set, we're done. + // Object we do wait/notify on. Make it boolean. If set, we're done. // Else, wait. final AtomicBoolean signaller = new AtomicBoolean(false); - PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, - this.server, this.rsServices, signaller, masterSystemTime); + PostOpenDeployTasksThread t = + new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller, masterSystemTime); t.start(); // Post open deploy task: - // meta => update meta location in ZK - // other region => update meta - while (!signaller.get() && t.isAlive() && !this.server.isStopped() && - !this.rsServices.isStopping() && isRegionStillOpening()) { + // meta => update meta location in ZK + // other region => update meta + while ( + !signaller.get() && t.isAlive() && !this.server.isStopped() && !this.rsServices.isStopping() + && isRegionStillOpening() + ) { synchronized (signaller) { try { // Wait for 10 seconds, so that server shutdown @@ -199,8 +201,8 @@ private boolean updateMeta(final HRegion r, long masterSystemTime) { } } } - // Is thread still alive? We may have left above loop because server is - // stopping or we timed out the edit. Is so, interrupt it. + // Is thread still alive? We may have left above loop because server is + // stopping or we timed out the edit. Is so, interrupt it. if (t.isAlive()) { if (!signaller.get()) { // Thread still running; interrupt @@ -210,20 +212,19 @@ private boolean updateMeta(final HRegion r, long masterSystemTime) { try { t.join(); } catch (InterruptedException ie) { - LOG.warn("Interrupted joining " + - r.getRegionInfo().getRegionNameAsString(), ie); + LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNameAsString(), ie); Thread.currentThread().interrupt(); } } - // Was there an exception opening the region? This should trigger on - // InterruptedException too. If so, we failed. + // Was there an exception opening the region? This should trigger on + // InterruptedException too. If so, we failed. return (!Thread.interrupted() && t.getException() == null); } /** - * Thread to run region post open tasks. 
Call {@link #getException()} after the thread finishes - * to check for exceptions running + * Thread to run region post open tasks. Call {@link #getException()} after the thread finishes to + * check for exceptions running * {@link RegionServerServices#postOpenDeployTasks(PostOpenDeployContext)} */ static class PostOpenDeployTasksThread extends Thread { @@ -235,7 +236,7 @@ static class PostOpenDeployTasksThread extends Thread { private final long masterSystemTime; PostOpenDeployTasksThread(final HRegion region, final Server server, - final RegionServerServices services, final AtomicBoolean signaller, long masterSystemTime) { + final RegionServerServices services, final AtomicBoolean signaller, long masterSystemTime) { super("PostOpenDeployTasks:" + region.getRegionInfo().getEncodedName()); this.setDaemon(true); this.server = server; @@ -251,8 +252,8 @@ public void run() { this.services.postOpenDeployTasks( new PostOpenDeployContext(region, Procedure.NO_PROC_ID, masterSystemTime)); } catch (Throwable e) { - String msg = "Exception running postOpenDeployTasks; region=" + - this.region.getRegionInfo().getEncodedName(); + String msg = "Exception running postOpenDeployTasks; region=" + + this.region.getRegionInfo().getEncodedName(); this.exception = e; if (e instanceof IOException && isRegionStillOpening(region.getRegionInfo(), services)) { server.abort(msg, e); @@ -281,28 +282,25 @@ Throwable getException() { private HRegion openRegion() { HRegion region = null; try { - // Instantiate the region. This also periodically tickles OPENING + // Instantiate the region. This also periodically tickles OPENING // state so master doesn't timeout this region in transition. - region = HRegion.openHRegion(this.regionInfo, this.htd, - this.rsServices.getWAL(this.regionInfo), - this.server.getConfiguration(), - this.rsServices, - new CancelableProgressable() { - @Override - public boolean progress() { - if (!isRegionStillOpening()) { - LOG.warn("Open region aborted since it isn't opening any more"); - return false; + region = + HRegion.openHRegion(this.regionInfo, this.htd, this.rsServices.getWAL(this.regionInfo), + this.server.getConfiguration(), this.rsServices, new CancelableProgressable() { + @Override + public boolean progress() { + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + return false; + } + return true; } - return true; - } - }); + }); } catch (Throwable t) { // We failed open. Our caller will see the 'null' return value // and transition the node back to FAILED_OPEN. If that fails, // we rely on the Timeout Monitor in the master to reassign. 
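The updateMeta() and PostOpenDeployTasksThread hunks above follow a common pattern: run the blocking post-open work on a helper thread, park on an AtomicBoolean used as a wait/notify latch so stop conditions can be re-checked periodically, and interrupt and join the helper if the waiter gives up. A self-contained sketch of that signalling pattern, with invented names and timeouts rather than the real HBase code:

  import java.util.concurrent.atomic.AtomicBoolean;

  public class SignallerSketch {
    public static void main(String[] args) throws InterruptedException {
      final AtomicBoolean signaller = new AtomicBoolean(false);

      // Helper thread standing in for the blocking "post open deploy" work.
      Thread worker = new Thread(() -> {
        try {
          Thread.sleep(500); // stand-in for a blocking meta update
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        } finally {
          // Flip the flag and wake the waiter, whether we succeeded or were interrupted.
          synchronized (signaller) {
            signaller.set(true);
            signaller.notifyAll();
          }
        }
      }, "post-open-deploy-sketch");
      worker.start();

      long deadline = System.currentTimeMillis() + 10_000;
      // Wait in short slices so external stop conditions could be re-checked each pass.
      while (!signaller.get() && worker.isAlive() && System.currentTimeMillis() < deadline) {
        synchronized (signaller) {
          if (!signaller.get()) {
            signaller.wait(1_000);
          }
        }
      }

      // If the worker is still running after we stop waiting, interrupt it, then join.
      if (worker.isAlive() && !signaller.get()) {
        worker.interrupt();
      }
      worker.join();
      System.out.println("worker finished, signalled=" + signaller.get());
    }
  }

The real handler adds more exit conditions (server stopping, region no longer opening) and reports failure when the helper thread recorded an exception, but the shape of the loop is the same.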
- LOG.error( - "Failed open of region=" + this.regionInfo.getRegionNameAsString(), t); + LOG.error("Failed open of region=" + this.regionInfo.getRegionNameAsString(), t); } return region; } @@ -314,8 +312,8 @@ private void cleanupFailedOpen(final HRegion region) throws IOException { } } - private static boolean isRegionStillOpening( - RegionInfo regionInfo, RegionServerServices rsServices) { + private static boolean isRegionStillOpening(RegionInfo regionInfo, + RegionServerServices rsServices) { byte[] encodedName = regionInfo.getEncodedNameAsBytes(); Boolean action = rsServices.getRegionsInTransitionInRS().get(encodedName); return Boolean.TRUE.equals(action); // true means opening for RIT diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java index ed1b2c760f9c..41fb3e7bf12b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,13 @@ import java.io.IOException; import java.util.concurrent.CountDownLatch; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Handler to seek storefiles in parallel. @@ -41,8 +39,8 @@ public class ParallelSeekHandler extends EventHandler { private CountDownLatch latch; private Throwable err = null; - public ParallelSeekHandler(KeyValueScanner scanner,Cell keyValue, - long readPoint, CountDownLatch latch) { + public ParallelSeekHandler(KeyValueScanner scanner, Cell keyValue, long readPoint, + CountDownLatch latch) { super(null, EventType.RS_PARALLEL_SEEK); this.scanner = scanner; this.keyValue = keyValue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index 829d0bf01578..5b3f053d847e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.handler; import java.io.IOException; @@ -41,12 +40,12 @@ /** * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to WAL in * secondary region replicas. This means that a secondary region replica can serve some edits from - * it's memstore that are still not flushed from primary. We do not want to allow secondary - * region's seqId to go back in time, when this secondary region is opened elsewhere after a - * crash or region move. 
We will trigger a flush cache in the primary region replica and wait - * for observing a complete flush cycle before marking the region readsEnabled. This handler does - * the flushing of the primary region replica and ensures that regular region opening is not - * blocked while the secondary replica is blocked on flush. + * it's memstore that are still not flushed from primary. We do not want to allow secondary region's + * seqId to go back in time, when this secondary region is opened elsewhere after a crash or region + * move. We will trigger a flush cache in the primary region replica and wait for observing a + * complete flush cycle before marking the region readsEnabled. This handler does the flushing of + * the primary region replica and ensures that regular region opening is not blocked while the + * secondary replica is blocked on flush. */ @InterfaceAudience.Private public class RegionReplicaFlushHandler extends EventHandler { @@ -97,15 +96,17 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { HConstants.DEFAULT_HBASE_CLIENT_PAUSE); int maxAttempts = getRetriesCount(connection.getConfiguration()); - RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); + RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create(); if (LOG.isDebugEnabled()) { - LOG.debug("RPC'ing to primary " + ServerRegionReplicaUtil. - getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionNameAsString() + - " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); + LOG.debug("RPC'ing to primary " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); } - while (!region.isClosing() && !region.isClosed() - && !server.isAborted() && !server.isStopped()) { + while ( + !region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped() + ) { // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we // do not have to wait for the whole flush here, just initiate it. FlushRegionResponse response; @@ -113,8 +114,10 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { response = FutureUtils.get(connection.flush(ServerRegionReplicaUtil .getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionName(), true)); } catch (IOException e) { - if (e instanceof TableNotFoundException || FutureUtils - .get(connection.getAdmin().isTableDisabled(region.getRegionInfo().getTable()))) { + if ( + e instanceof TableNotFoundException || FutureUtils + .get(connection.getAdmin().isTableDisabled(region.getRegionInfo().getTable())) + ) { return; } if (!counter.shouldRetry()) { @@ -141,11 +144,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // then we have to wait for seeing the flush entry. 
All reads will be rejected until we see // a complete flush cycle or replay a region open event if (LOG.isDebugEnabled()) { - LOG.debug("Triggered flush of primary region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString() + - " for " + region.getRegionInfo().getEncodedName() + - "; now waiting and blocking reads until completes a full flush cycle"); + LOG.debug("Triggered flush of primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until completes a full flush cycle"); } region.setReadsEnabled(true); break; @@ -153,10 +156,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { if (response.hasWroteFlushWalMarker()) { if (response.getWroteFlushWalMarker()) { if (LOG.isDebugEnabled()) { - LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()). - getRegionNameAsString() + " for " + region.getRegionInfo().getEncodedName() + - "; now waiting and blocking reads until observing a flush marker"); + LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until observing a flush marker"); } region.setReadsEnabled(true); break; @@ -164,13 +168,13 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // somehow we were not able to get the primary to write the flush request. It may be // closing or already flushing. Retry flush again after some sleep. if (!counter.shouldRetry()) { - throw new IOException("Cannot cause primary to flush or drop a wal marker after " + - counter.getAttemptTimes() + " retries. Failing opening of this region replica " + - region.getRegionInfo().getRegionNameAsString()); + throw new IOException("Cannot cause primary to flush or drop a wal marker after " + + counter.getAttemptTimes() + " retries. Failing opening of this region replica " + + region.getRegionInfo().getRegionNameAsString()); } else { LOG.warn( - "Cannot cause primary replica {} to flush or drop a wal marker " + - "for region replica {}, retry={}", + "Cannot cause primary replica {} to flush or drop a wal marker " + + "for region replica {}, retry={}", ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) .getRegionNameAsString(), region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes()); @@ -178,10 +182,9 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { } } else { // nothing to do. Are we dealing with an old server? - LOG.warn( - "Was not able to trigger a flush from primary region due to old server version? " + - "Continuing to open the secondary region replica: " + - region.getRegionInfo().getRegionNameAsString()); + LOG.warn("Was not able to trigger a flush from primary region due to old server version? 
" + + "Continuing to open the secondary region replica: " + + region.getRegionInfo().getRegionNameAsString()); break; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 2ac55ec48f62..32b82264c5ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -61,7 +61,7 @@ public class UnassignRegionHandler extends EventHandler { private final RetryCounter retryCounter; public UnassignRegionHandler(HRegionServer server, String encodedName, long closeProcId, - boolean abort, @Nullable ServerName destination, EventType eventType) { + boolean abort, @Nullable ServerName destination, EventType eventType) { super(server, eventType); this.encodedName = encodedName; this.closeProcId = closeProcId; @@ -85,12 +85,14 @@ public void process() throws IOException { // reportRegionStateTransition, so the HMaster will think the region is online, before we // actually open the region, as reportRegionStateTransition is part of the opening process. long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Received CLOSE for {} which we are already " + - "trying to OPEN; try again after {}ms", encodedName, backoff); + LOG.warn( + "Received CLOSE for {} which we are already " + "trying to OPEN; try again after {}ms", + encodedName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } else { - LOG.info("Received CLOSE for {} which we are already trying to CLOSE," + - " but not completed yet", encodedName); + LOG.info( + "Received CLOSE for {} which we are already trying to CLOSE," + " but not completed yet", + encodedName); } return; } @@ -120,9 +122,10 @@ public void process() throws IOException { } rs.removeRegion(region, destination); - if (!rs.reportRegionStateTransition( - new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, closeProcId, - -1, region.getRegionInfo()))) { + if ( + !rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, + HConstants.NO_SEQNUM, closeProcId, -1, region.getRegionInfo())) + ) { throw new IOException("Failed to report close to master: " + regionName); } // Cache the close region procedure id after report region transition succeed. @@ -141,13 +144,13 @@ protected void handleException(Throwable t) { } public static UnassignRegionHandler create(HRegionServer server, String encodedName, - long closeProcId, boolean abort, @Nullable ServerName destination) { + long closeProcId, boolean abort, @Nullable ServerName destination) { // Just try our best to determine whether it is for closing meta. It is not the end of the world // if we put the handler into a wrong executor. Region region = server.getRegion(encodedName); - EventType eventType = - region != null && region.getRegionInfo().isMetaRegion() ? EventType.M_RS_CLOSE_META - : EventType.M_RS_CLOSE_REGION; + EventType eventType = region != null && region.getRegionInfo().isMetaRegion() + ? 
EventType.M_RS_CLOSE_META + : EventType.M_RS_CLOSE_REGION; return new UnassignRegionHandler(server, encodedName, closeProcId, abort, destination, eventType); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java index ffdade15372d..dadcf8b410f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; @@ -35,13 +30,16 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Handles log splitting a wal - * Used by the zk-based distributed log splitting. Created by ZKSplitLogWorkerCoordination. + * Handles log splitting a wal Used by the zk-based distributed log splitting. Created by + * ZKSplitLogWorkerCoordination. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager - */ + * distributed WAL splitter, see SplitWALManager + */ @Deprecated @InterfaceAudience.Private public class WALSplitterHandler extends EventHandler { @@ -53,10 +51,9 @@ public class WALSplitterHandler extends EventHandler { private final SplitLogWorkerCoordination.SplitTaskDetails splitTaskDetails; private final SplitLogWorkerCoordination coordination; - public WALSplitterHandler(final Server server, SplitLogWorkerCoordination coordination, - SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter, - AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor) { + SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter, + AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor) { super(server, EventType.RS_LOG_REPLAY); this.splitTaskDetails = splitDetails; this.coordination = coordination; @@ -74,35 +71,35 @@ public void process() throws IOException { try { status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), reporter); switch (status) { - case DONE: - coordination.endTask(new SplitLogTask.Done(this.serverName), - SplitLogCounters.tot_wkr_task_done, splitTaskDetails); - break; - case PREEMPTED: - SplitLogCounters.tot_wkr_preempt_task.increment(); - LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); - break; - case ERR: - if (server != null && !server.isStopped()) { - coordination.endTask(new SplitLogTask.Err(this.serverName), - SplitLogCounters.tot_wkr_task_err, splitTaskDetails); + case DONE: + coordination.endTask(new SplitLogTask.Done(this.serverName), + SplitLogCounters.tot_wkr_task_done, splitTaskDetails); + break; + 
case PREEMPTED: + SplitLogCounters.tot_wkr_preempt_task.increment(); + LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); break; - } - // if the RS is exiting then there is probably a tons of stuff - // that can go wrong. Resign instead of signaling error. - //$FALL-THROUGH$ - case RESIGNED: - if (server != null && server.isStopped()) { - LOG.info("task execution interrupted because worker is exiting " + case ERR: + if (server != null && !server.isStopped()) { + coordination.endTask(new SplitLogTask.Err(this.serverName), + SplitLogCounters.tot_wkr_task_err, splitTaskDetails); + break; + } + // if the RS is exiting then there is probably a tons of stuff + // that can go wrong. Resign instead of signaling error. + // $FALL-THROUGH$ + case RESIGNED: + if (server != null && server.isStopped()) { + LOG.info("task execution interrupted because worker is exiting " + splitTaskDetails.toString()); - } - coordination.endTask(new SplitLogTask.Resigned(this.serverName), - SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails); - break; + } + coordination.endTask(new SplitLogTask.Resigned(this.serverName), + SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails); + break; } } finally { LOG.info("Worker " + serverName + " done with task " + splitTaskDetails.toString() + " in " - + (EnvironmentEdgeManager.currentTime() - startTime) + "ms. Status = " + status); + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms. Status = " + status); this.inProgressTasks.decrementAndGet(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java index cc48d9ef18a4..fe9e41a960c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,14 +38,12 @@ @InterfaceAudience.Private public class RSDumpServlet extends StateDumpServlet { private static final long serialVersionUID = 1L; - private static final String LINE = - "==========================================================="; + private static final String LINE = "==========================================================="; @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { - HRegionServer hrs = (HRegionServer)getServletContext().getAttribute( - HRegionServer.REGIONSERVER); + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + HRegionServer hrs = + (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); assert hrs != null : "No RS in context!"; response.setContentType("text/plain"); @@ -60,8 +57,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) OutputStream os = response.getOutputStream(); try (PrintWriter out = new PrintWriter(os)) { - out.println("RegionServer status for " + hrs.getServerName() - + " as of " + new Date()); + out.println("RegionServer status for " + hrs.getServerName() + " as of " + new Date()); out.println("\n\nVersion Info:"); out.println(LINE); @@ -128,29 +124,28 @@ public static void dumpQueue(HRegionServer hrs, PrintWriter out) { } } - public static void dumpCallQueues(HRegionServer hrs, PrintWriter out) { CallQueueInfo callQueueInfo = hrs.getRpcServer().getScheduler().getCallQueueInfo(); - for(String queueName: callQueueInfo.getCallQueueNames()) { + for (String queueName : callQueueInfo.getCallQueueNames()) { out.println("\nQueue Name: " + queueName); long totalCallCount = 0L, totalCallSize = 0L; - for (String methodName: callQueueInfo.getCalledMethodNames(queueName)) { + for (String methodName : callQueueInfo.getCalledMethodNames(queueName)) { long thisMethodCount, thisMethodSize; thisMethodCount = callQueueInfo.getCallMethodCount(queueName, methodName); thisMethodSize = callQueueInfo.getCallMethodSize(queueName, methodName); - out.println("Method in call: "+methodName); - out.println("Total call count for method: "+thisMethodCount); - out.println("Total call size for method (bytes): "+thisMethodSize); + out.println("Method in call: " + methodName); + out.println("Total call count for method: " + thisMethodCount); + out.println("Total call size for method (bytes): " + thisMethodSize); totalCallCount += thisMethodCount; totalCallSize += thisMethodSize; } - out.println("Total call count for queue: "+totalCallCount); - out.println("Total call size for queue (bytes): "+totalCallSize); + out.println("Total call count for queue: " + totalCallCount); + out.println("Total call size for queue (bytes): " + totalCallSize); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java index f2d8d48865ce..b9bf2da6080e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,15 +18,13 @@ package org.apache.hadoop.hbase.regionserver.http; import java.io.IOException; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RSStatusServlet extends HttpServlet { @@ -35,8 +32,9 @@ public class RSStatusServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException { - HRegionServer hrs = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER); + throws ServletException, IOException { + HRegionServer hrs = + (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); assert hrs != null : "No RS in context!"; resp.setContentType("text/html"); @@ -48,14 +46,10 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) } RSStatusTmpl tmpl = new RSStatusTmpl(); - if (req.getParameter("format") != null) - tmpl.setFormat(req.getParameter("format")); - if (req.getParameter("filter") != null) - tmpl.setFilter(req.getParameter("filter")); - if (req.getParameter("bcn") != null) - tmpl.setBcn(req.getParameter("bcn")); - if (req.getParameter("bcv") != null) - tmpl.setBcv(req.getParameter("bcv")); + if (req.getParameter("format") != null) tmpl.setFormat(req.getParameter("format")); + if (req.getParameter("filter") != null) tmpl.setFilter(req.getParameter("filter")); + if (req.getParameter("bcn") != null) tmpl.setBcn(req.getParameter("bcn")); + if (req.getParameter("bcv") != null) tmpl.setBcv(req.getParameter("bcv")); tmpl.render(resp.getWriter(), hrs); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java index 35726ab0f2e4..b23a016c9643 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +42,7 @@ public ColumnCount(byte[] column) { /** * Constructor * @param column the qualifier to count the versions for - * @param count initial count + * @param count initial count */ public ColumnCount(byte[] column, int count) { this(column, 0, column.length, count); @@ -54,7 +53,7 @@ public ColumnCount(byte[] column, int count) { * @param column the qualifier to count the versions for * @param offset in the passed buffer where to start the qualifier from * @param length of the qualifier - * @param count initial count + * @param count initial count */ public ColumnCount(byte[] column, int offset, int length, int count) { this.bytes = column; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java index bd6cb20d8293..1fd478889860 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,34 +18,32 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** - * Implementing classes of this interface will be used for the tracking - * and enforcement of columns and numbers of versions and timeToLive during - * the course of a Get or Scan operation. + * Implementing classes of this interface will be used for the tracking and enforcement of columns + * and numbers of versions and timeToLive during the course of a Get or Scan operation. *

 * Currently there are two different types of Store/Family-level queries.
- * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
- * one or more column qualifiers to return in the family.</li>
- * <li>{@link ScanWildcardColumnTracker} is used when no columns are
- * explicitly specified.</li>
+ * <ul>
+ * <li>{@link ExplicitColumnTracker} is used when the query specifies one or more column qualifiers
+ * to return in the family.</li>
+ * <li>{@link ScanWildcardColumnTracker} is used when no columns are explicitly specified.</li>
  * </ul>
  * <p>
  * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
- * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.</li>
- * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)</li>
+ * <ul>
+ * <li>{@link #checkColumn} is called when a Put satisfies all other conditions of the query.</li>
+ * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher believes that the current
+ * column should be skipped (by timestamp, filter etc.)</li>
  * </ul>
  * <p>
  * These two methods returns a
- * {@link org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode}
- * to define what action should be taken.
+ * {@link org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode} to define
+ * what action should be taken.
  * <p>
        * This class is NOT thread-safe as queries are never multi-threaded */ @@ -56,82 +53,73 @@ public interface ColumnTracker extends ShipperListener { /** * Checks if the column is present in the list of requested columns by returning the match code * instance. It does not check against the number of versions for the columns asked for. To do the - * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} - * method based on the return type (INCLUDE) of this method. The values that can be returned by - * this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and - * {@link MatchCode#SEEK_NEXT_ROW}. + * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} method based + * on the return type (INCLUDE) of this method. The values that can be returned by this method are + * {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and {@link MatchCode#SEEK_NEXT_ROW}. * @param cell a cell with the column to match against * @param type The type of the Cell * @return The match code instance. * @throws IOException in case there is an internal consistency problem caused by a data - * corruption. + * corruption. */ ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) throws IOException; /** * Keeps track of the number of versions for the columns asked for. It assumes that the user has * already checked if the cell needs to be included by calling the - * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method - * are {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, - * {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. - * Implementations which include all the columns could just return {@link MatchCode#INCLUDE} in - * the {@link #checkColumn(Cell, byte)} method and perform all the operations in this - * checkVersions method. - * @param cell a cell with the column to match against - * @param timestamp The timestamp of the cell. - * @param type the type of the key value (Put/Delete) + * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method are + * {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} + * and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. Implementations which include all the columns + * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(Cell, byte)} method and + * perform all the operations in this checkVersions method. + * @param cell a cell with the column to match against + * @param timestamp The timestamp of the cell. + * @param type the type of the key value (Put/Delete) * @param ignoreCount indicates if the KV needs to be excluded while counting (used during - * compactions. We only count KV's that are older than all the scanners' read points.) + * compactions. We only count KV's that are older than all the scanners' read + * points.) * @return the scan query matcher match code instance * @throws IOException in case there is an internal consistency problem caused by a data - * corruption. + * corruption. */ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException; + boolean ignoreCount) throws IOException; + /** * Resets the Matcher */ void reset(); /** - * * @return true when done. */ boolean done(); /** - * Used by matcher and scan/get to get a hint of the next column - * to seek to after checkColumn() returns SKIP. 
Returns the next interesting - * column we want, or NULL there is none (wildcard scanner). - * - * Implementations aren't required to return anything useful unless the most recent - * call was to checkColumn() and the return code was SKIP. This is pretty implementation - * detail-y, but optimizations are like that. - * + * Used by matcher and scan/get to get a hint of the next column to seek to after checkColumn() + * returns SKIP. Returns the next interesting column we want, or NULL there is none (wildcard + * scanner). Implementations aren't required to return anything useful unless the most recent call + * was to checkColumn() and the return code was SKIP. This is pretty implementation detail-y, but + * optimizations are like that. * @return null, or a ColumnCount that we should seek to */ ColumnCount getColumnHint(); /** - * Retrieve the MatchCode for the next row or column - * @param cell + * Retrieve the MatchCode for the next row or column n */ MatchCode getNextRowOrNextColumn(Cell cell); /** - * Give the tracker a chance to declare it's done based on only the timestamp - * to allow an early out. - * - * @param timestamp - * @return true to early out based on timestamp. + * Give the tracker a chance to declare it's done based on only the timestamp to allow an early + * out. n * @return true to early out based on timestamp. */ boolean isDone(long timestamp); /** * This method is used to inform the column tracker that we are done with this column. We may get * this information from external filters or timestamp range and we then need to indicate this - * information to tracker. It is currently implemented for ExplicitColumnTracker. - * @param cell + * information to tracker. It is currently implemented for ExplicitColumnTracker. n */ default void doneWithColumn(Cell cell) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java index f9fb6029db31..9a4361a956aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,15 +20,14 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for compaction. 
@@ -46,9 +45,9 @@ public abstract class CompactionScanQueryMatcher extends ScanQueryMatcher { protected final KeepDeletedCells keepDeletedCells; protected CompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columnTracker, long readPointToUse, long oldestUnexpiredTS, long now) { + ColumnTracker columnTracker, long readPointToUse, long oldestUnexpiredTS, long now) { super(createStartKeyFromRow(EMPTY_START_ROW, scanInfo), scanInfo, columnTracker, - oldestUnexpiredTS, now); + oldestUnexpiredTS, now); this.maxReadPointToTrackVersions = readPointToUse; this.deletes = deletes; this.keepDeletedCells = scanInfo.getKeepDeletedCells(); @@ -98,37 +97,39 @@ protected final void trackDelete(Cell cell) { // If keepDeletedCells is TTL and the delete marker is expired, then we can make sure that the // minVerions is larger than 0(otherwise we will just return at preCheck). So here we still // need to track the delete marker to see if it masks some cells. - if (keepDeletedCells == KeepDeletedCells.FALSE - || (keepDeletedCells == KeepDeletedCells.TTL && cell.getTimestamp() < oldestUnexpiredTS)) { + if ( + keepDeletedCells == KeepDeletedCells.FALSE + || (keepDeletedCells == KeepDeletedCells.TTL && cell.getTimestamp() < oldestUnexpiredTS) + ) { deletes.add(cell); } } public static CompactionScanQueryMatcher create(ScanInfo scanInfo, ScanType scanType, - long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, - byte[] dropDeletesFromRow, byte[] dropDeletesToRow, - RegionCoprocessorHost regionCoprocessorHost) throws IOException { - Pair trackers = getTrackers(regionCoprocessorHost, null, - scanInfo,oldestUnexpiredTS, null); + long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, + byte[] dropDeletesFromRow, byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) + throws IOException { + Pair trackers = + getTrackers(regionCoprocessorHost, null, scanInfo, oldestUnexpiredTS, null); DeleteTracker deleteTracker = trackers.getFirst(); ColumnTracker columnTracker = trackers.getSecond(); if (dropDeletesFromRow == null) { if (scanType == ScanType.COMPACT_RETAIN_DELETES) { if (scanInfo.isNewVersionBehavior()) { return new IncludeAllCompactionQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, oldestUnexpiredTS, now); + readPointToUse, oldestUnexpiredTS, now); } else { return new MinorCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, oldestUnexpiredTS, now); + readPointToUse, oldestUnexpiredTS, now); } } else { return new MajorCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, earliestPutTs, oldestUnexpiredTS, now); + readPointToUse, earliestPutTs, oldestUnexpiredTS, now); } } else { return new StripeCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker, - readPointToUse, earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, - dropDeletesToRow); + readPointToUse, earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, + dropDeletesToRow); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java index be9c51eca857..0b48e1ca3b0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the 
Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.ShipperListener; +import org.apache.yetus.audience.InterfaceAudience; /** * This interface is used for the tracking and enforcement of Deletes during the course of a Get or @@ -81,8 +81,8 @@ enum DeleteResult { COLUMN_DELETED, // The Cell is deleted by a delete column. VERSION_DELETED, // The Cell is deleted by a version delete. NOT_DELETED, - VERSION_MASKED // The Cell is masked by max number of versions which is considered as - // deleted in strong semantics of versions(See MvccTracker) + VERSION_MASKED // The Cell is masked by max number of versions which is considered as + // deleted in strong semantics of versions(See MvccTracker) } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java index c9899d510416..397e2631a440 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * A query matcher for compaction which can drop delete markers. @@ -53,8 +53,8 @@ public abstract class DropDeletesCompactionScanQueryMatcher extends CompactionSc protected final long earliestPutTs; protected DropDeletesCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - long now) { + ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, + long now) { super(scanInfo, deletes, columns, readPointToUse, oldestUnexpiredTS, now); this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes(); this.earliestPutTs = earliestPutTs; @@ -66,8 +66,10 @@ protected final MatchCode tryDropDelete(Cell cell) { if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) { return MatchCode.INCLUDE; } - if (keepDeletedCells == KeepDeletedCells.TRUE - || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS)) { + if ( + keepDeletedCells == KeepDeletedCells.TRUE + || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS) + ) { // If keepDeletedCell is true, or the delete marker is not expired yet, we should include it // in version counting to see if we can drop it. The only exception is that, we can make // sure that no put is older than this delete marker. 
And under this situation, all later diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java index c0f13c0ac554..1ce2c6136cc2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is used for the tracking and enforcement of columns and numbers of versions during the @@ -68,13 +67,13 @@ public class ExplicitColumnTracker implements ColumnTracker { /** * Default constructor. - * @param columns columns specified user in query - * @param minVersions minimum number of versions to keep - * @param maxVersions maximum versions to return per column + * @param columns columns specified user in query + * @param minVersions minimum number of versions to keep + * @param maxVersions maximum versions to return per column * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL */ public ExplicitColumnTracker(NavigableSet columns, int minVersions, int maxVersions, - long oldestUnexpiredTS) { + long oldestUnexpiredTS) { this.maxVersions = maxVersions; this.minVersions = minVersions; this.oldestStamp = oldestUnexpiredTS; @@ -154,7 +153,7 @@ public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) { @Override public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + boolean ignoreCount) throws IOException { assert !PrivateCellUtil.isDelete(type); if (ignoreCount) { return ScanQueryMatcher.MatchCode.INCLUDE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java index a486bec4377e..c6776a05a41d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,19 +18,18 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * A compaction query matcher that always return INCLUDE and drops nothing. 
*/ @InterfaceAudience.Private -public class IncludeAllCompactionQueryMatcher extends MinorCompactionScanQueryMatcher{ +public class IncludeAllCompactionQueryMatcher extends MinorCompactionScanQueryMatcher { public IncludeAllCompactionQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { + ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { super(scanInfo, deletes, columns, readPointToUse, oldestUnexpiredTS, now); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java index 2f02d77d0f5b..7d3d973779c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for major compaction. @@ -31,8 +30,8 @@ public class MajorCompactionScanQueryMatcher extends DropDeletesCompactionScanQueryMatcher { public MajorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - long now) { + ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, + long now) { super(scanInfo, deletes, columns, readPointToUse, earliestPutTs, oldestUnexpiredTS, now); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java index b3815dae1e73..70e474e106b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for minor compaction. 
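The trackDelete()/tryDropDelete() hunks above encode the KEEP_DELETED_CELLS policy: a delete marker survives compaction while it is younger than timeToPurgeDeletes, and it stays visible for version accounting when the setting is TRUE, or is TTL and the marker has not yet expired. A rough, standalone sketch of that decision (illustrative only; the real matchers also consult the DeleteTracker and the scan type):

  public class DeleteMarkerRetentionSketch {
    enum KeepDeletedCells { FALSE, TRUE, TTL }

    // Decide whether a delete marker should be retained during compaction.
    static boolean retainDeleteMarker(KeepDeletedCells mode, long now, long markerTs,
        long oldestUnexpiredTs, long timeToPurgeDeletes) {
      if (timeToPurgeDeletes > 0 && now - markerTs <= timeToPurgeDeletes) {
        return true; // still inside the purge-delay window
      }
      return mode == KeepDeletedCells.TRUE
          || (mode == KeepDeletedCells.TTL && markerTs >= oldestUnexpiredTs);
    }

    public static void main(String[] args) {
      long now = 100_000L;
      long oldestUnexpired = 40_000L; // markers older than this have passed their TTL
      System.out.println(retainDeleteMarker(KeepDeletedCells.FALSE, now, 10_000L, oldestUnexpired, 0));      // false: drop
      System.out.println(retainDeleteMarker(KeepDeletedCells.TTL, now, 50_000L, oldestUnexpired, 0));        // true: not expired yet
      System.out.println(retainDeleteMarker(KeepDeletedCells.FALSE, now, 99_000L, oldestUnexpired, 5_000L)); // true: purge delay
    }
  }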
@@ -31,7 +30,7 @@ public class MinorCompactionScanQueryMatcher extends CompactionScanQueryMatcher { public MinorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { + ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { super(scanInfo, deletes, columns, readPointToUse, oldestUnexpiredTS, now); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java index d62e2aa1f5b6..146f67dbd2fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -26,18 +26,17 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** - * A tracker both implementing ColumnTracker and DeleteTracker, used for mvcc-sensitive scanning. - * We should make sure in one QueryMatcher the ColumnTracker and DeleteTracker is the same instance. + * A tracker both implementing ColumnTracker and DeleteTracker, used for mvcc-sensitive scanning. We + * should make sure in one QueryMatcher the ColumnTracker and DeleteTracker is the same instance. */ @InterfaceAudience.Private public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { @@ -71,7 +70,6 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { /** * Note maxVersion and minVersion must set according to cf's conf, not user's scan parameter. - * * @param columns columns specified user in query * @param comparartor the cell comparator * @param minVersion The minimum number of versions to keep(used when TTL is set). @@ -81,7 +79,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL */ public NewVersionBehaviorTracker(NavigableSet columns, CellComparator comparartor, - int minVersion, int maxVersion, int resultMaxVersions, long oldestUnexpiredTS) { + int minVersion, int maxVersion, int resultMaxVersions, long oldestUnexpiredTS) { this.maxVersions = maxVersion; this.minVersions = minVersion; this.resultMaxVersions = resultMaxVersions; @@ -103,8 +101,8 @@ public void beforeShipped() throws IOException { } /** - * A data structure which contains infos we need that happens before this node's mvcc and - * after the previous node's mvcc. A node means there is a version deletion at the mvcc and ts. + * A data structure which contains infos we need that happens before this node's mvcc and after + * the previous node's mvcc. A node means there is a version deletion at the mvcc and ts. */ protected class DeleteVersionsNode { public long ts; @@ -158,11 +156,10 @@ protected DeleteVersionsNode getDeepCopy() { } /** - * Reset the map if it is different with the last Cell. - * Save the cq array/offset/length for next Cell. - * - * @return If this put has duplicate ts with last cell, return the mvcc of last cell. - * Else return MAX_VALUE. + * Reset the map if it is different with the last Cell. Save the cq array/offset/length for next + * Cell. + * @return If this put has duplicate ts with last cell, return the mvcc of last cell. Else return + * MAX_VALUE. 
*/ protected long prepare(Cell cell) { if (isColumnQualifierChanged(cell)) { @@ -173,8 +170,10 @@ protected long prepare(Cell cell) { delColMap.put(e.getKey(), e.getValue().getDeepCopy()); } countCurrentCol = 0; - } else if (!PrivateCellUtil.isDelete(lastCqType) && lastCqType == cell.getTypeByte() - && lastCqTs == cell.getTimestamp()) { + } else if ( + !PrivateCellUtil.isDelete(lastCqType) && lastCqType == cell.getTypeByte() + && lastCqTs == cell.getTimestamp() + ) { // Put with duplicate timestamp, ignore. return lastCqMvcc; } @@ -188,8 +187,10 @@ protected long prepare(Cell cell) { } private boolean isColumnQualifierChanged(Cell cell) { - if (delColMap.isEmpty() && lastCqArray == null && cell.getQualifierLength() == 0 - && (PrivateCellUtil.isDeleteColumns(cell) || PrivateCellUtil.isDeleteColumnVersion(cell))) { + if ( + delColMap.isEmpty() && lastCqArray == null && cell.getQualifierLength() == 0 + && (PrivateCellUtil.isDeleteColumns(cell) || PrivateCellUtil.isDeleteColumnVersion(cell)) + ) { // for null columnQualifier return true; } @@ -202,25 +203,25 @@ public void add(Cell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (Type.codeToType(type)) { - // By the order of seen. We put null cq at first. - case DeleteFamily: // Delete all versions of all columns of the specified family - delFamMap.put(cell.getSequenceId(), + // By the order of seen. We put null cq at first. + case DeleteFamily: // Delete all versions of all columns of the specified family + delFamMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId())); - break; - case DeleteFamilyVersion: // Delete all columns of the specified family and specified version - delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - - // These two kinds of markers are mix with Puts. - case DeleteColumn: // Delete all versions of the specified column - delColMap.put(cell.getSequenceId(), + break; + case DeleteFamilyVersion: // Delete all columns of the specified family and specified version + delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + + // These two kinds of markers are mix with Puts. + case DeleteColumn: // Delete all versions of the specified column + delColMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId())); - break; - case Delete: // Delete the specified version of the specified column. - delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - default: - throw new AssertionError("Unknown delete marker type for " + cell); + break; + case Delete: // Delete the specified version of the specified column. 
+ delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + default: + throw new AssertionError("Unknown delete marker type for " + cell); } } @@ -234,7 +235,7 @@ public DeleteResult isDeleted(Cell cell) { long duplicateMvcc = prepare(cell); for (Map.Entry e : delColMap.tailMap(cell.getSequenceId()) - .entrySet()) { + .entrySet()) { DeleteVersionsNode node = e.getValue(); long deleteMvcc = Long.MAX_VALUE; SortedSet deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp()); @@ -244,9 +245,8 @@ public DeleteResult isDeleted(Cell cell) { deleteMvcc = tail.first(); } } - SortedMap> subMap = - node.mvccCountingMap - .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); + SortedMap> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), + true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; @@ -270,7 +270,7 @@ public DeleteResult isDeleted(Cell cell) { @Override public boolean isEmpty() { return delColMap.size() == 1 && delColMap.get(Long.MAX_VALUE).mvccCountingMap.size() == 1 - && delFamMap.size() == 1 && delFamMap.get(Long.MAX_VALUE).mvccCountingMap.size() == 1; + && delFamMap.size() == 1 && delFamMap.get(Long.MAX_VALUE).mvccCountingMap.size() == 1; } @Override @@ -278,17 +278,17 @@ public void update() { // ignore } - //ColumnTracker + // ColumnTracker @Override public MatchCode checkColumn(Cell cell, byte type) throws IOException { if (columns == null) { - return MatchCode.INCLUDE; + return MatchCode.INCLUDE; } while (!done()) { - int c = CellUtil.compareQualifiers(cell, - columns[columnIndex], 0, columns[columnIndex].length); + int c = + CellUtil.compareQualifiers(cell, columns[columnIndex], 0, columns[columnIndex].length); if (c < 0) { return MatchCode.SEEK_NEXT_COL; } @@ -305,8 +305,8 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { } @Override - public MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + public MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) + throws IOException { assert !PrivateCellUtil.isDelete(type); // We drop old version in #isDeleted, so here we won't SKIP because of versioning. But we should // consider TTL. @@ -350,7 +350,7 @@ public void reset() { resetInternal(); } - protected void resetInternal(){ + protected void resetInternal() { delFamMap.put(Long.MAX_VALUE, new DeleteVersionsNode()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java index c755ff5c4556..93288cba8cd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for normal user scan. @@ -42,7 +41,7 @@ public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher { protected final boolean seePastDeleteMarkers; protected NormalUserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, DeleteTracker deletes, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, DeleteTracker deletes, long oldestUnexpiredTS, long now) { super(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, now); this.deletes = deletes; this.get = scan.isGetScan(); @@ -67,8 +66,8 @@ public MatchCode match(Cell cell) throws IOException { long timestamp = cell.getTimestamp(); byte typeByte = cell.getTypeByte(); if (PrivateCellUtil.isDelete(typeByte)) { - boolean includeDeleteMarker = seePastDeleteMarkers ? tr.withinTimeRange(timestamp) - : tr.withinOrAfterTimeRange(timestamp); + boolean includeDeleteMarker = + seePastDeleteMarkers ? tr.withinTimeRange(timestamp) : tr.withinOrAfterTimeRange(timestamp); if (includeDeleteMarker) { this.deletes.add(cell); } @@ -92,12 +91,12 @@ protected boolean isGet() { } public static NormalUserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, - ColumnTracker columns, DeleteTracker deletes, boolean hasNullColumn, long oldestUnexpiredTS, - long now) throws IOException { + ColumnTracker columns, DeleteTracker deletes, boolean hasNullColumn, long oldestUnexpiredTS, + long now) throws IOException { if (scan.isReversed()) { if (scan.includeStopRow()) { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -106,7 +105,7 @@ protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { }; } else { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -117,7 +116,7 @@ protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { } else { if (scan.includeStopRow()) { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -126,7 +125,7 @@ protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { }; } else { return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, - oldestUnexpiredTS, now) { + oldestUnexpiredTS, now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java index ed9ba58c9901..180d2dd2ed31 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for raw scan. @@ -31,7 +30,7 @@ public abstract class RawScanQueryMatcher extends UserScanQueryMatcher { protected RawScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, long oldestUnexpiredTS, long now) { super(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, now); } @@ -61,11 +60,11 @@ protected boolean isGet() { } public static RawScanQueryMatcher create(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, long oldestUnexpiredTS, long now) { if (scan.isReversed()) { if (scan.includeStopRow()) { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -74,7 +73,7 @@ protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { }; } else { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -85,7 +84,7 @@ protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { } else { if (scan.includeStopRow()) { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { @@ -94,7 +93,7 @@ protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { }; } else { return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS, - now) { + now) { @Override protected boolean moreRowsMayExistsAfter(int cmpToStopRow) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index 26da698f4774..3557973aae5e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; import java.util.SortedSet; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is responsible for the tracking and enforcement of Deletes during the course of a Scan @@ -98,8 +96,7 @@ public void add(Cell cell) { /** * Check if the specified Cell buffer has been deleted by a previously seen delete. - * @param cell - current cell to check if deleted by a previously seen delete - * @return deleteResult + * @param cell - current cell to check if deleted by a previously seen delete n */ @Override public DeleteResult isDeleted(Cell cell) { @@ -133,12 +130,12 @@ public DeleteResult isDeleted(Cell cell) { deleteCell = null; } else { throw new IllegalStateException("isDelete failed: deleteBuffer=" - + Bytes.toStringBinary(deleteCell.getQualifierArray(), - deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) - + ", qualifier=" - + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) - + ", timestamp=" + timestamp + ", comparison result: " + ret); + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), + deleteCell.getQualifierLength()) + + ", qualifier=" + + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + + ", timestamp=" + timestamp + ", comparison result: " + ret); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java index 5833d10877e0..17008e2941a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -131,7 +130,7 @@ public static enum MatchCode { protected Cell currentRow; protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker columns, - long oldestUnexpiredTS, long now) { + long oldestUnexpiredTS, long now) { this.rowComparator = scanInfo.getComparator(); this.startKey = startKey; this.oldestUnexpiredTS = oldestUnexpiredTS; @@ -140,12 +139,10 @@ protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker colum } /** - * @param cell - * @param oldestTimestamp - * @return true if the cell is expired + * nn * @return true if the cell is expired */ private static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, - final long now) { + final long now) { // Look for a TTL tag first. Use it instead of the family setting if // found. 
If a cell has multiple TTLs, resolve the conflict by using the // first tag encountered. @@ -226,7 +223,6 @@ protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { } } - /** * Determines if the caller should do one of several things: *
          @@ -239,7 +235,7 @@ protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { * @param cell KeyValue to check * @return The match code instance. * @throws IOException in case there is an internal consistency problem caused by a data - * corruption. + * corruption. */ public abstract MatchCode match(Cell cell) throws IOException; @@ -272,8 +268,7 @@ public void clearCurrentRow() { protected abstract void reset(); /** - * Set the row when there is change in row - * @param currentRow + * Set the row when there is change in row n */ public void setToNewRow(Cell currentRow) { this.currentRow = currentRow; @@ -301,7 +296,8 @@ public Cell getKeyForNextColumn(Cell cell) { if (nextKey != cell) { return nextKey; } - // The cell is at the end of row/family/qualifier, so it is impossible to find any DeleteFamily cells. + // The cell is at the end of row/family/qualifier, so it is impossible to find any + // DeleteFamily cells. // Let us seek to next column. } ColumnCount nextColumn = columns.getColumnHint(); @@ -319,8 +315,8 @@ public Cell getKeyForNextColumn(Cell cell) { * @return result of the compare between the indexed key and the key portion of the passed cell */ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { - return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0, - 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, + null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } /** @@ -331,8 +327,8 @@ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { - return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, - 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, + null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } else { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(), @@ -366,8 +362,8 @@ protected static Cell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) } protected static Pair getTrackers(RegionCoprocessorHost host, - NavigableSet columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan) - throws IOException { + NavigableSet columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan) + throws IOException { int resultMaxVersion = scanInfo.getMaxVersions(); int maxVersionToCheck = resultMaxVersion; if (userScan != null) { @@ -383,8 +379,7 @@ protected static Pair getTrackers(RegionCoprocesso DeleteTracker deleteTracker; if (scanInfo.isNewVersionBehavior() && (userScan == null || !userScan.isRaw())) { deleteTracker = new NewVersionBehaviorTracker(columns, scanInfo.getComparator(), - scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, - oldestUnexpiredTS); + scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, oldestUnexpiredTS); } else { deleteTracker = new ScanDeleteTracker(scanInfo.getComparator()); } @@ -392,8 +387,8 @@ protected static Pair getTrackers(RegionCoprocesso deleteTracker = host.postInstantiateDeleteTracker(deleteTracker); if 
(deleteTracker instanceof VisibilityScanDeleteTracker && scanInfo.isNewVersionBehavior()) { deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getComparator(), - scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, - oldestUnexpiredTS); + scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion, + oldestUnexpiredTS); } } @@ -403,7 +398,7 @@ protected static Pair getTrackers(RegionCoprocesso columnTracker = (NewVersionBehaviorTracker) deleteTracker; } else if (columns == null || columns.size() == 0) { columnTracker = new ScanWildcardColumnTracker(scanInfo.getMinVersions(), maxVersionToCheck, - oldestUnexpiredTS, scanInfo.getComparator()); + oldestUnexpiredTS, scanInfo.getComparator()); } else { columnTracker = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersionToCheck, oldestUnexpiredTS); @@ -413,7 +408,7 @@ protected static Pair getTrackers(RegionCoprocesso // Used only for testing purposes static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset, int length, - long ttl, byte type, boolean ignoreCount) throws IOException { + long ttl, byte type, boolean ignoreCount) throws IOException { KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, HConstants.EMPTY_BYTE_ARRAY, 0, 0, bytes, offset, length); MatchCode matchCode = columnTracker.checkColumn(kv, type); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java index f2ad1e6b87c9..4d84e5a0fdf7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Keeps track of the columns for a scan if they are not explicitly specified @@ -50,15 +48,16 @@ public class ScanWildcardColumnTracker implements ColumnTracker { private long oldestStamp; private final CellComparator comparator; + /** * Return maxVersions of every row. - * @param minVersion Minimum number of versions to keep - * @param maxVersion Maximum number of versions to return + * @param minVersion Minimum number of versions to keep + * @param maxVersion Maximum number of versions to return * @param oldestUnexpiredTS oldest timestamp that has not expired according to the TTL. 
- * @param comparator used to compare the qualifier of cell + * @param comparator used to compare the qualifier of cell */ - public ScanWildcardColumnTracker(int minVersion, int maxVersion, - long oldestUnexpiredTS, CellComparator comparator) { + public ScanWildcardColumnTracker(int minVersion, int maxVersion, long oldestUnexpiredTS, + CellComparator comparator) { this.maxVersions = maxVersion; this.minVersions = minVersion; this.oldestStamp = oldestUnexpiredTS; @@ -79,7 +78,7 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { */ @Override public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + boolean ignoreCount) throws IOException { if (columnCell == null) { // first iteration. resetCell(cell); @@ -119,8 +118,7 @@ public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte // was incorrectly stored into the store for this one. Throw an exception, // because this might lead to data corruption. throw new IOException("ScanWildcardColumnTracker.checkColumn ran into a column actually " - + "smaller than the previous column: " - + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); + + "smaller than the previous column: " + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); } private void resetCell(Cell columnCell) { @@ -186,8 +184,7 @@ public ColumnCount getColumnHint() { } /** - * We can never know a-priori if we are done, so always return false. - * @return false + * We can never know a-priori if we are done, so always return false. n */ @Override public boolean done() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java index 763735e10786..370164c8a0d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for stripe compaction if range drop deletes is used. 
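The StripeCompactionScanQueryMatcher hunks that follow only re-wrap the entered()/left() boundary checks that decide where delete markers may be dropped. As a minimal stand-alone sketch of that row-range logic (hypothetical class and method names; a plain byte[] comparison stands in for CellComparator#compareRows), the semantics look like this:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public final class DropDeletesRangeSketch {
  // Stand-in for CellComparator#compareRows: lexicographic comparison of row keys.
  private static int compareRows(byte[] a, byte[] b) {
    return Arrays.compare(a, b);
  }

  // A delete marker may be dropped only for rows in [dropDeletesFromRow, dropDeletesToRow);
  // an empty boundary array leaves that side of the range open, matching the length checks
  // in the patched entered()/left() methods.
  static boolean mayDropDeletes(byte[] row, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) {
    boolean entered = dropDeletesFromRow.length == 0 || compareRows(row, dropDeletesFromRow) >= 0;
    boolean left = dropDeletesToRow.length > 0 && compareRows(row, dropDeletesToRow) >= 0;
    return entered && !left;
  }

  public static void main(String[] args) {
    byte[] from = "row-b".getBytes(StandardCharsets.UTF_8);
    byte[] to = "row-f".getBytes(StandardCharsets.UTF_8);
    System.out.println(mayDropDeletes("row-c".getBytes(StandardCharsets.UTF_8), from, to)); // true
    System.out.println(mayDropDeletes("row-f".getBytes(StandardCharsets.UTF_8), from, to)); // false
  }
}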
@@ -35,14 +34,16 @@ public class StripeCompactionScanQueryMatcher extends DropDeletesCompactionScanQ private final byte[] dropDeletesToRow; private enum DropDeletesInOutput { - BEFORE, IN, AFTER + BEFORE, + IN, + AFTER } private DropDeletesInOutput dropDeletesInOutput = DropDeletesInOutput.BEFORE; public StripeCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, - ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - long now, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) { + ColumnTracker columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, + long now, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) { super(scanInfo, deletes, columns, readPointToUse, earliestPutTs, oldestUnexpiredTS, now); this.dropDeletesFromRow = dropDeletesFromRow; this.dropDeletesToRow = dropDeletesToRow; @@ -83,13 +84,14 @@ public MatchCode match(Cell cell) throws IOException { } private boolean entered() { - return dropDeletesFromRow.length == 0 || rowComparator.compareRows(currentRow, - dropDeletesFromRow, 0, dropDeletesFromRow.length) >= 0; + return dropDeletesFromRow.length == 0 + || rowComparator.compareRows(currentRow, dropDeletesFromRow, 0, dropDeletesFromRow.length) + >= 0; } private boolean left() { return dropDeletesToRow.length > 0 - && rowComparator.compareRows(currentRow, dropDeletesToRow, 0, dropDeletesToRow.length) >= 0; + && rowComparator.compareRows(currentRow, dropDeletesToRow, 0, dropDeletesToRow.length) >= 0; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java index cc994466b332..6c3d002b0929 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,10 @@ import java.io.IOException; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; @@ -32,6 +30,7 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for user scan. @@ -69,14 +68,14 @@ private static Cell createStartKey(Scan scan, ScanInfo scanInfo) { } protected UserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, - boolean hasNullColumn, long oldestUnexpiredTS, long now) { + boolean hasNullColumn, long oldestUnexpiredTS, long now) { super(createStartKey(scan, scanInfo), scanInfo, columns, oldestUnexpiredTS, now); this.hasNullColumn = hasNullColumn; this.filter = scan.getFilter(); if (this.filter != null) { - this.versionsAfterFilter = - scan.isRaw() ? 
scan.getMaxVersions() : Math.min(scan.getMaxVersions(), - scanInfo.getMaxVersions()); + this.versionsAfterFilter = scan.isRaw() + ? scan.getMaxVersions() + : Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions()); } else { this.versionsAfterFilter = 0; } @@ -122,7 +121,7 @@ public void beforeShipped() throws IOException { } protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) - throws IOException { + throws IOException { int tsCmp = tr.compare(timestamp); if (tsCmp > 0) { return MatchCode.SKIP; @@ -148,12 +147,13 @@ protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) default: // It means it is INCLUDE, INCLUDE_AND_SEEK_NEXT_COL or INCLUDE_AND_SEEK_NEXT_ROW. assert matchCode == MatchCode.INCLUDE || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL - || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; + || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; break; } - return filter == null ? matchCode : mergeFilterResponse(cell, matchCode, - filter.filterCell(cell)); + return filter == null + ? matchCode + : mergeFilterResponse(cell, matchCode, filter.filterCell(cell)); } /** @@ -188,7 +188,7 @@ protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) * */ private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, - ReturnCode filterResponse) { + ReturnCode filterResponse) { switch (filterResponse) { case SKIP: if (matchCode == MatchCode.INCLUDE) { @@ -226,7 +226,7 @@ private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, // It means it is INCLUDE, INCLUDE_AND_SEEK_NEXT_COL or INCLUDE_AND_SEEK_NEXT_ROW. assert matchCode == MatchCode.INCLUDE || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL - || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; + || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW; // We need to make sure that the number of cells returned will not exceed max version in scan // when the match code is INCLUDE* case. 
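The re-wrapped ternary in the UserScanQueryMatcher constructor above determines how many INCLUDE results a filtered scan may return per column: a raw scan keeps the scan's own limit, otherwise the column family's VERSIONS setting also caps it. A hedged stand-alone sketch of that cap (hypothetical names; the real field is versionsAfterFilter):

public final class VersionCapSketch {
  // Mirrors the versionsAfterFilter expression: raw scans ignore the family cap.
  static int versionsAfterFilter(boolean rawScan, int scanMaxVersions, int familyMaxVersions) {
    return rawScan ? scanMaxVersions : Math.min(scanMaxVersions, familyMaxVersions);
  }

  public static void main(String[] args) {
    System.out.println(versionsAfterFilter(false, 5, 3)); // 3: the family VERSIONS setting wins
    System.out.println(versionsAfterFilter(true, 5, 3));  // 5: raw scan keeps the scan limit
  }
}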
@@ -276,12 +276,12 @@ public boolean moreRowsMayExistAfter(Cell cell) { } public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, - NavigableSet columns, long oldestUnexpiredTS, long now, - RegionCoprocessorHost regionCoprocessorHost) throws IOException { + NavigableSet columns, long oldestUnexpiredTS, long now, + RegionCoprocessorHost regionCoprocessorHost) throws IOException { boolean hasNullColumn = - !(columns != null && columns.size() != 0 && columns.first().length != 0); - Pair trackers = getTrackers(regionCoprocessorHost, columns, - scanInfo, oldestUnexpiredTS, scan); + !(columns != null && columns.size() != 0 && columns.first().length != 0); + Pair trackers = + getTrackers(regionCoprocessorHost, columns, scanInfo, oldestUnexpiredTS, scan); DeleteTracker deleteTracker = trackers.getFirst(); ColumnTracker columnTracker = trackers.getSecond(); if (scan.isRaw()) { @@ -289,7 +289,7 @@ public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, oldestUnexpiredTS, now); } else { return NormalUserScanQueryMatcher.create(scan, scanInfo, columnTracker, deleteTracker, - hasNullColumn, oldestUnexpiredTS, now); + hasNullColumn, oldestUnexpiredTS, now); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java index bda3cb42b33b..074121ec053a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java index 34313241d1f6..24a20a438253 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java index cd5d30707d9e..d06a1d3c427c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -204,8 +204,7 @@ public RegionReplicationSink(Configuration conf, RegionInfo primary, TableDescri this.failedReplicas = new IntHashSet(regionReplication - 1); } - void onComplete(List sent, - Map> replica2Error) { + void onComplete(List sent, Map> replica2Error) { long maxSequenceId = Long.MIN_VALUE; long toReleaseSize = 0; for (SinkEntry entry : sent) { @@ -224,16 +223,16 @@ void onComplete(List sent, if (maxSequenceId > lastFlushedSequenceId) { LOG.warn( "Failed to replicate to secondary replica {} for {}, since the max sequence" - + " id of sunk entris is {}, which is greater than the last flush SN {}," - + " we will stop replicating for a while and trigger a flush", + + " id of sunk entris is {}, which is greater than the last flush SN {}," + + " we will stop replicating for a while and trigger a flush", replicaId, primary, maxSequenceId, lastFlushedSequenceId, error); failedReplicas.add(replicaId); addFailedReplicas = true; } else { LOG.warn( "Failed to replicate to secondary replica {} for {}, since the max sequence" - + " id of sunk entris is {}, which is less than or equal to the last flush SN {}," - + " we will not stop replicating", + + " id of sunk entris is {}, which is less than or equal to the last flush SN {}," + + " we will not stop replicating", replicaId, primary, maxSequenceId, lastFlushedSequenceId, error); } } @@ -380,8 +379,8 @@ public void add(WALKeyImpl key, WALEdit edit, ServerCall rpcCall) { long clearedSize = clearAllEntries(); if (LOG.isDebugEnabled()) { LOG.debug( - "Got a flush all request with sequence id {}, clear {} pending" + - " entries with size {}, clear failed replicas {}", + "Got a flush all request with sequence id {}, clear {} pending" + + " entries with size {}, clear failed replicas {}", flushSequenceNumber, clearedCount, StringUtils.TraditionalBinaryPrefix.long2String(clearedSize, "", 1), failedReplicas); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java index a20a001e27c6..c9fd11d06807 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,6 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.Callable; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -34,15 +29,20 @@ import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** - * This online snapshot implementation uses the distributed procedure framework to force a - * store flush and then records the hfiles. Its enter stage does nothing. Its leave stage then - * flushes the memstore, builds the region server's snapshot manifest from its hfiles list, and - * copies .regioninfos into the snapshot working directory. At the master side, there is an atomic - * rename of the working dir into the proper snapshot directory. + * This online snapshot implementation uses the distributed procedure framework to force a store + * flush and then records the hfiles. Its enter stage does nothing. Its leave stage then flushes the + * memstore, builds the region server's snapshot manifest from its hfiles list, and copies + * .regioninfos into the snapshot working directory. At the master side, there is an atomic rename + * of the working dir into the proper snapshot directory. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -57,10 +57,9 @@ public class FlushSnapshotSubprocedure extends Subprocedure { // the maximum number of attempts we flush final static int MAX_RETRIES = 3; - public FlushSnapshotSubprocedure(ProcedureMember member, - ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - List regions, SnapshotDescription snapshot, - SnapshotSubprocedurePool taskManager) { + public FlushSnapshotSubprocedure(ProcedureMember member, ForeignExceptionDispatcher errorListener, + long wakeFrequency, long timeout, List regions, SnapshotDescription snapshot, + SnapshotSubprocedurePool taskManager) { super(member, snapshot.getName(), errorListener, wakeFrequency, timeout); this.snapshot = snapshot; @@ -72,7 +71,7 @@ public FlushSnapshotSubprocedure(ProcedureMember member, } /** - * Callable for adding files to snapshot manifest working dir. Ready for multithreading. + * Callable for adding files to snapshot manifest working dir. Ready for multithreading. 
*/ public static class RegionSnapshotTask implements Callable { private HRegion region; @@ -80,8 +79,8 @@ public static class RegionSnapshotTask implements Callable { private ForeignExceptionDispatcher monitor; private SnapshotDescription snapshotDesc; - public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, - boolean skipFlush, ForeignExceptionDispatcher monitor) { + public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, boolean skipFlush, + ForeignExceptionDispatcher monitor) { this.region = region; this.skipFlush = skipFlush; this.monitor = monitor; @@ -91,21 +90,21 @@ public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, @Override public Void call() throws Exception { // Taking the region read lock prevents the individual region from being closed while a - // snapshot is in progress. This is helpful but not sufficient for preventing races with - // snapshots that involve multiple regions and regionservers. It is still possible to have + // snapshot is in progress. This is helpful but not sufficient for preventing races with + // snapshots that involve multiple regions and regionservers. It is still possible to have // an interleaving such that globally regions are missing, so we still need the verification // step. LOG.debug("Starting snapshot operation on " + region); region.startRegionOperation(Operation.SNAPSHOT); try { if (skipFlush) { - /* - * This is to take an online-snapshot without force a coordinated flush to prevent pause - * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure - * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be - * turned on/off based on the flush type. - * To minimized the code change, class name is not changed. - */ + /* + * This is to take an online-snapshot without force a coordinated flush to prevent pause + * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure + * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be + * turned on/off based on the flush type. To minimized the code change, class name is not + * changed. + */ LOG.debug("take snapshot without flush memstore first"); } else { LOG.debug("Flush Snapshotting region " + region.toString() + " started..."); @@ -155,8 +154,8 @@ private void flushSnapshot() throws ForeignException { // assert that the taskManager is empty. 
if (taskManager.hasTasks()) { - throw new IllegalStateException("Attempting to take snapshot " - + ClientSnapshotDescriptionUtils.toString(snapshot) + throw new IllegalStateException( + "Attempting to take snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " but we currently have outstanding tasks"); } @@ -200,7 +199,7 @@ public byte[] insideBarrier() throws ForeignException { @Override public void cleanup(Exception e) { LOG.info("Aborting all online FLUSH snapshot subprocedure task threads for '" - + snapshot.getName() + "' due to error", e); + + snapshot.getName() + "' due to error", e); try { taskManager.cancelTasks(); } catch (InterruptedException e1) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index a01d118718d0..2cb5b7e6f478 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,17 +28,11 @@ import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -53,20 +47,25 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; -import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; + /** * This manager class handles the work dealing with snapshots for a {@link HRegionServer}. *
- * This provides the mechanism necessary to kick off a online snapshot specific - * {@link Subprocedure} that is responsible for the regions being served by this region server. - * If any failures occur with the subprocedure, the RegionSeverSnapshotManager's subprocedure - * handler, {@link ProcedureMember}, notifies the master's ProcedureCoordinator to abort all - * others. + * This provides the mechanism necessary to kick off a online snapshot specific {@link Subprocedure} + * that is responsible for the regions being served by this region server. If any failures occur + * with the subprocedure, the RegionSeverSnapshotManager's subprocedure handler, + * {@link ProcedureMember}, notifies the master's ProcedureCoordinator to abort all others. *
* On startup, requires {@link #start()} to be called. *
          @@ -78,7 +77,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { private static final Logger LOG = LoggerFactory.getLogger(RegionServerSnapshotManager.class); /** Maximum number of snapshot region tasks that can run concurrently */ - private static final String CONCURENT_SNAPSHOT_TASKS_KEY = "hbase.snapshot.region.concurrentTasks"; + private static final String CONCURENT_SNAPSHOT_TASKS_KEY = + "hbase.snapshot.region.concurrentTasks"; private static final int DEFAULT_CONCURRENT_SNAPSHOT_TASKS = 3; /** Conf key for number of request threads to start snapshots on regionservers */ @@ -91,8 +91,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** Keep threads alive in request pool for max of 300 seconds */ public static final long SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 5 * 60000; - /** Conf key for millis between checks to see if snapshot completed or if there are errors*/ - public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = "hbase.snapshot.region.wakefrequency"; + /** Conf key for millis between checks to see if snapshot completed or if there are errors */ + public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = + "hbase.snapshot.region.wakefrequency"; /** Default amount of time to check for errors while regions finish snapshotting */ private static final long SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT = 500; @@ -102,19 +103,20 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * Exposed for testing. - * @param conf HBase configuration. - * @param parent parent running the snapshot handler - * @param memberRpc use specified memberRpc instance + * @param conf HBase configuration. + * @param parent parent running the snapshot handler + * @param memberRpc use specified memberRpc instance * @param procMember use specified ProcedureMember */ - RegionServerSnapshotManager(Configuration conf, HRegionServer parent, - ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { + RegionServerSnapshotManager(Configuration conf, HRegionServer parent, + ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { this.rss = parent; this.memberRpcs = memberRpc; this.member = procMember; } - public RegionServerSnapshotManager() {} + public RegionServerSnapshotManager() { + } /** * Start accepting snapshot requests. @@ -127,8 +129,7 @@ public void start() { /** * Close this and all running snapshot tasks - * @param force forcefully stop all running tasks - * @throws IOException + * @param force forcefully stop all running tasks n */ @Override public void stop(boolean force) throws IOException { @@ -144,20 +145,16 @@ public void stop(boolean force) throws IOException { /** * If in a running state, creates the specified subprocedure for handling an online snapshot. - * - * Because this gets the local list of regions to snapshot and not the set the master had, - * there is a possibility of a race where regions may be missed. This detected by the master in - * the snapshot verification step. - * - * @param snapshot - * @return Subprocedure to submit to the ProcedureMember. + * Because this gets the local list of regions to snapshot and not the set the master had, there + * is a possibility of a race where regions may be missed. This detected by the master in the + * snapshot verification step. n * @return Subprocedure to submit to the ProcedureMember. 
*/ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { // don't run a snapshot if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start snapshot on RS: " + rss.getServerName() - + ", because stopping/stopped!"); + throw new IllegalStateException( + "Can't start snapshot on RS: " + rss.getServerName() + ", because stopping/stopped!"); } // check to see if this server is hosting any regions for the snapshots @@ -167,64 +164,57 @@ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { involvedRegions = getRegionsToSnapshot(snapshot); } catch (IOException e1) { throw new IllegalStateException("Failed to figure out if we should handle a snapshot - " - + "something has gone awry with the online regions.", e1); + + "something has gone awry with the online regions.", e1); } - // We need to run the subprocedure even if we have no relevant regions. The coordinator + // We need to run the subprocedure even if we have no relevant regions. The coordinator // expects participation in the procedure and without sending message the snapshot attempt // will hang and fail. LOG.debug("Launching subprocedure for snapshot " + snapshot.getName() + " from table " - + snapshot.getTable() + " type " + snapshot.getType()); + + snapshot.getTable() + " type " + snapshot.getType()); ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(snapshot.getName()); Configuration conf = rss.getConfiguration(); - long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, - SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); - long wakeMillis = conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, - SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT); + long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); + long wakeMillis = + conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT); switch (snapshot.getType()) { - case FLUSH: - SnapshotSubprocedurePool taskManager = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); - return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, - timeoutMillis, involvedRegions, snapshot, taskManager); - case SKIPFLUSH: + case FLUSH: + SnapshotSubprocedurePool taskManager = + new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, + involvedRegions, snapshot, taskManager); + case SKIPFLUSH: /* - * This is to take an online-snapshot without force a coordinated flush to prevent pause - * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure + * This is to take an online-snapshot without force a coordinated flush to prevent pause The + * snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be - * turned on/off based on the flush type. - * To minimized the code change, class name is not changed. + * turned on/off based on the flush type. To minimized the code change, class name is not + * changed. 
*/ SnapshotSubprocedurePool taskManager2 = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); - return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, - timeoutMillis, involvedRegions, snapshot, taskManager2); + new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, + involvedRegions, snapshot, taskManager2); - default: - throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType()); + default: + throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType()); } } /** - * Determine if the snapshot should be handled on this server - * - * NOTE: This is racy -- the master expects a list of regionservers. - * This means if a region moves somewhere between the calls we'll miss some regions. - * For example, a region move during a snapshot could result in a region to be skipped or done - * twice. This is manageable because the {@link MasterSnapshotVerifier} will double check the - * region lists after the online portion of the snapshot completes and will explicitly fail the - * snapshot. - * - * @param snapshot - * @return the list of online regions. Empty list is returned if no regions are responsible for - * the given snapshot. - * @throws IOException + * Determine if the snapshot should be handled on this server NOTE: This is racy -- the master + * expects a list of regionservers. This means if a region moves somewhere between the calls we'll + * miss some regions. For example, a region move during a snapshot could result in a region to be + * skipped or done twice. This is manageable because the {@link MasterSnapshotVerifier} will + * double check the region lists after the online portion of the snapshot completes and will + * explicitly fail the snapshot. n * @return the list of online regions. Empty list is returned if + * no regions are responsible for the given snapshot. n */ private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { - List onlineRegions = (List) rss - .getRegions(TableName.valueOf(snapshot.getTable())); + List onlineRegions = + (List) rss.getRegions(TableName.valueOf(snapshot.getTable())); Iterator iterator = onlineRegions.iterator(); // remove the non-default regions while (iterator.hasNext()) { @@ -256,16 +246,13 @@ public Subprocedure buildSubprocedure(String name, byte[] data) { /** * We use the SnapshotSubprocedurePool, a class specific thread pool instead of - * {@link org.apache.hadoop.hbase.executor.ExecutorService}. - * - * It uses a {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of - * completed tasks which lets us efficiently cancel pending tasks upon the earliest operation - * failures. - * + * {@link org.apache.hadoop.hbase.executor.ExecutorService}. It uses a + * {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of completed + * tasks which lets us efficiently cancel pending tasks upon the earliest operation failures. * HBase's ExecutorService (different from {@link java.util.concurrent.ExecutorService}) isn't - * really built for coordinated tasks where multiple threads as part of one larger task. In - * RS's the HBase Executor services are only used for open and close and not other threadpooled - * operations such as compactions and replication sinks. + * really built for coordinated tasks where multiple threads as part of one larger task. 
In RS's + * the HBase Executor services are only used for open and close and not other threadpooled + * operations such as compactions and replication sinks. */ static class SnapshotSubprocedurePool { private final Abortable abortable; @@ -278,8 +265,7 @@ static class SnapshotSubprocedurePool { SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) { this.abortable = abortable; // configure the executor service - long keepAlive = conf.getLong( - RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY, + long keepAlive = conf.getLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY, RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS); this.name = name; @@ -294,10 +280,9 @@ boolean hasTasks() { } /** - * Submit a task to the pool. - * - * NOTE: all must be submitted before you can safely {@link #waitForOutstandingTasks()}. This - * version does not support issuing tasks from multiple concurrent table snapshots requests. + * Submit a task to the pool. NOTE: all must be submitted before you can safely + * {@link #waitForOutstandingTasks()}. This version does not support issuing tasks from multiple + * concurrent table snapshots requests. */ void submitTask(final Callable task) { Future f = this.taskPool.submit(task); @@ -307,10 +292,8 @@ void submitTask(final Callable task) { /** * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}. * This *must* be called after all tasks are submitted via submitTask. - * - * @return true on success, false otherwise - * @throws InterruptedException - * @throws SnapshotCreationException if the snapshot failed while we were waiting + * @return true on success, false otherwise n * @throws + * SnapshotCreationException if the snapshot failed while we were waiting */ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException { LOG.debug("Waiting for local region snapshots to finish."); @@ -324,9 +307,9 @@ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException if (!futures.remove(f)) { LOG.warn("unexpected future" + f); } - LOG.debug("Completed " + (i+1) + "/" + sz + " local region snapshots."); + LOG.debug("Completed " + (i + 1) + "/" + sz + " local region snapshots."); } - LOG.debug("Completed " + sz + " local region snapshots."); + LOG.debug("Completed " + sz + " local region snapshots."); return true; } catch (InterruptedException e) { LOG.warn("Got InterruptedException in SnapshotSubprocedurePool", e); @@ -339,7 +322,7 @@ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException Throwable cause = e.getCause(); if (cause instanceof ForeignException) { LOG.warn("Rethrowing ForeignException from SnapshotSubprocedurePool", e); - throw (ForeignException)e.getCause(); + throw (ForeignException) e.getCause(); } else if (cause instanceof DroppedSnapshotException) { // we have to abort the region server according to contract of flush abortable.abort("Received DroppedSnapshotException, aborting", cause); @@ -353,28 +336,28 @@ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException } /** - * This attempts to cancel out all pending and in progress tasks (interruptions issues) - * @throws InterruptedException + * This attempts to cancel out all pending and in progress tasks (interruptions issues) n */ void cancelTasks() throws InterruptedException { Collection> tasks = futures; LOG.debug("cancelling " + 
tasks.size() + " tasks for snapshot " + name); - for (Future f: tasks) { - // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there + for (Future f : tasks) { + // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there // are places in the HBase code where row/region locks are taken and not released in a - // finally block. Thus we cancel without interrupting. Cancellations will be slower to + // finally block. Thus we cancel without interrupting. Cancellations will be slower to // complete but we won't suffer from unreleased locks due to poor code discipline. f.cancel(false); } // evict remaining tasks and futures from taskPool. futures.clear(); - while (taskPool.poll() != null) {} + while (taskPool.poll() != null) { + } stop(); } /** - * Abruptly shutdown the thread pool. Call when exiting a region server. + * Abruptly shutdown the thread pool. Call when exiting a region server. */ void stop() { if (this.stopped) return; @@ -393,8 +376,8 @@ void stop() { public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZKWatcher zkw = rss.getZooKeeper(); - this.memberRpcs = new ZKProcedureMemberRpcs(zkw, - SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); + this.memberRpcs = + new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); // read in the snapshot request configuration properties Configuration conf = rss.getConfiguration(); @@ -402,8 +385,8 @@ public void initialize(RegionServerServices rss) throws KeeperException { int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT); // create the actual snapshot procedure member - ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), - opThreads, keepAlive); + ThreadPoolExecutor pool = + ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new SnapshotSubprocedureBuilder()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java index 99fd3d43572e..128537f10afe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
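The pool reformatted above leans on java.util.concurrent.ExecutorCompletionService: every task is submitted first, completions are drained in finish order, and pending work is cancelled (without interruption) once one task fails. A compact sketch of that pattern; the names are illustrative, not the HBase SnapshotSubprocedurePool:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionPoolSketch {
  private final ExecutorService executor = Executors.newFixedThreadPool(3);
  private final ExecutorCompletionService<Void> taskPool =
      new ExecutorCompletionService<>(executor);
  private final List<Future<Void>> futures = new ArrayList<>();

  // All tasks must be submitted before waiting, mirroring the contract noted above.
  void submitTask(Callable<Void> task) {
    futures.add(taskPool.submit(task));
  }

  boolean waitForOutstandingTasks() throws InterruptedException {
    try {
      int sz = futures.size();
      for (int i = 0; i < sz; i++) {
        Future<Void> f = taskPool.take(); // blocks until the next task completes, in any order
        f.get();                          // rethrows that task's failure, if it had one
        futures.remove(f);
      }
      return true;
    } catch (ExecutionException e) {
      cancelTasks();                      // first failure: stop whatever is still pending
      return false;
    } finally {
      executor.shutdown();
    }
  }

  private void cancelTasks() {
    // Cancel without interrupting, matching the caution in the TODO comment above.
    for (Future<Void> f : futures) {
      f.cancel(false);
    }
    futures.clear();
  }

  public static void main(String[] args) throws InterruptedException {
    CompletionPoolSketch pool = new CompletionPoolSketch();
    pool.submitTask(() -> null);
    pool.submitTask(() -> {
      throw new IllegalStateException("simulated region snapshot failure");
    });
    System.out.println("all tasks succeeded: " + pool.waitForOutstandingTasks());
  }
}
```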
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java index 91e1bdc7dc67..d3dfe21521d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,9 +56,9 @@ class FileBasedStoreFileTracker extends StoreFileTrackerBase { public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) { super(conf, isPrimaryReplica, ctx); - //CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table - //descriptors with the SFT impl specific configs. By the time this happens, the table has no - //regions nor stores yet, so it can't create a proper StoreContext. + // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table + // descriptors with the SFT impl specific configs. By the time this happens, the table has no + // regions nor stores yet, so it can't create a proper StoreContext. if (ctx != null) { backedFile = new StoreFileListFile(ctx); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java index 5a88f99588b1..838b3db95c20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
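The constructor comment above hinges on one detail: during table creation there is no store yet, so the tracker may be handed a null context and must simply skip creating its backing file. A toy illustration of that guard; every type here is a hypothetical stand-in:

```java
// All types here are hypothetical stand-ins used only to illustrate the null-context guard.
public class NullContextTrackerSketch {
  static class StoreContextLike {
    final String familyDir;
    StoreContextLike(String familyDir) { this.familyDir = familyDir; }
  }

  static class TrackerListFile {
    final StoreContextLike ctx;
    TrackerListFile(StoreContextLike ctx) { this.ctx = ctx; }
  }

  private final TrackerListFile backedFile;

  NullContextTrackerSketch(StoreContextLike ctx) {
    // During table creation there is no region or store yet, so only the
    // descriptor-updating half of the tracker can be used.
    backedFile = (ctx != null) ? new TrackerListFile(ctx) : null;
  }

  boolean canTrackFiles() {
    return backedFile != null;
  }

  public static void main(String[] args) {
    System.out.println(new NullContextTrackerSketch(null).canTrackFiles());                            // false
    System.out.println(new NullContextTrackerSketch(new StoreContextLike("/table/cf")).canTrackFiles()); // true
  }
}
```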
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,8 @@ @InterfaceAudience.Private public class InitializeStoreFileTrackerProcedure extends ModifyTableDescriptorProcedure { - public InitializeStoreFileTrackerProcedure(){} + public InitializeStoreFileTrackerProcedure() { + } public InitializeStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tableName) { super(env, tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java index f483d3386729..c4962885a07f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java index 1ecfee26e252..0244a09d66a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java index a7d8e703acc3..2c3434365d6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -71,8 +71,10 @@ protected ModifyStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tabl } private void checkDstSFT(String dstSFT) throws DoNotRetryIOException { - if (MigrationStoreFileTracker.class - .isAssignableFrom(StoreFileTrackerFactory.getTrackerClass(dstSFT))) { + if ( + MigrationStoreFileTracker.class + .isAssignableFrom(StoreFileTrackerFactory.getTrackerClass(dstSFT)) + ) { throw new DoNotRetryIOException("Do not need to transfer to " + dstSFT); } } @@ -88,7 +90,9 @@ public TableOperationType getTableOperationType() { } private enum StoreFileTrackerState { - NEED_FINISH_PREVIOUS_MIGRATION_FIRST, NEED_START_MIGRATION, NEED_FINISH_MIGRATION, + NEED_FINISH_PREVIOUS_MIGRATION_FIRST, + NEED_START_MIGRATION, + NEED_FINISH_MIGRATION, ALREADY_FINISHED } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java index 096f38fa36eb..ccb153b3b1b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java index 5ed35c7beae1..a7a0450c6360 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,8 +101,8 @@ private StoreFileList load(Path path) throws IOException { try (FSDataInputStream in = fs.open(path)) { int length = in.readInt(); if (length <= 0 || length > MAX_FILE_SIZE) { - throw new IOException("Invalid file length " + length + - ", either less than 0 or greater then max allowed size " + MAX_FILE_SIZE); + throw new IOException("Invalid file length " + length + + ", either less than 0 or greater then max allowed size " + MAX_FILE_SIZE); } data = new byte[length]; in.readFully(data); @@ -191,8 +191,8 @@ StoreFileList load(boolean readOnly) throws IOException { // should not have more than 2 files, if not, it means that the track files are broken, just // throw exception out and fail the region open. 
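The checkDstSFT guard in this hunk is a plain Class.isAssignableFrom test: if the configured destination is the migration implementation itself (or a subclass of it), the request is rejected. A small stand-alone sketch of the same idea, with stand-in classes and a plain IOException:

```java
import java.io.IOException;

public class DstImplGuardSketch {
  interface Tracker {}
  static class PlainTracker implements Tracker {}
  static class MigrationTracker implements Tracker {}

  static void checkDst(Class<? extends Tracker> dst) throws IOException {
    // isAssignableFrom also rejects subclasses of the migration implementation.
    if (MigrationTracker.class.isAssignableFrom(dst)) {
      throw new IOException("Do not need to transfer to " + dst.getName());
    }
  }

  public static void main(String[] args) throws IOException {
    checkDst(PlainTracker.class); // passes silently
    try {
      checkDst(MigrationTracker.class);
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```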
if (files.size() > 2) { - throw new DoNotRetryIOException("Should only have at most 2 track files for sequence id " + - entry.getKey() + ", but got " + files.size() + " files: " + files); + throw new DoNotRetryIOException("Should only have at most 2 track files for sequence id " + + entry.getKey() + ", but got " + files.size() + " files: " + files); } boolean loaded = false; for (int i = 0; i < files.size(); i++) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java index aabbe8d87494..b0024b73786a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java index f3e626707960..1c910b9670e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -136,12 +136,14 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well - if (cacheCompactedBlocksOnWrite && - totalCompactedFilesSize <= cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { + if ( + cacheCompactedBlocksOnWrite + && totalCompactedFilesSize <= cacheConf.getCacheCompactedBlocksOnWriteThreshold() + ) { writerCacheConf.enableCacheOnWrite(); if (!cacheOnWriteLogged) { - LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " + - "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", this); + LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " + + "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", this); cacheOnWriteLogged = true; } } else { @@ -149,8 +151,8 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th if (totalCompactedFilesSize > cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { // checking condition once again for logging LOG.debug( - "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted " + - "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}", + "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted " + + "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}", this, totalCompactedFilesSize, cacheConf.getCacheCompactedBlocksOnWriteThreshold()); } } @@ -159,8 +161,8 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th if (shouldCacheDataOnWrite) { writerCacheConf.enableCacheOnWrite(); if (!cacheOnWriteLogged) { - 
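The load path shown above follows a common length-prefixed pattern: read a 4-byte length, reject obviously invalid values before allocating, then readFully the payload (in the real file the payload is the serialized store-file list; here it is just a string). A runnable sketch with an in-memory stream, assuming nothing about the on-disk layout beyond that prefix:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class LengthPrefixedLoadSketch {
  // Illustrative cap only; pick whatever upper bound makes sense for the file in question.
  static final int MAX_FILE_SIZE = 128 * 1024 * 1024;

  static byte[] load(DataInputStream in) throws IOException {
    int length = in.readInt();
    // Validate before allocating: a corrupt or truncated file must not drive a huge allocation.
    if (length <= 0 || length > MAX_FILE_SIZE) {
      throw new IOException("Invalid file length " + length
        + ", either less than 0 or greater than max allowed size " + MAX_FILE_SIZE);
    }
    byte[] data = new byte[length];
    in.readFully(data);
    return data;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    byte[] payload = "store-file-list-placeholder".getBytes(StandardCharsets.UTF_8);
    out.writeInt(payload.length);
    out.write(payload);
    byte[] back = load(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(new String(back, StandardCharsets.UTF_8));
  }
}
```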
LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " + - "Index blocks and Bloom filter blocks", this); + LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " + + "Index blocks and Bloom filter blocks", this); cacheOnWriteLogged = true; } } @@ -177,12 +179,9 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th } StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, ctx.getRegionFileSystem().getFileSystem()) - .withOutputDir(outputDir) - .withBloomType(ctx.getBloomFilterType()) - .withMaxKeyCount(params.maxKeyCount()) - .withFavoredNodes(ctx.getFavoredNodes()) - .withFileContext(hFileContext) - .withShouldDropCacheBehind(params.shouldDropBehind()) + .withOutputDir(outputDir).withBloomType(ctx.getBloomFilterType()) + .withMaxKeyCount(params.maxKeyCount()).withFavoredNodes(ctx.getFavoredNodes()) + .withFileContext(hFileContext).withShouldDropCacheBehind(params.shouldDropBehind()) .withCompactedFilesSupplier(ctx.getCompactedFilesSupplier()) .withFileStoragePolicy(params.fileStoragePolicy()) .withWriterCreationTracker(params.writerCreationTracker()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java index 85c5ee24f3b2..4c7c19eebb9b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -57,7 +59,8 @@ public final class StoreFileTrackerFactory { * Maps between configuration names for trackers and implementation classes. */ public enum Trackers { - DEFAULT(DefaultStoreFileTracker.class), FILE(FileBasedStoreFileTracker.class), + DEFAULT(DefaultStoreFileTracker.class), + FILE(FileBasedStoreFileTracker.class), MIGRATION(MigrationStoreFileTracker.class); final Class clazz; @@ -172,13 +175,12 @@ static StoreFileTrackerBase createForMigration(Configuration conf, String config } public static TableDescriptor updateWithTrackerConfigs(Configuration conf, - TableDescriptor descriptor) { - //CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table - //descriptors with the SFT impl specific configs. By the time this happens, the table has no - //regions nor stores yet, so it can't create a proper StoreContext. + TableDescriptor descriptor) { + // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table + // descriptors with the SFT impl specific configs. By the time this happens, the table has no + // regions nor stores yet, so it can't create a proper StoreContext. 
if (StringUtils.isEmpty(descriptor.getValue(TRACKER_IMPL))) { - StoreFileTracker tracker = - StoreFileTrackerFactory.create(conf, true, null); + StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, null); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(descriptor); return tracker.updateWithTrackerConfigs(builder).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java index 38040bc4f006..fcddb982147e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,8 +41,8 @@ private static void checkForNewFamily(Configuration conf, TableDescriptor table, Class tracker = StoreFileTrackerFactory.getTrackerClass(mergedConf); if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) { throw new DoNotRetryIOException( - "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + - family.getNameAsString() + " of table " + table.getTableName()); + "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + + family.getNameAsString() + " of table " + table.getTableName()); } } @@ -51,7 +51,7 @@ private static void checkForNewFamily(Configuration conf, TableDescriptor table, *

          * For now, only make sure that we do not use {@link Trackers#MIGRATION} for newly created tables. * @throws IOException when there are check errors, the upper layer should fail the - * {@code CreateTableProcedure}. + * {@code CreateTableProcedure}. */ public static void checkForCreateTable(Configuration conf, TableDescriptor table) throws IOException { @@ -92,7 +92,7 @@ public static void checkForCreateTable(Configuration conf, TableDescriptor table * *

        * @throws IOException when there are check errors, the upper layer should fail the - * {@code ModifyTableProcedure}. + * {@code ModifyTableProcedure}. */ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTable, TableDescriptor newTable, boolean isTableDisabled) throws IOException { @@ -120,18 +120,18 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa Class newSrcTracker = MigrationStoreFileTracker.getSrcTrackerClass(newConf); if (!oldSrcTracker.equals(newSrcTracker)) { - throw new DoNotRetryIOException("The src tracker has been changed from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The src tracker has been changed from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } Class newDstTracker = MigrationStoreFileTracker.getDstTrackerClass(newConf); if (!oldDstTracker.equals(newDstTracker)) { - throw new DoNotRetryIOException("The dst tracker has been changed from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to " + - StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The dst tracker has been changed from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to " + + StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } else { // do not allow changing from MIGRATION to its dst SFT implementation while the table is @@ -140,16 +140,16 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa // details. 
if (isTableDisabled) { throw new TableNotEnabledException( - "Should not change store file tracker implementation from " + - StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table " + - newTable.getTableName() + " is disabled"); + "Should not change store file tracker implementation from " + + StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table " + + newTable.getTableName() + " is disabled"); } // we can only change to the dst tracker if (!newTracker.equals(oldDstTracker)) { - throw new DoNotRetryIOException("Should migrate tracker to " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got " + - StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("Should migrate tracker to " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got " + + StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } } else { @@ -158,9 +158,9 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa // tracker if (!MigrationStoreFileTracker.class.isAssignableFrom(newTracker)) { throw new DoNotRetryIOException( - "Should change to " + Trackers.MIGRATION + " first when migrating from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + "Should change to " + Trackers.MIGRATION + " first when migrating from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } // here we do not check whether the table is disabled, as after changing to MIGRATION, we // still rely on the src SFT implementation to actually load the store files, so there @@ -168,20 +168,20 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa Class newSrcTracker = MigrationStoreFileTracker.getSrcTrackerClass(newConf); if (!oldTracker.equals(newSrcTracker)) { - throw new DoNotRetryIOException("Should use src tracker " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + - " when migrating from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("Should use src tracker " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + + " when migrating from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } Class newDstTracker = MigrationStoreFileTracker.getDstTrackerClass(newConf); // the src and dst tracker should not be the same if (newSrcTracker.equals(newDstTracker)) { - throw new DoNotRetryIOException("The src tracker and dst tracker are both " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The src tracker and dst tracker are both " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " 
+ + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } } @@ -191,9 +191,9 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa /** * Makes sure restoring a snapshot does not break the current SFT setup follows * StoreUtils.createStoreConfiguration - * @param currentTableDesc Existing Table's TableDescriptor + * @param currentTableDesc Existing Table's TableDescriptor * @param snapshotTableDesc Snapshot's TableDescriptor - * @param baseConf Current global configuration + * @param baseConf Current global configuration * @throws RestoreSnapshotException if restore would break the current SFT setup */ public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, @@ -216,9 +216,9 @@ public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, // restoration is not possible if there is an SFT mismatch if (currentSFT != snapSFT) { throw new RestoreSnapshotException( - "Restoring Snapshot is not possible because " + " the config for column family " + - cfDesc.getNameAsString() + " has incompatible configuration. Current SFT: " + - currentSFT + " SFT from snapshot: " + snapSFT); + "Restoring Snapshot is not possible because " + " the config for column family " + + cfDesc.getNameAsString() + " has incompatible configuration. Current SFT: " + + currentSFT + " SFT from snapshot: " + snapSFT); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java index 45e7267ed265..03c922ce0a8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
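Stripped of the HBase types, the modify-table checks above enforce a small transition rule set: a direct switch between tracker implementations is rejected, you must pass through MIGRATION, the migration's source must be the tracker you are leaving, source and destination must differ, and finishing a migration may only land on the recorded destination. A condensed sketch of those rules with strings standing in for tracker classes; it deliberately omits the disabled-table and unchanged-src/dst details:

```java
import java.io.IOException;

public class MigrationRuleSketch {
  static final String MIGRATION = "MIGRATION";

  static void check(String oldImpl, String oldDst,
      String newImpl, String newSrc, String newDst) throws IOException {
    if (oldImpl.equals(newImpl)) {
      return; // nothing changed at this level
    }
    if (MIGRATION.equals(oldImpl)) {
      // Finishing a migration: only the recorded destination is a legal target.
      if (!newImpl.equals(oldDst)) {
        throw new IOException("Should migrate tracker to " + oldDst + " but got " + newImpl);
      }
    } else {
      // Starting a migration: the only legal change is to MIGRATION itself...
      if (!MIGRATION.equals(newImpl)) {
        throw new IOException("Should change to " + MIGRATION + " first when migrating from " + oldImpl);
      }
      // ...and the migration must read from the implementation we are leaving.
      if (!oldImpl.equals(newSrc)) {
        throw new IOException("Should use src tracker " + oldImpl + " but got " + newSrc);
      }
      if (newSrc.equals(newDst)) {
        throw new IOException("The src tracker and dst tracker are both " + newSrc);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    check("DEFAULT", null, "MIGRATION", "DEFAULT", "FILE"); // legal first step of a migration
    try {
      check("DEFAULT", null, "FILE", null, null);           // illegal direct switch
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```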
See the NOTICE file * distributed with this work for additional information @@ -19,25 +19,25 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public final class CompactionThroughputControllerFactory { private static final Logger LOG = - LoggerFactory.getLogger(CompactionThroughputControllerFactory.class); + LoggerFactory.getLogger(CompactionThroughputControllerFactory.class); public static final String HBASE_THROUGHPUT_CONTROLLER_KEY = - "hbase.regionserver.throughput.controller"; + "hbase.regionserver.throughput.controller"; private CompactionThroughputControllerFactory() { } - private static final Class - DEFAULT_THROUGHPUT_CONTROLLER_CLASS = PressureAwareCompactionThroughputController.class; + private static final Class DEFAULT_THROUGHPUT_CONTROLLER_CLASS = + PressureAwareCompactionThroughputController.class; // for backward compatibility and may not be supported in the future private static final String DEPRECATED_NAME_OF_PRESSURE_AWARE_THROUGHPUT_CONTROLLER_CLASS = @@ -45,26 +45,24 @@ private CompactionThroughputControllerFactory() { private static final String DEPRECATED_NAME_OF_NO_LIMIT_THROUGHPUT_CONTROLLER_CLASS = "org.apache.hadoop.hbase.regionserver.compactions.NoLimitThroughputController"; - public static ThroughputController create(RegionServerServices server, - Configuration conf) { + public static ThroughputController create(RegionServerServices server, Configuration conf) { Class clazz = getThroughputControllerClass(conf); ThroughputController controller = ReflectionUtils.newInstance(clazz, conf); controller.setup(server); return controller; } - public static Class getThroughputControllerClass( - Configuration conf) { + public static Class + getThroughputControllerClass(Configuration conf) { String className = - conf.get(HBASE_THROUGHPUT_CONTROLLER_KEY, DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName()); + conf.get(HBASE_THROUGHPUT_CONTROLLER_KEY, DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName()); className = resolveDeprecatedClassName(className); try { return Class.forName(className).asSubclass(ThroughputController.class); } catch (Exception e) { - LOG.warn( - "Unable to load configured throughput controller '" + className - + "', load default throughput controller " - + DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); + LOG.warn("Unable to load configured throughput controller '" + className + + "', load default throughput controller " + DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName() + + " instead", e); return DEFAULT_THROUGHPUT_CONTROLLER_CLASS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java index fc75c5835831..5b998e5b72f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public final class FlushThroughputControllerFactory { @@ -31,34 +31,32 @@ public final class FlushThroughputControllerFactory { private static final Logger LOG = LoggerFactory.getLogger(FlushThroughputControllerFactory.class); public static final String HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY = - "hbase.regionserver.flush.throughput.controller"; + "hbase.regionserver.flush.throughput.controller"; - private static final Class - DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS = NoLimitThroughputController.class; + private static final Class< + ? extends ThroughputController> DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS = + NoLimitThroughputController.class; private FlushThroughputControllerFactory() { } - public static ThroughputController create(RegionServerServices server, - Configuration conf) { + public static ThroughputController create(RegionServerServices server, Configuration conf) { Class clazz = getThroughputControllerClass(conf); ThroughputController controller = ReflectionUtils.newInstance(clazz, conf); controller.setup(server); return controller; } - public static Class getThroughputControllerClass( - Configuration conf) { - String className = - conf.get(HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, - DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName()); + public static Class + getThroughputControllerClass(Configuration conf) { + String className = conf.get(HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName()); try { return Class.forName(className).asSubclass(ThroughputController.class); } catch (Exception e) { - LOG.warn( - "Unable to load configured flush throughput controller '" + className - + "', load default throughput controller " - + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); + LOG.warn("Unable to load configured flush throughput controller '" + className + + "', load default throughput controller " + + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); return DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java index 4b1b26108523..31a424d5e996 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class NoLimitThroughputController implements ThroughputController { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java index 1c3952ed0491..4ccabde07f1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; /** * A throughput controller which uses the follow schema to limit throughput @@ -42,28 +42,28 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class PressureAwareCompactionThroughputController extends PressureAwareThroughputController { - private final static Logger LOG = LoggerFactory - .getLogger(PressureAwareCompactionThroughputController.class); + private final static Logger LOG = + LoggerFactory.getLogger(PressureAwareCompactionThroughputController.class); public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND = - "hbase.hstore.compaction.throughput.higher.bound"; + "hbase.hstore.compaction.throughput.higher.bound"; private static final long DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND = - 100L * 1024 * 1024; + 100L * 1024 * 1024; public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND = - "hbase.hstore.compaction.throughput.lower.bound"; + "hbase.hstore.compaction.throughput.lower.bound"; private static final long DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND = - 50L * 1024 * 1024; + 50L * 1024 * 1024; public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK = - "hbase.hstore.compaction.throughput.offpeak"; + "hbase.hstore.compaction.throughput.offpeak"; private static final long DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK = Long.MAX_VALUE; public static final String HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD = - "hbase.hstore.compaction.throughput.tune.period"; + "hbase.hstore.compaction.throughput.tune.period"; private static final int DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD = 60 * 1000; @@ -75,8 +75,8 @@ public class 
PressureAwareCompactionThroughputController extends PressureAwareTh @Override public void setup(final RegionServerServices server) { - server.getChoreService().scheduleChore( - new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) { + server.getChoreService() + .scheduleChore(new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) { @Override protected void chore() { @@ -95,17 +95,16 @@ private void tune(double compactionPressure) { } else { // compactionPressure is between 0.0 and 1.0, we use a simple linear formula to // calculate the throughput limitation. - maxThroughputToSet = - maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound) - * compactionPressure; + maxThroughputToSet = maxThroughputLowerBound + + (maxThroughputUpperBound - maxThroughputLowerBound) * compactionPressure; } if (LOG.isDebugEnabled()) { if (Math.abs(maxThroughputToSet - getMaxThroughput()) < .0000001) { LOG.debug("CompactionPressure is " + compactionPressure + ", tune throughput to " - + throughputDesc(maxThroughputToSet)); + + throughputDesc(maxThroughputToSet)); } else if (LOG.isTraceEnabled()) { LOG.trace("CompactionPressure is " + compactionPressure + ", keep throughput throttling to " - + throughputDesc(maxThroughputToSet)); + + throughputDesc(maxThroughputToSet)); } } this.setMaxThroughput(maxThroughputToSet); @@ -117,33 +116,27 @@ public void setConf(Configuration conf) { if (conf == null) { return; } - this.maxThroughputUpperBound = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND); - this.maxThroughputLowerBound = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND); - this.maxThroughputOffpeak = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK); + this.maxThroughputUpperBound = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND); + this.maxThroughputLowerBound = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND); + this.maxThroughputOffpeak = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK); this.offPeakHours = OffPeakHours.getInstance(conf); - this.controlPerSize = - conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL, - this.maxThroughputLowerBound); + this.controlPerSize = conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL, + this.maxThroughputLowerBound); this.setMaxThroughput(this.maxThroughputLowerBound); - this.tuningPeriod = - getConf().getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, - DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD); + this.tuningPeriod = getConf().getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, + DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD); LOG.info("Compaction throughput configurations, higher bound: " - + throughputDesc(maxThroughputUpperBound) + ", lower bound " - + throughputDesc(maxThroughputLowerBound) + ", off peak: " - + throughputDesc(maxThroughputOffpeak) + ", tuning period: " + tuningPeriod + " ms"); + + throughputDesc(maxThroughputUpperBound) + ", lower bound " + + throughputDesc(maxThroughputLowerBound) + ", off peak: " + + throughputDesc(maxThroughputOffpeak) + ", tuning period: " + tuningPeriod + " ms"); } 
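The tuning rule in this hunk is a linear interpolation: for pressure in [0.0, 1.0) the limit is lower + (upper - lower) * pressure, pressure >= 1.0 removes the limit, and off-peak hours get their own bound (unlimited by default). A worked sketch using the compaction defaults visible above (50 MB/s and 100 MB/s); this is an illustration, not the HBase controller:

```java
public class PressureTuningSketch {
  // These mirror the compaction defaults visible in the hunk above: 50 MB/s and 100 MB/s.
  static final double LOWER_BOUND = 50L * 1024 * 1024;
  static final double UPPER_BOUND = 100L * 1024 * 1024;

  static double tune(double pressure, boolean offPeak) {
    if (offPeak || pressure >= 1.0) {
      // Off-peak uses its own bound (unlimited by default); pressure >= 1.0 means no limitation.
      return Double.MAX_VALUE;
    }
    // Linear interpolation between the two bounds for pressure in [0.0, 1.0).
    return LOWER_BOUND + (UPPER_BOUND - LOWER_BOUND) * pressure;
  }

  public static void main(String[] args) {
    System.out.printf("pressure 0.0 -> %.0f MB/s%n", tune(0.0, false) / (1024 * 1024)); // 50
    System.out.printf("pressure 0.5 -> %.0f MB/s%n", tune(0.5, false) / (1024 * 1024)); // 75
    System.out.printf("pressure 1.2 -> unlimited: %b%n", tune(1.2, false) == Double.MAX_VALUE);
  }
}
```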
@Override public String toString() { return "DefaultCompactionThroughputController [maxThroughput=" - + throughputDesc(getMaxThroughput()) + ", activeCompactions=" + activeOperations.size() - + "]"; + + throughputDesc(getMaxThroughput()) + ", activeCompactions=" + activeOperations.size() + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java index 51e7b42bf9d6..4720d26d2315 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; /** * A throughput controller which uses the follow schema to limit throughput @@ -32,8 +32,8 @@ *
 * <li>If flush pressure is greater than or equal to 1.0, no limitation.</li>
 * <li>In normal case, the max throughput is tuned between
 * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND} and
- * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND}, using the formula "lower +
- * (upper - lower) * flushPressure", where flushPressure is in range [0.0, 1.0)</li>
+ * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND}, using the formula "lower + (upper -
+ * lower) * flushPressure", where flushPressure is in range [0.0, 1.0)</li>
 * </ul>
      * @see org.apache.hadoop.hbase.regionserver.HRegionServer#getFlushPressure() */ @@ -41,31 +41,31 @@ public class PressureAwareFlushThroughputController extends PressureAwareThroughputController { private static final Logger LOG = - LoggerFactory.getLogger(PressureAwareFlushThroughputController.class); + LoggerFactory.getLogger(PressureAwareFlushThroughputController.class); public static final String HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND = - "hbase.hstore.flush.throughput.upper.bound"; + "hbase.hstore.flush.throughput.upper.bound"; private static final long DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND = - 200L * 1024 * 1024; + 200L * 1024 * 1024; public static final String HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND = - "hbase.hstore.flush.throughput.lower.bound"; + "hbase.hstore.flush.throughput.lower.bound"; private static final long DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND = - 100L * 1024 * 1024; + 100L * 1024 * 1024; public static final String HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD = - "hbase.hstore.flush.throughput.tune.period"; + "hbase.hstore.flush.throughput.tune.period"; private static final int DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD = 20 * 1000; // check flush throughput every this size public static final String HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL = - "hbase.hstore.flush.throughput.control.check.interval"; + "hbase.hstore.flush.throughput.control.check.interval"; private static final long DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL = - 10L * 1024 * 1024;// 10MB + 10L * 1024 * 1024;// 10MB @Override public void setup(final RegionServerServices server) { @@ -87,13 +87,12 @@ private void tune(double flushPressure) { } else { // flushPressure is between 0.0 and 1.0, we use a simple linear formula to // calculate the throughput limitation. 
- maxThroughputToSet = - maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound) - * flushPressure; + maxThroughputToSet = maxThroughputLowerBound + + (maxThroughputUpperBound - maxThroughputLowerBound) * flushPressure; } if (LOG.isDebugEnabled()) { LOG.debug("flushPressure is " + flushPressure + ", tune flush throughput to " - + throughputDesc(maxThroughputToSet)); + + throughputDesc(maxThroughputToSet)); } this.setMaxThroughput(maxThroughputToSet); } @@ -104,28 +103,24 @@ public void setConf(Configuration conf) { if (conf == null) { return; } - this.maxThroughputUpperBound = - conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, - DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND); - this.maxThroughputLowerBound = - conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, - DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND); + this.maxThroughputUpperBound = conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, + DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND); + this.maxThroughputLowerBound = conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, + DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND); this.offPeakHours = OffPeakHours.getInstance(conf); - this.controlPerSize = - conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL, - DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL); + this.controlPerSize = conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL, + DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL); this.setMaxThroughput(this.maxThroughputLowerBound); - this.tuningPeriod = - getConf().getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, - DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD); + this.tuningPeriod = getConf().getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, + DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD); LOG.info("Flush throughput configurations, upper bound: " - + throughputDesc(maxThroughputUpperBound) + ", lower bound " - + throughputDesc(maxThroughputLowerBound) + ", tuning period: " + tuningPeriod + " ms"); + + throughputDesc(maxThroughputUpperBound) + ", lower bound " + + throughputDesc(maxThroughputLowerBound) + ", tuning period: " + tuningPeriod + " ms"); } @Override public String toString() { return "DefaultFlushController [maxThroughput=" + throughputDesc(getMaxThroughput()) - + ", activeFlushNumber=" + activeOperations.size() + "]"; + + ", activeFlushNumber=" + activeOperations.size() + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java index 306df0b9d5a5..c55b507cb511 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,22 +19,21 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public abstract class PressureAwareThroughputController extends Configured implements - ThroughputController, Stoppable { +public abstract class PressureAwareThroughputController extends Configured + implements ThroughputController, Stoppable { private static final Logger LOG = - LoggerFactory.getLogger(PressureAwareThroughputController.class); + LoggerFactory.getLogger(PressureAwareThroughputController.class); /** * Stores the information of one controlled compaction. @@ -77,7 +76,8 @@ private static final class ActiveOperation { private volatile double maxThroughput; private volatile double maxThroughputPerOperation; - protected final ConcurrentMap activeOperations = new ConcurrentHashMap<>(); + protected final ConcurrentMap activeOperations = + new ConcurrentHashMap<>(); @Override public abstract void setup(final RegionServerServices server); @@ -123,10 +123,10 @@ public long control(String opName, long size) throws InterruptedException { if (now - operation.lastLogTime > 5L * 1000) { LOG.debug("deltaSize: " + deltaSize + " bytes; elapseTime: " + elapsedTime + " ns"); LOG.debug(opName + " sleep=" + sleepTime + "ms because current throughput is " - + throughputDesc(deltaSize, elapsedTime) + ", max allowed is " - + throughputDesc(maxThroughputPerOperation) + ", already slept " - + operation.numberOfSleeps + " time(s) and total slept time is " - + operation.totalSleepTime + " ms till now."); + + throughputDesc(deltaSize, elapsedTime) + ", max allowed is " + + throughputDesc(maxThroughputPerOperation) + ", already slept " + + operation.numberOfSleeps + " time(s) and total slept time is " + + operation.totalSleepTime + " ms till now."); operation.lastLogTime = now; } } @@ -142,11 +142,10 @@ public void finish(String opName) { ActiveOperation operation = activeOperations.remove(opName); maxThroughputPerOperation = getMaxThroughput() / activeOperations.size(); long elapsedTime = EnvironmentEdgeManager.currentTime() - operation.startTime; - LOG.info(opName + " average throughput is " - + throughputDesc(operation.totalSize, elapsedTime) + ", slept " - + operation.numberOfSleeps + " time(s) and total slept time is " - + operation.totalSleepTime + " ms. " + activeOperations.size() - + " active operations remaining, total limit is " + throughputDesc(getMaxThroughput())); + LOG.info(opName + " average throughput is " + throughputDesc(operation.totalSize, elapsedTime) + + ", slept " + operation.numberOfSleeps + " time(s) and total slept time is " + + operation.totalSleepTime + " ms. 
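The control() method whose logging is reformatted in this hunk throttles a writer by comparing how long the bytes it has pushed should have taken at the allowed rate with how long they actually took, and sleeping the difference. This is a rough stand-alone sketch of that idea, not the HBase implementation:

```java
public class RateLimitSketch {
  private final double maxBytesPerSec;
  private long totalBytes;
  private final long startNanos = System.nanoTime();

  RateLimitSketch(double maxBytesPerSec) {
    this.maxBytesPerSec = maxBytesPerSec;
  }

  // Account for `size` bytes and sleep if we are running ahead of the allowed rate.
  long control(long size) throws InterruptedException {
    totalBytes += size;
    long elapsedMs = (System.nanoTime() - startNanos) / 1_000_000;
    long expectedMs = (long) (totalBytes / maxBytesPerSec * 1000);
    long sleepMs = expectedMs - elapsedMs;
    if (sleepMs > 0) {
      Thread.sleep(sleepMs);
    }
    return Math.max(sleepMs, 0);
  }

  public static void main(String[] args) throws InterruptedException {
    RateLimitSketch limiter = new RateLimitSketch(10.0 * 1024 * 1024); // ~10 MB/s
    long slept = 0;
    for (int i = 0; i < 5; i++) {
      slept += limiter.control(4 * 1024 * 1024); // 4 MB per call, 20 MB total
    }
    System.out.println("slept " + slept + " ms to stay near 10 MB/s");
  }
}
```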
" + activeOperations.size() + + " active operations remaining, total limit is " + throughputDesc(getMaxThroughput())); } private volatile boolean stopped = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index 1bf1a9b52b39..70683cb45722 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -48,8 +48,8 @@ * 2. parallelPutToStoreThreadLimit: The amount of concurrency allowed to write puts to a Store at * the same time. *

      - * 3. parallelPreparePutToStoreThreadLimit: The amount of concurrency allowed to - * prepare writing puts to a Store at the same time. + * 3. parallelPreparePutToStoreThreadLimit: The amount of concurrency allowed to prepare writing + * puts to a Store at the same time. *

      * Notice that our writing pipeline includes three key process: MVCC acquire, writing MemStore, and * WAL. Only limit the concurrency of writing puts to Store(parallelPutToStoreThreadLimit) is not @@ -69,18 +69,18 @@ public class StoreHotnessProtector { private volatile int parallelPreparePutToStoreThreadLimit; public final static String PARALLEL_PUT_STORE_THREADS_LIMIT = - "hbase.region.store.parallel.put.limit"; + "hbase.region.store.parallel.put.limit"; public final static String PARALLEL_PREPARE_PUT_STORE_MULTIPLIER = - "hbase.region.store.parallel.prepare.put.multiplier"; + "hbase.region.store.parallel.prepare.put.multiplier"; private final static int DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT = 0; private volatile int parallelPutToStoreThreadLimitCheckMinColumnCount; public final static String PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT = - "hbase.region.store.parallel.put.limit.min.column.count"; + "hbase.region.store.parallel.put.limit.min.column.count"; private final static int DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM = 100; private final static int DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER = 2; private final ConcurrentMap preparePutToStoreMap = - new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR); private final Region region; public StoreHotnessProtector(Region region, Configuration conf) { @@ -90,12 +90,12 @@ public StoreHotnessProtector(Region region, Configuration conf) { public void init(Configuration conf) { this.parallelPutToStoreThreadLimit = - conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT); + conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT); this.parallelPreparePutToStoreThreadLimit = conf.getInt(PARALLEL_PREPARE_PUT_STORE_MULTIPLIER, - DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER) * parallelPutToStoreThreadLimit; + DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER) * parallelPutToStoreThreadLimit; this.parallelPutToStoreThreadLimitCheckMinColumnCount = - conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT, - DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM); + conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT, + DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM); if (!isEnable()) { logDisabledMessageOnce(); @@ -103,14 +103,14 @@ public void init(Configuration conf) { } /** - * {@link #init(Configuration)} is called for every Store that opens on a RegionServer. - * Here we make a lightweight attempt to log this message once per RegionServer, rather than - * per-Store. The goal is just to draw attention to this feature if debugging overload due to - * heavy writes. + * {@link #init(Configuration)} is called for every Store that opens on a RegionServer. Here we + * make a lightweight attempt to log this message once per RegionServer, rather than per-Store. + * The goal is just to draw attention to this feature if debugging overload due to heavy writes. */ private static void logDisabledMessageOnce() { if (!loggedDisableMessage) { - LOG.info("StoreHotnessProtector is disabled. Set {} > 0 to enable, " + LOG.info( + "StoreHotnessProtector is disabled. 
Set {} > 0 to enable, " + "which may help mitigate load under heavy write pressure.", PARALLEL_PUT_STORE_THREADS_LIMIT); loggedDisableMessage = true; @@ -140,38 +140,39 @@ public void start(Map> familyMaps) throws RegionTooBusyExcept if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) { - //we need to try to add #preparePutCount at first because preparePutToStoreMap will be - //cleared when changing the configuration. + // we need to try to add #preparePutCount at first because preparePutToStoreMap will be + // cleared when changing the configuration. int preparePutCount = preparePutToStoreMap - .computeIfAbsent(e.getKey(), key -> new AtomicInteger()) - .incrementAndGet(); + .computeIfAbsent(e.getKey(), key -> new AtomicInteger()).incrementAndGet(); boolean storeAboveThread = store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit; boolean storeAbovePrePut = preparePutCount > this.parallelPreparePutToStoreThreadLimit; if (storeAboveThread || storeAbovePrePut) { - tooBusyStore = (tooBusyStore == null ? - store.getColumnFamilyName() : - tooBusyStore + "," + store.getColumnFamilyName()); + tooBusyStore = (tooBusyStore == null + ? store.getColumnFamilyName() + : tooBusyStore + "," + store.getColumnFamilyName()); } aboveParallelThreadLimit |= storeAboveThread; aboveParallelPrePutLimit |= storeAbovePrePut; if (LOG.isTraceEnabled()) { LOG.trace(store.getColumnFamilyName() + ": preparePutCount=" + preparePutCount - + "; currentParallelPutCount=" + store.getCurrentParallelPutCount()); + + "; currentParallelPutCount=" + store.getCurrentParallelPutCount()); } } } if (aboveParallelThreadLimit || aboveParallelPrePutLimit) { - String msg = - "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":" + tooBusyStore - + " Above " - + (aboveParallelThreadLimit ? "parallelPutToStoreThreadLimit(" - + this.parallelPutToStoreThreadLimit + ")" : "") - + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "") - + (aboveParallelPrePutLimit ? "parallelPreparePutToStoreThreadLimit(" - + this.parallelPreparePutToStoreThreadLimit + ")" : ""); + String msg = "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":" + + tooBusyStore + " Above " + + (aboveParallelThreadLimit + ? "parallelPutToStoreThreadLimit(" + this.parallelPutToStoreThreadLimit + ")" + : "") + + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "") + + (aboveParallelPrePutLimit + ? "parallelPreparePutToStoreThreadLimit(" + this.parallelPreparePutToStoreThreadLimit + + ")" + : ""); LOG.trace(msg); throw new RegionTooBusyException(msg); } @@ -201,11 +202,10 @@ public void finish(Map> familyMaps) { public String toString() { return "StoreHotnessProtector, parallelPutToStoreThreadLimit=" - + this.parallelPutToStoreThreadLimit + " ; minColumnNum=" - + this.parallelPutToStoreThreadLimitCheckMinColumnCount + " ; preparePutThreadLimit=" - + this.parallelPreparePutToStoreThreadLimit + " ; hotProtect now " + (this.isEnable() ? - "enable" : - "disable"); + + this.parallelPutToStoreThreadLimit + " ; minColumnNum=" + + this.parallelPutToStoreThreadLimitCheckMinColumnCount + " ; preparePutThreadLimit=" + + this.parallelPreparePutToStoreThreadLimit + " ; hotProtect now " + + (this.isEnable() ? 
"enable" : "disable"); } public boolean isEnable() { @@ -218,5 +218,5 @@ Map getPreparePutToStoreMap() { } public static final long FIXED_SIZE = - ClassSize.align(ClassSize.OBJECT + 2 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT); + ClassSize.align(ClassSize.OBJECT + 2 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java index ad65c59436ef..466dc96757de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.throttle; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.yetus.audience.InterfaceAudience; @@ -36,7 +35,7 @@ private ThroughputControlUtil() { /** * Generate a name for throttling, to prevent name conflict when multiple IO operation running * parallel on the same store. - * @param store the Store instance on which IO operation is happening + * @param store the Store instance on which IO operation is happening * @param opName Name of the IO operation, e.g. "flush", "compaction", etc. * @return The name for throttling */ @@ -49,8 +48,8 @@ public static String getNameForThrottling(HStore store, String opName) { break; } } - return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + - store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + - NAME_DELIMITER + counter; + return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + + NAME_DELIMITER + counter; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java index 707d02d5f92b..284aa6814dd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,12 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; /** - * A utility that constrains the total throughput of one or more simultaneous flows by - * sleeping when necessary. + * A utility that constrains the total throughput of one or more simultaneous flows by sleeping when + * necessary. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public interface ThroughputController extends Stoppable { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 5416e3a2d669..2c0a656049c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -131,7 +131,7 @@ public abstract class AbstractFSWAL implements WAL { /** Don't log blocking regions more frequently than this. */ private static final long SURVIVED_TOO_LONG_LOG_INTERVAL_NS = TimeUnit.MINUTES.toNanos(5); - protected static final String SLOW_SYNC_TIME_MS ="hbase.regionserver.wal.slowsync.ms"; + protected static final String SLOW_SYNC_TIME_MS = "hbase.regionserver.wal.slowsync.ms"; protected static final int DEFAULT_SLOW_SYNC_TIME_MS = 100; // in ms protected static final String ROLL_ON_SYNC_TIME_MS = "hbase.regionserver.wal.roll.on.sync.ms"; protected static final int DEFAULT_ROLL_ON_SYNC_TIME_MS = 10000; // in ms @@ -152,8 +152,7 @@ public abstract class AbstractFSWAL implements WAL { public static final String RING_BUFFER_SLOT_COUNT = "hbase.regionserver.wal.disruptor.event.count"; - public static final String WAL_SHUTDOWN_WAIT_TIMEOUT_MS = - "hbase.wal.shutdown.wait.timeout.ms"; + public static final String WAL_SHUTDOWN_WAIT_TIMEOUT_MS = "hbase.wal.shutdown.wait.timeout.ms"; public static final int DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS = 15 * 1000; /** @@ -302,7 +301,8 @@ private static final class WalProps { /** * Map the encoded region name to the highest sequence id. - *
<p/>
Contains all the regions it has an entry for.
+ * <p/>
      + * Contains all the regions it has an entry for. */ public final Map encodedName2HighestSequenceId; @@ -370,7 +370,7 @@ protected long getFileNumFromFileName(Path fileName) { checkNotNull(fileName, "file name can't be null"); if (!ourFiles.accept(fileName)) { throw new IllegalArgumentException( - "The log file " + fileName + " doesn't belong to this WAL. (" + toString() + ")"); + "The log file " + fileName + " doesn't belong to this WAL. (" + toString() + ")"); } final String fileNameString = fileName.toString(); String chompedPath = fileNameString.substring(prefixPathStr.length(), @@ -389,8 +389,7 @@ protected final int getPreallocatedEventCount() { // be stuck and make no progress if the buffer is filled with appends only and there is no // sync. If no sync, then the handlers will be outstanding just waiting on sync completion // before they return. - int preallocatedEventCount = - this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16); + int preallocatedEventCount = this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16); checkArgument(preallocatedEventCount >= 0, RING_BUFFER_SLOT_COUNT + " must > 0"); int floor = Integer.highestOneBit(preallocatedEventCount); if (floor == preallocatedEventCount) { @@ -404,17 +403,16 @@ protected final int getPreallocatedEventCount() { } protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) - throws FailedLogCloseException, IOException { + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) + throws FailedLogCloseException, IOException { this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, - final String logDir, final String archiveDir, final Configuration conf, - final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) - throws FailedLogCloseException, IOException { + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); @@ -436,13 +434,13 @@ protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Pa prefix == null || prefix.isEmpty() ? "wal" : URLEncoder.encode(prefix, "UTF8"); // we only correctly differentiate suffices when numeric ones start with '.' if (suffix != null && !(suffix.isEmpty()) && !(suffix.startsWith(WAL_FILE_NAME_DELIMITER))) { - throw new IllegalArgumentException("WAL suffix must start with '" + WAL_FILE_NAME_DELIMITER + - "' but instead was '" + suffix + "'"); + throw new IllegalArgumentException("WAL suffix must start with '" + WAL_FILE_NAME_DELIMITER + + "' but instead was '" + suffix + "'"); } // Now that it exists, set the storage policy for the entire directory of wal files related to // this FSHLog instance String storagePolicy = - conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); + conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); CommonFSUtils.setStoragePolicy(fs, this.walDir, storagePolicy); this.walFileSuffix = (suffix == null) ? 
"" : URLEncoder.encode(suffix, "UTF8"); this.prefixPathStr = new Path(walDir, walFilePrefix + WAL_FILE_NAME_DELIMITER).toString(); @@ -458,7 +456,7 @@ public boolean accept(final Path fileName) { if (walFileSuffix.isEmpty()) { // in the case of the null suffix, we need to ensure the filename ends with a timestamp. return org.apache.commons.lang3.StringUtils - .isNumeric(fileNameString.substring(prefixPathStr.length())); + .isNumeric(fileNameString.substring(prefixPathStr.length())); } else if (!fileNameString.endsWith(walFileSuffix)) { return false; } @@ -494,28 +492,28 @@ public boolean accept(final Path fileName) { this.logrollsize = (long) (this.blocksize * multiplier); this.maxLogs = conf.getInt(MAX_LOGS, Math.max(32, calculateMaxLogFiles(conf, logrollsize))); - LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" + - StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix=" + - walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir + - ", maxLogs=" + this.maxLogs); - this.slowSyncNs = TimeUnit.MILLISECONDS.toNanos(conf.getInt(SLOW_SYNC_TIME_MS, - DEFAULT_SLOW_SYNC_TIME_MS)); - this.rollOnSyncNs = TimeUnit.MILLISECONDS.toNanos(conf.getInt(ROLL_ON_SYNC_TIME_MS, - DEFAULT_ROLL_ON_SYNC_TIME_MS)); - this.slowSyncRollThreshold = conf.getInt(SLOW_SYNC_ROLL_THRESHOLD, - DEFAULT_SLOW_SYNC_ROLL_THRESHOLD); - this.slowSyncCheckInterval = conf.getInt(SLOW_SYNC_ROLL_INTERVAL_MS, - DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS); - this.walSyncTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf.getLong(WAL_SYNC_TIMEOUT_MS, - DEFAULT_WAL_SYNC_TIMEOUT_MS)); + LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" + + StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix=" + + walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir + + ", maxLogs=" + this.maxLogs); + this.slowSyncNs = + TimeUnit.MILLISECONDS.toNanos(conf.getInt(SLOW_SYNC_TIME_MS, DEFAULT_SLOW_SYNC_TIME_MS)); + this.rollOnSyncNs = TimeUnit.MILLISECONDS + .toNanos(conf.getInt(ROLL_ON_SYNC_TIME_MS, DEFAULT_ROLL_ON_SYNC_TIME_MS)); + this.slowSyncRollThreshold = + conf.getInt(SLOW_SYNC_ROLL_THRESHOLD, DEFAULT_SLOW_SYNC_ROLL_THRESHOLD); + this.slowSyncCheckInterval = + conf.getInt(SLOW_SYNC_ROLL_INTERVAL_MS, DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS); + this.walSyncTimeoutNs = + TimeUnit.MILLISECONDS.toNanos(conf.getLong(WAL_SYNC_TIMEOUT_MS, DEFAULT_WAL_SYNC_TIMEOUT_MS)); this.syncFutureCache = new SyncFutureCache(conf); this.implClassName = getClass().getSimpleName(); - this.walTooOldNs = TimeUnit.SECONDS.toNanos(conf.getInt( - SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); + this.walTooOldNs = TimeUnit.SECONDS + .toNanos(conf.getInt(SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); this.useHsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC); archiveRetries = this.conf.getInt("hbase.regionserver.walroll.archive.retries", 0); - this.walShutdownTimeout = conf.getLong(WAL_SHUTDOWN_WAIT_TIMEOUT_MS, - DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS); + this.walShutdownTimeout = + conf.getLong(WAL_SHUTDOWN_WAIT_TIMEOUT_MS, DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS); } /** @@ -607,10 +605,10 @@ public final void sync(long txid, boolean forceSync) throws IOException { protected abstract void doSync(boolean forceSync) throws IOException; protected abstract void doSync(long txid, boolean forceSync) throws IOException; + /** * This is a convenience method 
that computes a new filename with a given file-number. - * @param filenum to use - * @return Path + * @param filenum to use n */ protected Path computeFilename(final long filenum) { if (filenum < 0) { @@ -622,8 +620,7 @@ protected Path computeFilename(final long filenum) { /** * This is a convenience method that computes a new filename with a given using the current WAL - * file-number - * @return Path + * file-number n */ public Path getCurrentFileName() { return computeFilename(this.filenum.get()); @@ -656,7 +653,7 @@ Path getOldPath() { * Tell listeners about pre log roll. */ private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath) - throws IOException { + throws IOException { coprocessorHost.preWALRoll(oldPath, newPath); if (!this.listeners.isEmpty()) { @@ -670,7 +667,7 @@ private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath * Tell listeners about post log roll. */ private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath) - throws IOException { + throws IOException { if (!this.listeners.isEmpty()) { for (WALActionsListener i : this.listeners) { i.postLogRoll(oldPath, newPath); @@ -694,9 +691,9 @@ public int getNumLogFiles() { } /** - * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, - * check the first (oldest) WAL, and return those regions which should be flushed so that - * it can be let-go/'archived'. + * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the + * first (oldest) WAL, and return those regions which should be flushed so that it can be + * let-go/'archived'. * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file. */ Map> findRegionsToForceFlush() throws IOException { @@ -719,9 +716,9 @@ Map> findRegionsToForceFlush() throws IOException { } listForPrint.add(Bytes.toStringBinary(r.getKey()) + "[" + families.toString() + "]"); } - LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + - "; forcing (partial) flush of " + regions.size() + " region(s): " + - StringUtils.join(",", listForPrint)); + LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + + "; forcing (partial) flush of " + regions.size() + " region(s): " + + StringUtils.join(",", listForPrint)); } return regions; } @@ -800,8 +797,7 @@ protected void archive(final Pair log) { break; } } else { - LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, - e); + LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, e); } retry++; } @@ -865,8 +861,8 @@ private Span createSpan(String name) { *
<li>In the case of closing out this FSHLog with no further use newPath and nextWriter will be
 * null.</li>
 * </ul>
    - * @param oldPath may be null - * @param newPath may be null + * @param oldPath may be null + * @param newPath may be null * @param nextWriter may be null * @return the passed in newPath * @throws IOException if there is a problem flushing or closing the underlying FS @@ -930,8 +926,8 @@ private Map> rollWriterInternal(boolean force) throws IOExc newPath = replaceWriter(oldPath, newPath, nextWriter); tellListenersAboutPostLogRoll(oldPath, newPath); if (LOG.isDebugEnabled()) { - LOG.debug("Create new " + implClassName + " writer with pipeline: " + - Arrays.toString(getPipeline())); + LOG.debug("Create new " + implClassName + " writer with pipeline: " + + Arrays.toString(getPipeline())); } // We got a new writer, so reset the slow sync count lastTimeCheckSlowSync = EnvironmentEdgeManager.currentTime(); @@ -1021,8 +1017,8 @@ public Void call() throws Exception { } catch (TimeoutException e) { throw new TimeoutIOException("We have waited " + walShutdownTimeout + "ms, but" + " the shutdown of WAL doesn't complete! Please check the status of underlying " - + "filesystem or increase the wait time by the config \"" - + WAL_SHUTDOWN_WAIT_TIMEOUT_MS + "\"", e); + + "filesystem or increase the wait time by the config \"" + WAL_SHUTDOWN_WAIT_TIMEOUT_MS + + "\"", e); } catch (ExecutionException e) { if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); @@ -1081,7 +1077,7 @@ public int getInflightWALCloseCount() { */ @Override public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, - boolean onlyIfGreater) { + boolean onlyIfGreater) { sequenceIdAccounting.updateStore(encodedRegionName, familyName, sequenceid, onlyIfGreater); } @@ -1174,19 +1170,18 @@ private long postAppend(final Entry e, final long elapsedTime) throws IOExceptio protected final void postSync(long timeInNanos, int handlerSyncs) { if (timeInNanos > this.slowSyncNs) { String msg = new StringBuilder().append("Slow sync cost: ") - .append(TimeUnit.NANOSECONDS.toMillis(timeInNanos)) - .append(" ms, current pipeline: ") - .append(Arrays.toString(getPipeline())).toString(); + .append(TimeUnit.NANOSECONDS.toMillis(timeInNanos)).append(" ms, current pipeline: ") + .append(Arrays.toString(getPipeline())).toString(); LOG.info(msg); if (timeInNanos > this.rollOnSyncNs) { // A single sync took too long. // Elsewhere in checkSlowSync, called from checkLogRoll, we will look at cumulative // effects. Here we have a single data point that indicates we should take immediate // action, so do so. - LOG.warn("Requesting log roll because we exceeded slow sync threshold; time=" + - TimeUnit.NANOSECONDS.toMillis(timeInNanos) + " ms, threshold=" + - TimeUnit.NANOSECONDS.toMillis(rollOnSyncNs) + " ms, current pipeline: " + - Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because we exceeded slow sync threshold; time=" + + TimeUnit.NANOSECONDS.toMillis(timeInNanos) + " ms, threshold=" + + TimeUnit.NANOSECONDS.toMillis(rollOnSyncNs) + " ms, current pipeline: " + + Arrays.toString(getPipeline())); requestLogRoll(SLOW_SYNC); } slowSyncCount.incrementAndGet(); // it's fine to unconditionally increment this @@ -1285,26 +1280,26 @@ public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws * passed in WALKey walKey parameter. Be warned that the WriteEntry is not * immediately available on return from this method. It WILL be available subsequent to a sync of * this append; otherwise, you will just have to wait on the WriteEntry to get filled in. 
- * @param info the regioninfo associated with append - * @param key Modified by this call; we add to it this edits region edit/sequence id. - * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit - * sequence id that is after all currently appended edits. - * @param inMemstore Always true except for case where we are writing a region event meta - * marker edit, for example, a compaction completion record into the WAL or noting a - * Region Open event. In these cases the entry is just so we can finish an unfinished - * compaction after a crash when the new Server reads the WAL on recovery, etc. These - * transition event 'Markers' do not go via the memstore. When memstore is false, - * we presume a Marker event edit. + * @param info the regioninfo associated with append + * @param key Modified by this call; we add to it this edits region edit/sequence id. + * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit + * sequence id that is after all currently appended edits. + * @param inMemstore Always true except for case where we are writing a region event meta marker + * edit, for example, a compaction completion record into the WAL or noting a + * Region Open event. In these cases the entry is just so we can finish an + * unfinished compaction after a crash when the new Server reads the WAL on + * recovery, etc. These transition event 'Markers' do not go via the memstore. + * When memstore is false, we presume a Marker event edit. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. */ protected abstract long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException; + throws IOException; protected abstract void doAppend(W writer, FSWALEntry entry) throws IOException; protected abstract W createWriterInstance(Path path) - throws IOException, CommonFSUtils.StreamLacksCapabilityException; + throws IOException, CommonFSUtils.StreamLacksCapabilityException; /** * Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer @@ -1314,15 +1309,14 @@ protected abstract W createWriterInstance(Path path) * start writing to the new writer. */ protected abstract void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) - throws IOException; + throws IOException; protected abstract void doShutdown() throws IOException; protected abstract boolean doCheckLogLowReplication(); /** - * @return true if we exceeded the slow sync roll threshold over the last check - * interval + * @return true if we exceeded the slow sync roll threshold over the last check interval */ protected boolean doCheckSlowSync() { boolean result = false; @@ -1336,16 +1330,15 @@ protected boolean doCheckSlowSync() { // interval from then until the one more that pushed us over. If so, we // should do nothing and let the count reset. 
if (LOG.isDebugEnabled()) { - LOG.debug("checkSlowSync triggered but we decided to ignore it; " + - "count=" + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + - ", elapsedTime=" + elapsedTime + " ms, slowSyncCheckInterval=" + - slowSyncCheckInterval + " ms"); + LOG.debug("checkSlowSync triggered but we decided to ignore it; " + "count=" + + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + ", elapsedTime=" + + elapsedTime + " ms, slowSyncCheckInterval=" + slowSyncCheckInterval + " ms"); } // Fall through to count reset below } else { - LOG.warn("Requesting log roll because we exceeded slow sync threshold; count=" + - slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + - ", current pipeline: " + Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because we exceeded slow sync threshold; count=" + + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + ", current pipeline: " + + Arrays.toString(getPipeline())); result = true; } } @@ -1395,8 +1388,10 @@ private static void split(final Configuration conf, final Path p) throws IOExcep final Path baseDir = CommonFSUtils.getWALRootDir(conf); Path archiveDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME); - if (conf.getBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, - AbstractFSWALProvider.DEFAULT_SEPARATE_OLDLOGDIR)) { + if ( + conf.getBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, + AbstractFSWALProvider.DEFAULT_SEPARATE_OLDLOGDIR) + ) { archiveDir = new Path(archiveDir, p.getName()); } WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf)); @@ -1406,8 +1401,8 @@ private static void usage() { System.err.println("Usage: AbstractFSWAL "); System.err.println("Arguments:"); System.err.println(" --dump Dump textual representation of passed one or more files"); - System.err.println(" For example: " + - "AbstractFSWAL --dump hdfs://example.com:9000/hbase/WALs/MACHINE/LOGFILE"); + System.err.println(" For example: " + + "AbstractFSWAL --dump hdfs://example.com:9000/hbase/WALs/MACHINE/LOGFILE"); System.err.println(" --split Split the passed directory of WAL logs"); System.err.println( " For example: AbstractFSWAL --split hdfs://example.com:9000/hbase/WALs/DIR"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java index a56a31a5a632..608032b8e4ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; @@ -70,7 +71,7 @@ public abstract class AbstractProtobufLogWriter { protected AtomicLong length = new AtomicLong(); private WALCellCodec getCodec(Configuration conf, CompressionContext compressionContext) - throws IOException { + throws IOException { return WALCellCodec.create(conf, null, compressionContext); } @@ -79,14 +80,13 @@ private WALHeader buildWALHeader0(Configuration conf, WALHeader.Builder builder) builder.setWriterClsName(getWriterClassName()); } if (!builder.hasCellCodecClsName()) { - builder.setCellCodecClsName( - WALCellCodec.getWALCellCodecClass(conf).getName()); + builder.setCellCodecClsName(WALCellCodec.getWALCellCodecClass(conf).getName()); } return builder.build(); } protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { return buildWALHeader0(conf, builder); } @@ -94,7 +94,7 @@ protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder // environment. Do not forget to override the setEncryptor method as it will be called in this // method to init your encryptor. protected final WALHeader buildSecureWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { builder.setWriterClsName(getWriterClassName()); if (conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false)) { EncryptionTest.testKeyProvider(conf); @@ -102,7 +102,7 @@ protected final WALHeader buildSecureWALHeader(Configuration conf, WALHeader.Bui // Get an instance of our cipher final String cipherName = - conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, cipherName); if (cipher == null) { throw new RuntimeException("Cipher '" + cipherName + "' is not available"); @@ -111,10 +111,9 @@ protected final WALHeader buildSecureWALHeader(Configuration conf, WALHeader.Bui // Generate a random encryption key for this WAL Key key = cipher.getRandomKey(); builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(EncryptionUtil.wrapKey(conf, - conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName())), - key))); + conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName())), + key))); // Set up the encryptor Encryptor encryptor = cipher.getEncryptor(); @@ -144,13 +143,14 @@ private boolean initializeCompressionContext(Configuration conf, Path path) thro conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); final boolean useValueCompression = conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); - final Compression.Algorithm valueCompressionType = - useValueCompression ? CompressionContext.getValueCompressionAlgorithm(conf) : - Compression.Algorithm.NONE; + final Compression.Algorithm valueCompressionType = useValueCompression + ? 
CompressionContext.getValueCompressionAlgorithm(conf) + : Compression.Algorithm.NONE; if (LOG.isTraceEnabled()) { - LOG.trace("Initializing compression context for {}: isRecoveredEdits={}" + - ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, - CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, + LOG.trace( + "Initializing compression context for {}: isRecoveredEdits={}" + + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", + path, CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, valueCompressionType); } this.compressionContext = @@ -164,8 +164,7 @@ private boolean initializeCompressionContext(Configuration conf, Path path) thro } public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable, - long blocksize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { + long blocksize, StreamSlowMonitor monitor) throws IOException, StreamLacksCapabilityException { try { this.conf = conf; boolean doCompress = initializeCompressionContext(conf, path); @@ -180,9 +179,8 @@ public void init(FileSystem fs, Path path, Configuration conf, boolean overwrita doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); boolean doValueCompress = doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); - WALHeader.Builder headerBuilder = - WALHeader.newBuilder().setHasCompression(doCompress).setHasTagCompression(doTagCompress) - .setHasValueCompression(doValueCompress); + WALHeader.Builder headerBuilder = WALHeader.newBuilder().setHasCompression(doCompress) + .setHasTagCompression(doTagCompress).setHasValueCompression(doValueCompress); if (doValueCompress) { headerBuilder.setValueCompressionAlgorithm( CompressionContext.getValueCompressionAlgorithm(conf).ordinal()); @@ -222,7 +220,7 @@ protected void initAfterHeader(boolean doCompress) throws IOException { // should be called in sub classes's initAfterHeader method to init SecureWALCellCodec. protected final void secureInitAfterHeader(boolean doCompress, Encryptor encryptor) - throws IOException { + throws IOException { if (conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false) && encryptor != null) { WALCellCodec codec = SecureWALCellCodec.getCodec(this.conf, encryptor); this.cellEncoder = codec.getEncoder(getOutputStreamForCellEncoder()); @@ -257,7 +255,7 @@ protected void writeWALTrailer() { } else if ((trailerSize = this.trailer.getSerializedSize()) > this.trailerWarnSize) { // continue writing after warning the user. LOG.warn("Please investigate WALTrailer usage. 
Trailer size > maximum size : " + trailerSize - + " > " + this.trailerWarnSize); + + " > " + this.trailerWarnSize); } length.set(writeWALTrailerAndMagic(trailer, ProtobufLogReader.PB_WAL_COMPLETE_MAGIC)); this.trailerWritten = true; @@ -267,8 +265,8 @@ protected void writeWALTrailer() { } protected abstract void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) - throws IOException, StreamLacksCapabilityException; + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException; /** * simply close the output, do not need to write trailer like the Writer.close @@ -281,7 +279,7 @@ protected abstract void initOutput(FileSystem fs, Path path, boolean overwritabl protected abstract long writeMagicAndWALHeader(byte[] magic, WALHeader header) throws IOException; protected abstract long writeWALTrailerAndMagic(WALTrailer trailer, byte[] magic) - throws IOException; + throws IOException; protected abstract OutputStream getOutputStreamForCellEncoder(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 2602c089216b..8aec10cb1cf5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; - /** * An asynchronous implementation of FSWAL. *
<p>
    @@ -131,8 +130,8 @@ public class AsyncFSWAL extends AbstractFSWAL { private static final Logger LOG = LoggerFactory.getLogger(AsyncFSWAL.class); - private static final Comparator SEQ_COMPARATOR = Comparator.comparingLong( - SyncFuture::getTxid).thenComparingInt(System::identityHashCode); + private static final Comparator SEQ_COMPARATOR = + Comparator.comparingLong(SyncFuture::getTxid).thenComparingInt(System::identityHashCode); public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; @@ -204,20 +203,20 @@ public class AsyncFSWAL extends AbstractFSWAL { private final StreamSlowMonitor streamSlowMonitor; public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { + Configuration conf, List listeners, boolean failIfWALExists, String prefix, + String suffix, EventLoopGroup eventLoopGroup, Class channelClass) + throws FailedLogCloseException, IOException { this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, "monitorForSuffix")); + eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, "monitorForSuffix")); } public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, - String archiveDir, Configuration conf, List listeners, - boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) - throws FailedLogCloseException, IOException { + String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass, StreamSlowMonitor monitor) + throws FailedLogCloseException, IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix); + suffix); this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; this.streamSlowMonitor = monitor; @@ -231,8 +230,8 @@ public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDi Queue queue = (Queue) field.get(consumeExecutor); hasConsumerTask = () -> queue.peek() == consumer; } catch (Exception e) { - LOG.warn("Can not get task queue of " + consumeExecutor + - ", this is not necessary, just give up", e); + LOG.warn("Can not get task queue of " + consumeExecutor + + ", this is not necessary, just give up", e); hasConsumerTask = () -> false; } } else { @@ -240,11 +239,10 @@ public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDi } } else { ThreadPoolExecutor threadPool = - new ThreadPoolExecutor(1, 1, 0L, - TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), - new ThreadFactoryBuilder().setNameFormat("AsyncFSWAL-%d-"+ rootDir.toString() + - "-prefix:" + (prefix == null ? "default" : prefix).replace("%", "%%")) - .setDaemon(true).build()); + new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), + new ThreadFactoryBuilder().setNameFormat("AsyncFSWAL-%d-" + rootDir.toString() + + "-prefix:" + (prefix == null ? 
"default" : prefix).replace("%", "%%")).setDaemon(true) + .build()); hasConsumerTask = () -> threadPool.getQueue().peek() == consumer; this.consumeExecutor = threadPool; } @@ -394,8 +392,8 @@ private void syncCompleted(long epochWhenSync, AsyncWriter writer, long processe // If we haven't already requested a roll, check if we have exceeded logrollsize if (!isLogRollRequested() && writer.getLength() > logrollsize) { if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + - writer.getLength() + ", logrollsize=" + logrollsize); + LOG.debug("Requesting log roll because of file size threshold; length=" + writer.getLength() + + ", logrollsize=" + logrollsize); } requestLogRoll(SIZE); } @@ -404,9 +402,8 @@ private void syncCompleted(long epochWhenSync, AsyncWriter writer, long processe // find all the sync futures between these two txids to see if we need to issue a hsync, if no // sync futures then just use the default one. private boolean isHsync(long beginTxid, long endTxid) { - SortedSet futures = - syncFutures.subSet(new SyncFuture().reset(beginTxid, false), - new SyncFuture().reset(endTxid + 1, false)); + SortedSet futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false), + new SyncFuture().reset(endTxid + 1, false)); if (futures.isEmpty()) { return useHsync; } @@ -512,8 +509,10 @@ private void appendAndSync() { if (appended) { // This is possible, when we fail to sync, we will add the unackedAppends back to // toWriteAppends, so here we may get an entry which is already in the unackedAppends. - if (addedToUnackedAppends || unackedAppends.isEmpty() || - getLastTxid(unackedAppends) < entry.getTxid()) { + if ( + addedToUnackedAppends || unackedAppends.isEmpty() + || getLastTxid(unackedAppends) < entry.getTxid() + ) { unackedAppends.addLast(entry); addedToUnackedAppends = true; } @@ -525,8 +524,10 @@ private void appendAndSync() { // There could be other ways to fix, such as changing the logic in the consume method, but // it will break the assumption and then (may) lead to a big refactoring. So here let's use // this way to fix first, can optimize later. - if (writer.getLength() - fileLengthAtLastSync >= batchSize && - (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends))) { + if ( + writer.getLength() - fileLengthAtLastSync >= batchSize + && (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends)) + ) { break; } } @@ -625,8 +626,8 @@ private void consume() { consumeLock.unlock(); } long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; - nextCursor++) { + for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor + <= cursorBound; nextCursor++) { if (!waitingConsumePayloads.isPublished(nextCursor)) { break; } @@ -662,8 +663,10 @@ private void consume() { // 3. we set consumerScheduled to false and also give up scheduling consumer task. if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { // we will give up consuming so if there are some unsynced data we need to issue a sync. 
- if (writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() && - syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) { + if ( + writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() + && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync + ) { // no new data in the ringbuffer and we have at least one sync request sync(writer); } @@ -796,7 +799,7 @@ protected final long closeWriter(AsyncWriter writer, Path path) { @Override protected void doReplaceWriter(Path oldPath, Path newPath, AsyncWriter nextWriter) - throws IOException { + throws IOException { Preconditions.checkNotNull(nextWriter); waitForSafePoint(); long oldFileLen = closeWriter(this.writer, oldPath); @@ -830,11 +833,11 @@ protected void doShutdown() throws IOException { closeExecutor.shutdown(); try { if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + - " the close of async writer doesn't complete." + - "Please check the status of underlying filesystem" + - " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + - "\""); + LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + + " the close of async writer doesn't complete." + + "Please check the status of underlying filesystem" + + " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + + "\""); } } catch (InterruptedException e) { LOG.error("The wait for close of async writer is interrupted"); @@ -843,8 +846,8 @@ protected void doShutdown() throws IOException { IOException error = new IOException("WAL has been closed"); long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; // drain all the pending sync requests - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; - nextCursor++) { + for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor + <= cursorBound; nextCursor++) { if (!waitingConsumePayloads.isPublished(nextCursor)) { break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index fbd3882d4f73..42d9a1f15f53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; @@ -55,7 +56,7 @@ */ @InterfaceAudience.Private public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter - implements AsyncFSWALProvider.AsyncWriter { + implements AsyncFSWALProvider.AsyncWriter { private static final Logger LOG = LoggerFactory.getLogger(AsyncProtobufLogWriter.class); @@ -69,8 +70,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter */ private volatile long finalSyncedLength = -1; - private static final class OutputStreamWrapper extends OutputStream - implements ByteBufferWriter { + private static final class OutputStreamWrapper extends OutputStream implements ByteBufferWriter { private final AsyncFSOutput out; @@ -114,7 +114,7 @@ public void close() throws IOException { private long waitTimeout; public AsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, - Class channelClass) { + Class channelClass) { this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; // Reuse WAL_ROLL_WAIT_TIMEOUT here to avoid an infinite wait if somehow a wait on a future @@ -129,7 +129,7 @@ public AsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, /* * @return class name which is recognized by hbase-1.x to avoid ProtobufLogReader throwing error: - * IOException: Got unknown writer class: AsyncProtobufLogWriter + * IOException: Got unknown writer class: AsyncProtobufLogWriter */ @Override protected String getWriterClassName() { @@ -140,9 +140,8 @@ protected String getWriterClassName() { public void append(Entry entry) { int buffered = output.buffered(); try { - entry.getKey(). - getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() - .writeDelimitedTo(asyncOutputWrapper); + entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() + .writeDelimitedTo(asyncOutputWrapper); } catch (IOException e) { throw new AssertionError("should not happen", e); } @@ -174,9 +173,8 @@ public synchronized void close() throws IOException { output.recoverAndClose(null); } /** - * We have to call {@link AsyncFSOutput#getSyncedLength()} - * after {@link AsyncFSOutput#close()} to get the final length - * synced to underlying filesystem because {@link AsyncFSOutput#close()} + * We have to call {@link AsyncFSOutput#getSyncedLength()} after {@link AsyncFSOutput#close()} + * to get the final length synced to underlying filesystem because {@link AsyncFSOutput#close()} * may also flush some data to underlying filesystem. 
*/ this.finalSyncedLength = this.output.getSyncedLength(); @@ -189,10 +187,10 @@ public AsyncFSOutput getOutput() { @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { this.output = AsyncFSOutputHelper.createOutput(fs, path, overwritable, false, replication, - blockSize, eventLoopGroup, channelClass, monitor); + blockSize, eventLoopGroup, channelClass, monitor); this.asyncOutputWrapper = new OutputStreamWrapper(output); } @@ -206,7 +204,7 @@ protected void closeOutput() { } } } - + private long writeWALMetadata(Consumer> action) throws IOException { CompletableFuture future = new CompletableFuture<>(); action.accept(future); @@ -270,16 +268,16 @@ protected OutputStream getOutputStreamForCellEncoder() { @Override public long getSyncedLength() { - /** - * The statement "this.output = null;" in {@link AsyncProtobufLogWriter#close} - * is a sync point, if output is null, then finalSyncedLength must set, - * so we can return finalSyncedLength, else we return output.getSyncedLength - */ + /** + * The statement "this.output = null;" in {@link AsyncProtobufLogWriter#close} is a sync point, + * if output is null, then finalSyncedLength must set, so we can return finalSyncedLength, else + * we return output.getSyncedLength + */ AsyncFSOutput outputToUse = this.output; - if(outputToUse == null) { - long finalSyncedLengthToUse = this.finalSyncedLength; - assert finalSyncedLengthToUse >= 0; - return finalSyncedLengthToUse; + if (outputToUse == null) { + long finalSyncedLengthToUse = this.finalSyncedLength; + assert finalSyncedLengthToUse >= 0; + return finalSyncedLengthToUse; } return outputToUse.getSyncedLength(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java index 850359187ae5..e47b2c3a2f6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,8 +68,8 @@ public void close() throws IOException { } } if (error != null) { - throw new IOException("Failed to close at least one writer, please see the warn log above. " + - "The cause is the first exception occurred", error); + throw new IOException("Failed to close at least one writer, please see the warn log above. " + + "The cause is the first exception occurred", error); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java index bfb7f9a85a5b..e626d9e14a8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.ByteArrayOutputStream; @@ -41,10 +40,10 @@ /** * Context that holds the various dictionaries for compression in WAL. *
<p>
    - * CompressionContexts are not expected to be shared among threads. Multithreaded use may - * produce unexpected results. + * CompressionContexts are not expected to be shared among threads. Multithreaded use may produce + * unexpected results. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public class CompressionContext { private static final Logger LOG = LoggerFactory.getLogger(CompressionContext.class); @@ -59,12 +58,16 @@ public class CompressionContext { "hbase.regionserver.wal.value.compression.type"; public enum DictionaryIndex { - REGION, TABLE, FAMILY, QUALIFIER, ROW + REGION, + TABLE, + FAMILY, + QUALIFIER, + ROW } /** - * Encapsulates the compression algorithm and its streams that we will use for value - * compression in this WAL. + * Encapsulates the compression algorithm and its streams that we will use for value compression + * in this WAL. */ static class ValueCompressor { @@ -86,16 +89,14 @@ public Compression.Algorithm getAlgorithm() { return algorithm; } - public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) - throws IOException { + public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) throws IOException { if (compressedOut == null) { // Create the output streams here the first time around. lowerOut = new ByteArrayOutputStream(); if (compressor == null) { compressor = algorithm.getCompressor(); } - compressedOut = algorithm.createCompressionStream(lowerOut, compressor, - IO_BUFFER_SIZE); + compressedOut = algorithm.createCompressionStream(lowerOut, compressor, IO_BUFFER_SIZE); } else { lowerOut.reset(); } @@ -105,7 +106,7 @@ public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) } public int decompress(InputStream in, int inLength, byte[] outArray, int outOffset, - int outLength) throws IOException { + int outLength) throws IOException { // Our input is a sequence of bounded byte ranges (call them segments), with // BoundedDelegatingInputStream providing a way to switch in a new segment when the @@ -117,8 +118,7 @@ public int decompress(InputStream in, int inLength, byte[] outArray, int outOffs if (decompressor == null) { decompressor = algorithm.getDecompressor(); } - compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, - IO_BUFFER_SIZE); + compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, IO_BUFFER_SIZE); } else { lowerIn.setDelegate(in, inLength); } @@ -173,23 +173,21 @@ public void clear() { } private final Map dictionaries = - new EnumMap<>(DictionaryIndex.class); + new EnumMap<>(DictionaryIndex.class); // Context used for compressing tags TagCompressionContext tagCompressionContext = null; ValueCompressor valueCompressor = null; - public CompressionContext(Class dictType, - boolean recoveredEdits, boolean hasTagCompression, boolean hasValueCompression, - Compression.Algorithm valueCompressionType) - throws SecurityException, NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException, IOException { - Constructor dictConstructor = - dictType.getConstructor(); + public CompressionContext(Class dictType, boolean recoveredEdits, + boolean hasTagCompression, boolean hasValueCompression, + Compression.Algorithm valueCompressionType) throws SecurityException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException, 
IOException { + Constructor dictConstructor = dictType.getConstructor(); for (DictionaryIndex dictionaryIndex : DictionaryIndex.values()) { Dictionary newDictionary = dictConstructor.newInstance(); dictionaries.put(dictionaryIndex, newDictionary); } - if(recoveredEdits) { + if (recoveredEdits) { getDictionary(DictionaryIndex.REGION).init(1); getDictionary(DictionaryIndex.TABLE).init(1); } else { @@ -210,9 +208,8 @@ public CompressionContext(Class dictType, } public CompressionContext(Class dictType, boolean recoveredEdits, - boolean hasTagCompression) - throws SecurityException, NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException, IOException { + boolean hasTagCompression) throws SecurityException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException, IOException { this(dictType, recoveredEdits, hasTagCompression, false, null); } @@ -233,7 +230,7 @@ public ValueCompressor getValueCompressor() { } void clear() { - for(Dictionary dictionary : dictionaries.values()){ + for (Dictionary dictionary : dictionaries.values()) { dictionary.clear(); } if (tagCompressionContext != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java index 13f5d6ef35bd..d283a19e45fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -13,15 +13,13 @@ * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and - * limitations under the License + * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -29,17 +27,17 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.util.Dictionary; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.WritableUtils; - -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.io.WritableUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * A set of static functions for running our custom WAL compression/decompression. - * Also contains a command line tool to compress and uncompress WALs. + * A set of static functions for running our custom WAL compression/decompression. Also contains a + * command line tool to compress and uncompress WALs. 
*/ @InterfaceAudience.Private public class Compressor { @@ -65,8 +63,7 @@ private static void printHelp() { return; } - private static void transformFile(Path input, Path output) - throws IOException { + private static void transformFile(Path input, Path output) throws IOException { Configuration conf = HBaseConfiguration.create(); FileSystem inFS = input.getFileSystem(conf); @@ -80,12 +77,13 @@ private static void transformFile(Path input, Path output) System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName()); return; } - boolean compress = ((ReaderBase)in).hasCompression(); + boolean compress = ((ReaderBase) in).hasCompression(); conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress); out = WALFactory.createWALWriter(outFS, output, conf); WAL.Entry e = null; - while ((e = in.next()) != null) out.append(e); + while ((e = in.next()) != null) + out.append(e); } finally { in.close(); if (out != null) { @@ -97,14 +95,12 @@ private static void transformFile(Path input, Path output) /** * Reads the next compressed entry and returns it as a byte array - * - * @param in the DataInput to read from + * @param in the DataInput to read from * @param dict the dictionary we use for our read. * @return the uncompressed array. */ @Deprecated - static byte[] readCompressed(DataInput in, Dictionary dict) - throws IOException { + static byte[] readCompressed(DataInput in, Dictionary dict) throws IOException { byte status = in.readByte(); if (status == Dictionary.NOT_IN_DICTIONARY) { @@ -121,27 +117,23 @@ static byte[] readCompressed(DataInput in, Dictionary dict) short dictIdx = toShort(status, in.readByte()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { - throw new IOException("Missing dictionary entry for index " - + dictIdx); + throw new IOException("Missing dictionary entry for index " + dictIdx); } return entry; } } /** - * Reads a compressed entry into an array. - * The output into the array ends up length-prefixed. - * - * @param to the array to write into + * Reads a compressed entry into an array. The output into the array ends up length-prefixed. + * @param to the array to write into * @param offset array offset to start writing to - * @param in the DataInput to read from - * @param dict the dictionary to use for compression - * + * @param in the DataInput to read from + * @param dict the dictionary to use for compression * @return the length of the uncompressed data */ @Deprecated - static int uncompressIntoArray(byte[] to, int offset, DataInput in, - Dictionary dict) throws IOException { + static int uncompressIntoArray(byte[] to, int offset, DataInput in, Dictionary dict) + throws IOException { byte status = in.readByte(); if (status == Dictionary.NOT_IN_DICTIONARY) { @@ -162,8 +154,7 @@ static int uncompressIntoArray(byte[] to, int offset, DataInput in, throw new IOException("Unable to uncompress the log entry", ex); } if (entry == null) { - throw new IOException("Missing dictionary entry for index " - + dictIdx); + throw new IOException("Missing dictionary entry for index " + dictIdx); } // now we write the uncompressed value. Bytes.putBytes(to, offset, entry, 0, entry.length); @@ -173,15 +164,13 @@ static int uncompressIntoArray(byte[] to, int offset, DataInput in, /** * Compresses and writes an array to a DataOutput - * * @param data the array to write. 
- * @param out the DataOutput to write into + * @param out the DataOutput to write into * @param dict the dictionary to use for compression */ @Deprecated - static void writeCompressed(byte[] data, int offset, int length, - DataOutput out, Dictionary dict) - throws IOException { + static void writeCompressed(byte[] data, int offset, int length, DataOutput out, Dictionary dict) + throws IOException { short dictIdx = Dictionary.NOT_IN_DICTIONARY; if (dict != null) { dictIdx = dict.findEntry(data, offset, length); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java index c38515e08178..5825ba3217f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when a failed append or sync on a WAL. - * Thrown when WAL can no longer be used. Roll the WAL. + * Thrown when a failed append or sync on a WAL. Thrown when WAL can no longer be used. Roll the + * WAL. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java index 1279c2f31e83..467675f770f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,9 +50,9 @@ public class DualAsyncFSWAL extends AsyncFSWAL { private volatile boolean markerEditOnly = false; public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path remoteWALDir, - String logDir, String archiveDir, Configuration conf, List listeners, - boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { + String logDir, String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass) throws FailedLogCloseException, IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, eventLoopGroup, channelClass); this.remoteFs = remoteFs; @@ -61,7 +61,7 @@ public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path rem // will be overridden in testcase protected AsyncWriter createCombinedAsyncWriter(AsyncWriter localWriter, - AsyncWriter remoteWriter) { + AsyncWriter remoteWriter) { return CombinedAsyncWriter.create(remoteWriter, localWriter); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 54dfdcde740f..25a2974ea96f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -107,8 +107,10 @@ public class FSHLog extends AbstractFSWAL { // We use ring buffer sequence as txid of FSWALEntry and SyncFuture. private static final Logger LOG = LoggerFactory.getLogger(FSHLog.class); - private static final String TOLERABLE_LOW_REPLICATION = "hbase.regionserver.hlog.tolerable.lowreplication"; - private static final String LOW_REPLICATION_ROLL_LIMIT = "hbase.regionserver.hlog.lowreplication.rolllimit"; + private static final String TOLERABLE_LOW_REPLICATION = + "hbase.regionserver.hlog.tolerable.lowreplication"; + private static final String LOW_REPLICATION_ROLL_LIMIT = + "hbase.regionserver.hlog.lowreplication.rolllimit"; private static final int DEFAULT_LOW_REPLICATION_ROLL_LIMIT = 5; private static final String ROLL_ERRORS_TOLERATED = "hbase.regionserver.logroll.errors.tolerated"; private static final int DEFAULT_ROLL_ERRORS_TOLERATED = 2; @@ -117,7 +119,8 @@ public class FSHLog extends AbstractFSWAL { private static final String MAX_BATCH_COUNT = "hbase.regionserver.wal.sync.batch.count"; private static final int DEFAULT_MAX_BATCH_COUNT = 200; - private static final String FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = "hbase.wal.fshlog.wait.on.shutdown.seconds"; + private static final String FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = + "hbase.wal.fshlog.wait.on.shutdown.seconds"; private static final int DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = 5; /** @@ -196,25 +199,25 @@ public void handleOnShutdownException(Throwable ex) { /** * Constructor. 
- * @param fs filesystem handle - * @param root path for stored and archived wals + * @param fs filesystem handle + * @param root path for stored and archived wals * @param logDir dir where wals are stored - * @param conf configuration to use + * @param conf configuration to use */ public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf) - throws IOException { + throws IOException { this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); } public FSHLog(final FileSystem fs, Abortable abortable, final Path root, final String logDir, - final Configuration conf) throws IOException { + final Configuration conf) throws IOException { this(fs, abortable, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, - null); + null); } public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } @@ -222,33 +225,34 @@ public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, * Create an edit log at the given dir location. You should never have to load an * existing log. If there is a log at startup, it should have already been processed and deleted * by the time the WAL object is started up. - * @param fs filesystem handle - * @param abortable Abortable - the server here - * @param rootDir path to where logs and oldlogs - * @param logDir dir where wals are stored - * @param archiveDir dir where wals are archived - * @param conf configuration to use - * @param listeners Listeners on WAL events. Listeners passed here will be registered before we do - * anything else; e.g. the Constructor {@link #rollWriter()}. + * @param fs filesystem handle + * @param abortable Abortable - the server here + * @param rootDir path to where logs and oldlogs + * @param logDir dir where wals are stored + * @param archiveDir dir where wals are archived + * @param conf configuration to use + * @param listeners Listeners on WAL events. Listeners passed here will be registered before + * we do anything else; e.g. the Constructor {@link #rollWriter()}. * @param failIfWALExists If true IOException will be thrown if files related to this wal already - * exist. - * @param prefix should always be hostname and port in distributed env and it will be URL encoded - * before being used. If prefix is null, "wal" will be used - * @param suffix will be url encoded. null is treated as empty. non-empty must start with - * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} + * exist. + * @param prefix should always be hostname and port in distributed env and it will be URL + * encoded before being used. If prefix is null, "wal" will be used + * @param suffix will be url encoded. null is treated as empty. 
non-empty must start with + * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} */ public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir, - final String logDir, final String archiveDir, final Configuration conf, - final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) throws IOException { + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix); - this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, - CommonFSUtils.getDefaultReplication(fs, this.walDir)); - this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); + suffix); + this.minTolerableReplication = + conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); + this.lowReplicationRollLimit = + conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); this.closeErrorsTolerated = conf.getInt(ROLL_ERRORS_TOLERATED, DEFAULT_ROLL_ERRORS_TOLERATED); - this.waitOnShutdownInSeconds = conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, - DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + this.waitOnShutdownInSeconds = + conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); // This is the 'writer' -- a single threaded executor. This single thread 'consumes' what is // put on the ring buffer. String hostingThreadName = Thread.currentThread().getName(); @@ -273,9 +277,9 @@ public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir /** * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate the - * default behavior (such as setting the maxRecoveryErrorCount value). This is - * done using reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is - * removed. + * default behavior (such as setting the maxRecoveryErrorCount value). This is done using + * reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 + * support is removed. * @return null if underlying stream is not ready. */ OutputStream getOutputStream() { @@ -444,11 +448,11 @@ private void closeWriter(Writer writer, Path path, boolean syncCloseCall) throws boolean hasUnflushedEntries = isUnflushedEntries(); if (syncCloseCall && (hasUnflushedEntries || (errors > this.closeErrorsTolerated))) { LOG.error("Close of WAL " + path + " failed. Cause=\"" + ioe.getMessage() + "\", errors=" - + errors + ", hasUnflushedEntries=" + hasUnflushedEntries); + + errors + ", hasUnflushedEntries=" + hasUnflushedEntries); throw ioe; } LOG.warn("Riding over failed WAL close of " + path - + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", ioe); + + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", ioe); } finally { inflightWALClosures.remove(path.getName()); } @@ -465,7 +469,7 @@ protected void doShutdown() throws IOException { this.disruptor.shutdown(timeoutms, TimeUnit.MILLISECONDS); } catch (TimeoutException e) { LOG.warn("Timed out bringing down disruptor after " + timeoutms + "ms; forcing halt " - + "(It is a problem if this is NOT an ABORT! -- DATALOSS!!!!)"); + + "(It is a problem if this is NOT an ABORT! 
-- DATALOSS!!!!)"); this.disruptor.halt(); this.disruptor.shutdown(); } @@ -481,10 +485,11 @@ protected void doShutdown() throws IOException { closeExecutor.shutdown(); try { if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited {} seconds but the close of writer(s) doesn't complete." + LOG.error( + "We have waited {} seconds but the close of writer(s) doesn't complete." + "Please check the status of underlying filesystem" - + " or increase the wait time by the config \"{}\"", this.waitOnShutdownInSeconds, - FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + + " or increase the wait time by the config \"{}\"", + this.waitOnShutdownInSeconds, FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); } } catch (InterruptedException e) { LOG.error("The wait for termination of FSHLog writer(s) is interrupted"); @@ -552,7 +557,7 @@ void offer(final long sequence, final SyncFuture[] syncFutures, final int syncFu * @return Returns 1. */ private int releaseSyncFuture(final SyncFuture syncFuture, final long currentSequence, - final Throwable t) { + final Throwable t) { if (!syncFuture.done(currentSequence, t)) { throw new IllegalStateException(); } @@ -603,8 +608,7 @@ private long updateHighestSyncedSequence(long sequence) { boolean areSyncFuturesReleased() { // check whether there is no sync futures offered, and no in-flight sync futures that is being // processed. - return syncFutures.size() <= 0 - && takeSyncFuture == null; + return syncFutures.size() <= 0 && takeSyncFuture == null; } @Override @@ -614,8 +618,8 @@ public void run() { int syncCount = 0; try { - // Make a local copy of takeSyncFuture after we get it. We've been running into NPEs - // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED + // Make a local copy of takeSyncFuture after we get it. We've been running into NPEs + // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED // java.lang.NullPointerException // at org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:582) // at java.lang.Thread.run(Thread.java:748) @@ -630,7 +634,7 @@ public void run() { long syncFutureSequence = sf.getTxid(); if (syncFutureSequence > currentSequence) { throw new IllegalStateException("currentSequence=" + currentSequence - + ", syncFutureSequence=" + syncFutureSequence); + + ", syncFutureSequence=" + syncFutureSequence); } // See if we can process any syncfutures BEFORE we go sync. long currentHighestSyncedSequence = highestSyncedTxid.get(); @@ -694,14 +698,14 @@ private boolean checkLogRoll() { } try { if (doCheckLogLowReplication()) { - LOG.warn("Requesting log roll because of low replication, current pipeline: " + - Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because of low replication, current pipeline: " + + Arrays.toString(getPipeline())); requestLogRoll(LOW_REPLICATION); return true; } else if (writer != null && writer.getLength() > logrollsize) { if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + - writer.getLength() + ", logrollsize=" + logrollsize); + LOG.debug("Requesting log roll because of file size threshold; length=" + + writer.getLength() + ", logrollsize=" + logrollsize); } requestLogRoll(SIZE); return true; @@ -730,9 +734,9 @@ protected boolean doCheckLogLowReplication() { if (this.lowReplicationRollEnabled) { if (this.consecutiveLogRolls.get() < this.lowReplicationRollLimit) { LOG.warn("HDFS pipeline error detected. 
" + "Found " + numCurrentReplicas - + " replicas but expecting no less than " + this.minTolerableReplication - + " replicas. " + " Requesting close of WAL. current pipeline: " - + Arrays.toString(getPipeline())); + + " replicas but expecting no less than " + this.minTolerableReplication + + " replicas. " + " Requesting close of WAL. current pipeline: " + + Arrays.toString(getPipeline())); logRollNeeded = true; // If rollWriter is requested, increase consecutiveLogRolls. Once it // is larger than lowReplicationRollLimit, disable the @@ -740,7 +744,7 @@ protected boolean doCheckLogLowReplication() { this.consecutiveLogRolls.getAndIncrement(); } else { LOG.warn("Too many consecutive RollWriter requests, it's a sign of " - + "the total number of live datanodes is lower than the tolerable replicas."); + + "the total number of live datanodes is lower than the tolerable replicas."); this.consecutiveLogRolls.set(0); this.lowReplicationRollEnabled = false; } @@ -830,9 +834,9 @@ boolean isLowReplicationRollEnabled() { return lowReplicationRollEnabled; } - public static final long FIXED_OVERHEAD = ClassSize - .align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) - + (3 * Bytes.SIZEOF_INT) + (4 * Bytes.SIZEOF_LONG)); + public static final long FIXED_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) + + (3 * Bytes.SIZEOF_INT) + (4 * Bytes.SIZEOF_LONG)); /** * This class is used coordinating two threads holding one thread at a 'safe point' while the @@ -850,13 +854,13 @@ boolean isLowReplicationRollEnabled() { * To start up the drama, Thread A creates an instance of this class each time it would do this * zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only). * Thread B notices the new instance (via reading a volatile reference or how ever) and it starts - * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it cannot proceed - * until the Thread B 'safe point' is attained. Thread A will be held inside in - * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread B frees - * Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the 'safe - * point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it blocks - * here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it needs - * to do while Thread B is paused. When finished, it lets Thread B lose by calling + * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it + * cannot proceed until the Thread B 'safe point' is attained. Thread A will be held inside in + * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread + * B frees Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the + * 'safe point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it + * blocks here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it + * needs to do while Thread B is paused. When finished, it lets Thread B lose by calling * {@link #releaseSafePoint()} and away go both Threads again. */ static class SafePointZigZagLatch { @@ -880,11 +884,11 @@ private void checkIfSyncFailed(SyncFuture syncFuture) throws FailedSyncBeforeLog * For Thread A to call when it is ready to wait on the 'safe point' to be attained. 
Thread A * will be held in here until Thread B calls {@link #safePointAttained()} * @param syncFuture We need this as barometer on outstanding syncs. If it comes home with an - * exception, then something is up w/ our syncing. + * exception, then something is up w/ our syncing. * @return The passed syncFuture */ - SyncFuture waitSafePoint(SyncFuture syncFuture) throws InterruptedException, - FailedSyncBeforeLogCloseException { + SyncFuture waitSafePoint(SyncFuture syncFuture) + throws InterruptedException, FailedSyncBeforeLogCloseException { while (!this.safePointAttainedLatch.await(1, TimeUnit.MILLISECONDS)) { checkIfSyncFailed(syncFuture); } @@ -923,7 +927,7 @@ void releaseSafePoint() { */ boolean isCocked() { return this.safePointAttainedLatch.getCount() > 0 - && this.safePointReleasedLatch.getCount() > 0; + && this.safePointReleasedLatch.getCount() > 0; } } @@ -1015,8 +1019,8 @@ private boolean isOutstandingSyncs() { private boolean isOutstandingSyncsFromRunners() { // Look at SyncFutures in the SyncRunners - for (SyncRunner syncRunner: syncRunners) { - if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + for (SyncRunner syncRunner : syncRunners) { + if (syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { return true; } } @@ -1026,7 +1030,7 @@ private boolean isOutstandingSyncsFromRunners() { @Override // We can set endOfBatch in the below method if at end of our this.syncFutures array public void onEvent(final RingBufferTruck truck, final long sequence, boolean endOfBatch) - throws Exception { + throws Exception { // Appends and syncs are coming in order off the ringbuffer. We depend on this fact. We'll // add appends to dfsclient as they come in. Batching appends doesn't give any significant // benefit on measurement. Handler sync calls we will batch up. If we get an exception @@ -1054,11 +1058,14 @@ public void onEvent(final RingBufferTruck truck, final long sequence, boolean en // Failed append. Record the exception. this.exception = e; // invoking cleanupOutstandingSyncsOnException when append failed with exception, - // it will cleanup existing sync requests recorded in syncFutures but not offered to SyncRunner yet, - // so there won't be any sync future left over if no further truck published to disruptor. + // it will cleanup existing sync requests recorded in syncFutures but not offered to + // SyncRunner yet, + // so there won't be any sync future left over if no further truck published to + // disruptor. cleanupOutstandingSyncsOnException(sequence, - this.exception instanceof DamagedWALException ? this.exception - : new DamagedWALException("On sync", this.exception)); + this.exception instanceof DamagedWALException + ? this.exception + : new DamagedWALException("On sync", this.exception)); // Return to keep processing events coming off the ringbuffer return; } finally { @@ -1081,15 +1088,15 @@ public void onEvent(final RingBufferTruck truck, final long sequence, boolean en return; } // syncRunnerIndex is bound to the range [0, Integer.MAX_INT - 1] as follows: - // * The maximum value possible for syncRunners.length is Integer.MAX_INT - // * syncRunnerIndex starts at 0 and is incremented only here - // * after the increment, the value is bounded by the '%' operator to - // [0, syncRunners.length), presuming the value was positive prior to - // the '%' operator. - // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in - // syncRunnerIndex ensuring that it can't grow without bound and overflow. 
- // * note that the value after the increment must be positive, because the most it - // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. + // * The maximum value possible for syncRunners.length is Integer.MAX_INT + // * syncRunnerIndex starts at 0 and is incremented only here + // * after the increment, the value is bounded by the '%' operator to + // [0, syncRunners.length), presuming the value was positive prior to + // the '%' operator. + // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in + // syncRunnerIndex ensuring that it can't grow without bound and overflow. + // * note that the value after the increment must be positive, because the most it + // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. this.syncRunnerIndex = (this.syncRunnerIndex + 1) % this.syncRunners.length; try { // Below expects that the offer 'transfers' responsibility for the outstanding syncs to @@ -1104,8 +1111,10 @@ public void onEvent(final RingBufferTruck truck, final long sequence, boolean en } // We may have picked up an exception above trying to offer sync if (this.exception != null) { - cleanupOutstandingSyncsOnException(sequence, this.exception instanceof DamagedWALException - ? this.exception : new DamagedWALException("On sync", this.exception)); + cleanupOutstandingSyncsOnException(sequence, + this.exception instanceof DamagedWALException + ? this.exception + : new DamagedWALException("On sync", this.exception)); } attainSafePoint(sequence); // It is critical that we offer the futures back to the cache for reuse here after the @@ -1136,13 +1145,15 @@ private void attainSafePoint(final long currentSequence) { // Wait on outstanding syncers; wait for them to finish syncing (unless we've been // shutdown or unless our latch has been thrown because we have been aborted or unless // this WAL is broken and we can't get a sync/append to complete). - while ((!this.shutdown && this.zigzagLatch.isCocked() + while ( + (!this.shutdown && this.zigzagLatch.isCocked() && highestSyncedTxid.get() < currentSequence && // We could be in here and all syncs are failing or failed. Check for this. Otherwise // we'll just be stuck here for ever. In other words, ensure there syncs running. isOutstandingSyncs()) // Wait for all SyncRunners to finish their work so that we can replace the writer - || isOutstandingSyncsFromRunners()) { + || isOutstandingSyncsFromRunners() + ) { synchronized (this.safePointWaiter) { this.safePointWaiter.wait(0, 1); } @@ -1165,8 +1176,8 @@ void append(final FSWALEntry entry) throws Exception { try { FSHLog.this.appendEntry(writer, entry); } catch (Exception e) { - String msg = "Append sequenceId=" + entry.getKey().getSequenceId() - + ", requesting roll of WAL"; + String msg = + "Append sequenceId=" + entry.getKey().getSequenceId() + ", requesting roll of WAL"; LOG.warn(msg, e); requestLogRoll(ERROR); throw new DamagedWALException(msg, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index ca51ec0c5684..24043ab504d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -37,12 +37,12 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** - * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. 
- * A subclass of {@link Entry} that carries extra info across the ring buffer such as - * region sequenceid (we want to use this later, just before we write the WAL to ensure region - * edits maintain order). The extra info added here is not 'serialized' as part of the WALEdit - * hence marked 'transient' to underline this fact. It also adds mechanism so we can wait on - * the assign of the region sequence id. See #stampRegionSequenceId(). + * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. A subclass of {@link Entry} that + * carries extra info across the ring buffer such as region sequenceid (we want to use this later, + * just before we write the WAL to ensure region edits maintain order). The extra info added here is + * not 'serialized' as part of the WALEdit hence marked 'transient' to underline this fact. It also + * adds mechanism so we can wait on the assign of the region sequence id. See + * #stampRegionSequenceId(). */ @InterfaceAudience.Private class FSWALEntry extends Entry { @@ -51,9 +51,9 @@ class FSWALEntry extends Entry { private final transient long txid; /** - * If false, means this is a meta edit written by the hbase system itself. It was not in - * memstore. HBase uses these edit types to note in the log operational transitions such - * as compactions, flushes, or region open/closes. + * If false, means this is a meta edit written by the hbase system itself. It was not in memstore. + * HBase uses these edit types to note in the log operational transitions such as compactions, + * flushes, or region open/closes. */ private final transient boolean inMemstore; @@ -67,8 +67,8 @@ class FSWALEntry extends Entry { private final transient ServerCall rpcCall; /** - * @param inMemstore If true, then this is a data edit, one that came from client. If false, it - * is a meta edit made by the hbase system itself and is for the WAL only. + * @param inMemstore If true, then this is a data edit, one that came from client. If false, it is + * a meta edit made by the hbase system itself and is for the WAL only. */ FSWALEntry(final long txid, final WALKeyImpl key, final WALEdit edit, final RegionInfo regionInfo, final boolean inMemstore, ServerCall rpcCall) { @@ -95,7 +95,7 @@ static Set collectFamilies(List cells) { return Collections.emptySet(); } else { Set set = new TreeSet<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: cells) { + for (Cell cell : cells) { if (!WALEdit.isMetaEditFamily(cell)) { set.add(CellUtil.cloneFamily(cell)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java index 671147208f1b..89481161f4a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.TableName; @@ -32,8 +29,8 @@ import org.slf4j.LoggerFactory; /** - * Class used to push numbers about the WAL into the metrics subsystem. This will take a - * single function call and turn it into multiple manipulations of the hadoop metrics system. + * Class used to push numbers about the WAL into the metrics subsystem. This will take a single + * function call and turn it into multiple manipulations of the hadoop metrics system. */ @InterfaceAudience.Private public class MetricsWAL implements WALActionsListener { @@ -51,12 +48,12 @@ public MetricsWAL() { @Override public void postSync(final long timeInNanos, final int handlerSyncs) { - source.incrementSyncTime(timeInNanos/1000000L); + source.incrementSyncTime(timeInNanos / 1000000L); } @Override public void postAppend(final long size, final long time, final WALKey logkey, - final WALEdit logEdit) throws IOException { + final WALEdit logEdit) throws IOException { TableName tableName = logkey.getTableName(); source.incrementAppendCount(tableName); source.incrementAppendTime(time); @@ -66,9 +63,7 @@ public void postAppend(final long size, final long time, final WALKey logkey, if (time > 1000) { source.incrementSlowAppendCount(); LOG.warn(String.format("%s took %d ms appending an edit to wal; len~=%s", - Thread.currentThread().getName(), - time, - StringUtils.humanReadableInt(size))); + Thread.currentThread().getName(), time, StringUtils.humanReadableInt(size))); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 8aba943d0fba..42dcb51e1e72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.EOFException; @@ -25,23 +23,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader.Builder; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,6 +40,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader.Builder; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; + /** * A Protobuf based WAL has the following structure: *

@@ -56,12 +53,12 @@
  * <TrailerSize> <PB_WAL_COMPLETE_MAGIC>
  *

    * The Reader reads meta information (WAL Compression state, WALTrailer, etc) in - * ProtobufLogReader#initReader(FSDataInputStream). A WALTrailer is an extensible structure - * which is appended at the end of the WAL. This is empty for now; it can contain some meta - * information such as Region level stats, etc in future. + * ProtobufLogReader#initReader(FSDataInputStream). A WALTrailer is an extensible structure which is + * appended at the end of the WAL. This is empty for now; it can contain some meta information such + * as Region level stats, etc in future. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, - HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.CONFIG }) public class ProtobufLogReader extends ReaderBase { private static final Logger LOG = LoggerFactory.getLogger(ProtobufLogReader.class); // public for WALFactory until we move everything to o.a.h.h.wal @@ -97,7 +94,7 @@ public class ProtobufLogReader extends ReaderBase { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(AsyncProtobufLogWriter.class.getSimpleName()); } - + // cell codec classname private String codecClsName = null; @@ -105,12 +102,12 @@ public class ProtobufLogReader extends ReaderBase { public long trailerSize() { if (trailerPresent) { // sizeof PB_WAL_COMPLETE_MAGIC + sizof trailerSize + trailer - final long calculatedSize = (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT - + trailer.getSerializedSize(); + final long calculatedSize = + (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + trailer.getSerializedSize(); final long expectedSize = fileLength - walEditsStopOffset; if (expectedSize != calculatedSize) { LOG.warn("After parsing the trailer, we expect the total footer to be {} bytes, but we " - + "calculate it as being {}", expectedSize, calculatedSize); + + "calculate it as being {}", expectedSize, calculatedSize); } return expectedSize; } else { @@ -119,23 +116,25 @@ public long trailerSize() { } enum WALHdrResult { - EOF, // stream is at EOF when method starts + EOF, // stream is at EOF when method starts SUCCESS, - UNKNOWN_WRITER_CLS // name of writer class isn't recognized + UNKNOWN_WRITER_CLS // name of writer class isn't recognized } - + // context for WALHdr carrying information such as Cell Codec classname static class WALHdrContext { WALHdrResult result; String cellCodecClsName; - + WALHdrContext(WALHdrResult result, String cellCodecClsName) { this.result = result; this.cellCodecClsName = cellCodecClsName; } + WALHdrResult getResult() { return result; } + String getCellCodecClsName() { return cellCodecClsName; } @@ -166,7 +165,7 @@ public void reset() throws IOException { @Override public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream stream) - throws IOException { + throws IOException { this.trailerWarnSize = conf.getInt(WAL_TRAILER_WARN_SIZE, DEFAULT_WAL_TRAILER_WARN_SIZE); super.init(fs, path, conf, stream); } @@ -182,31 +181,28 @@ protected String initReader(FSDataInputStream stream) throws IOException { public List getWriterClsNames() { return writerClsNames; } - + /* * Returns the cell codec classname */ public String getCodecClsName() { - return codecClsName; + return codecClsName; } - protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) - throws IOException { - boolean res = builder.mergeDelimitedFrom(stream); - if (!res) return 
new WALHdrContext(WALHdrResult.EOF, null); - if (builder.hasWriterClsName() && - !getWriterClsNames().contains(builder.getWriterClsName())) { - return new WALHdrContext(WALHdrResult.UNKNOWN_WRITER_CLS, null); - } - String clsName = null; - if (builder.hasCellCodecClsName()) { - clsName = builder.getCellCodecClsName(); - } - return new WALHdrContext(WALHdrResult.SUCCESS, clsName); + protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) throws IOException { + boolean res = builder.mergeDelimitedFrom(stream); + if (!res) return new WALHdrContext(WALHdrResult.EOF, null); + if (builder.hasWriterClsName() && !getWriterClsNames().contains(builder.getWriterClsName())) { + return new WALHdrContext(WALHdrResult.UNKNOWN_WRITER_CLS, null); + } + String clsName = null; + if (builder.hasCellCodecClsName()) { + clsName = builder.getCellCodecClsName(); + } + return new WALHdrContext(WALHdrResult.SUCCESS, clsName); } - private String initInternal(FSDataInputStream stream, boolean isFirst) - throws IOException { + private String initInternal(FSDataInputStream stream, boolean isFirst) throws IOException { close(); if (!isFirst) { // Re-compute the file length. @@ -234,8 +230,7 @@ private String initInternal(FSDataInputStream stream, boolean isFirst) WALProtos.WALHeader header = builder.build(); this.hasCompression = header.hasHasCompression() && header.getHasCompression(); this.hasTagCompression = header.hasHasTagCompression() && header.getHasTagCompression(); - this.hasValueCompression = header.hasHasValueCompression() && - header.getHasValueCompression(); + this.hasValueCompression = header.hasHasValueCompression() && header.getHasValueCompression(); if (header.hasValueCompressionAlgorithm()) { try { this.valueCompressionType = @@ -252,13 +247,13 @@ private String initInternal(FSDataInputStream stream, boolean isFirst) this.seekOnFs(currentPosition); if (LOG.isTraceEnabled()) { LOG.trace("After reading the trailer: walEditsStopOffset: " + this.walEditsStopOffset - + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + - (trailerPresent ? "true, size: " + trailer.getSerializedSize() : "false") + - ", currentPosition: " + currentPosition); + + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + + (trailerPresent ? "true, size: " + trailer.getSerializedSize() : "false") + + ", currentPosition: " + currentPosition); } - + codecClsName = hdrCtxt.getCellCodecClsName(); - + return hdrCtxt.getCellCodecClsName(); } @@ -276,8 +271,7 @@ private String initInternal(FSDataInputStream stream, boolean isFirst) * *

   * In case the trailer size > this.trailerMaxSize, it is read after a WARN message.
-  * @return true if a valid trailer is present
-  * @throws IOException
+  * @return true if a valid trailer is present n
   */
  private boolean setTrailerIfPresent() {
    try {
current offset=" + this.inputStream.getPos()); } WALKey walKey = builder.build(); entry.getKey().readFieldsFromPb(walKey, this.byteStringUncompressor); @@ -421,12 +412,11 @@ protected boolean readNext(Entry entry) throws IOException { } catch (Throwable t) { LOG.trace("Error getting pos for error message - ignoring", t); } - String message = - " while reading " + expectedCells + " WAL KVs; started reading at " + posBefore - + " and read up to " + posAfterStr; + String message = " while reading " + expectedCells + " WAL KVs; started reading at " + + posBefore + " and read up to " + posAfterStr; IOException realEofEx = extractHiddenEof(ex); - throw (EOFException) new EOFException("EOF " + message). - initCause(realEofEx != null ? realEofEx : ex); + throw (EOFException) new EOFException("EOF " + message) + .initCause(realEofEx != null ? realEofEx : ex); } if (trailerPresent && this.inputStream.getPos() > this.walEditsStopOffset) { LOG.error( @@ -437,8 +427,10 @@ protected boolean readNext(Entry entry) throws IOException { } catch (EOFException eof) { // If originalPosition is < 0, it is rubbish and we cannot use it (probably local fs) if (originalPosition < 0) { - LOG.debug("Encountered a malformed edit, but can't seek back to last good position " - + "because originalPosition is negative. last offset={}", this.inputStream.getPos(), eof); + LOG.debug( + "Encountered a malformed edit, but can't seek back to last good position " + + "because originalPosition is negative. last offset={}", + this.inputStream.getPos(), eof); throw eof; } // If stuck at the same place and we got an exception, lets go back at the beginning. @@ -467,12 +459,14 @@ private IOException extractHiddenEof(Exception ex) { // for EOF, not EOFException; and scanner further hides it inside RuntimeException. IOException ioEx = null; if (ex instanceof EOFException) { - return (EOFException)ex; + return (EOFException) ex; } else if (ex instanceof IOException) { - ioEx = (IOException)ex; - } else if (ex instanceof RuntimeException - && ex.getCause() != null && ex.getCause() instanceof IOException) { - ioEx = (IOException)ex.getCause(); + ioEx = (IOException) ex; + } else if ( + ex instanceof RuntimeException && ex.getCause() != null + && ex.getCause() instanceof IOException + ) { + ioEx = (IOException) ex.getCause(); } if ((ioEx != null) && (ioEx.getMessage() != null)) { if (ioEx.getMessage().contains("EOF")) return ioEx; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java index dd586b3e0a96..b15c9dd958c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,7 @@ * Writer for protobuf-based WAL. 
*/ @InterfaceAudience.Private -public class ProtobufLogWriter extends AbstractProtobufLogWriter - implements FSHLogProvider.Writer { +public class ProtobufLogWriter extends AbstractProtobufLogWriter implements FSHLogProvider.Writer { private static final Logger LOG = LoggerFactory.getLogger(ProtobufLogWriter.class); @@ -55,8 +54,8 @@ public class ProtobufLogWriter extends AbstractProtobufLogWriter @Override public void append(Entry entry) throws IOException { - entry.getKey().getBuilder(compressor). - setFollowingKvCount(entry.getEdit().size()).build().writeDelimitedTo(output); + entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() + .writeDelimitedTo(output); for (Cell cell : entry.getEdit().getCells()) { // cellEncoder must assume little about the stream, since we write PB and cells in turn. cellEncoder.write(cell); @@ -106,17 +105,13 @@ public FSDataOutputStream getStream() { @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { - FSDataOutputStreamBuilder builder = fs - .createFile(path) - .overwrite(overwritable) - .bufferSize(bufferSize) - .replication(replication) - .blockSize(blockSize); + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { + FSDataOutputStreamBuilder builder = fs.createFile(path).overwrite(overwritable) + .bufferSize(bufferSize).replication(replication).blockSize(blockSize); if (builder instanceof DistributedFileSystem.HdfsDataOutputStreamBuilder) { - this.output = ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder) - .replicate().build(); + this.output = + ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder).replicate().build(); } else { this.output = builder.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java index 90a1653a5140..8e84169a7a2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -35,7 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public abstract class ReaderBase implements AbstractFSWALProvider.Reader { private static final Logger LOG = LoggerFactory.getLogger(ReaderBase.class); protected Configuration conf; @@ -44,7 +42,7 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { protected long edit = 0; protected long fileLength; /** - * Compression context to use reading. Can be null if no compression. + * Compression context to use reading. Can be null if no compression. 
*/ protected CompressionContext compressionContext = null; protected boolean emptyCompressionContext = true; @@ -57,7 +55,7 @@ public ReaderBase() { @Override public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream stream) - throws IOException { + throws IOException { this.conf = conf; this.path = path; this.fs = fs; @@ -70,14 +68,15 @@ public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream try { if (compressionContext == null) { if (LOG.isDebugEnabled()) { - LOG.debug("Initializing compression context for {}: isRecoveredEdits={}" + - ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, - CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), hasValueCompression(), - getValueCompressionAlgorithm()); + LOG.debug( + "Initializing compression context for {}: isRecoveredEdits={}" + + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", + path, CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), + hasValueCompression(), getValueCompressionAlgorithm()); } - compressionContext = new CompressionContext(LRUDictionary.class, - CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), - hasValueCompression(), getValueCompressionAlgorithm()); + compressionContext = + new CompressionContext(LRUDictionary.class, CommonFSUtils.isRecoveredEdits(path), + hasTagCompression(), hasValueCompression(), getValueCompressionAlgorithm()); } else { compressionContext.clear(); } @@ -109,8 +108,7 @@ public Entry next(Entry reuse) throws IOException { // It is old ROOT table edit, ignore it LOG.info("Got an old ROOT edit, ignoring "); return next(e); - } - else throw iae; + } else throw iae; } edit++; if (compressionContext != null && emptyCompressionContext) { @@ -133,8 +131,8 @@ public void seek(long pos) throws IOException { } /** - * Initializes the log reader with a particular stream (may be null). - * Reader assumes ownership of the stream if not null and may use it. Called once. + * Initializes the log reader with a particular stream (may be null). Reader assumes ownership of + * the stream if not null and may use it. Called once. * @return the class name of cell Codec, null if such information is not available */ protected abstract String initReader(FSDataInputStream stream) throws IOException; @@ -149,6 +147,7 @@ public void seek(long pos) throws IOException { * @param cellCodecClsName class name of cell Codec */ protected abstract void initAfterCompression(String cellCodecClsName) throws IOException; + /** * @return Whether compression is enabled for this log. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java index dfef429455cb..b03540dcd88d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,9 @@ final class RingBufferTruck { public enum Type { - APPEND, SYNC, EMPTY + APPEND, + SYNC, + EMPTY } private Type type = Type.EMPTY; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java index e2d294ac1f23..ea56e0926b48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.crypto.Encryptor; @@ -26,6 +25,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -34,21 +34,22 @@ public class SecureAsyncProtobufLogWriter extends AsyncProtobufLogWriter { private Encryptor encryptor = null; public SecureAsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, - Class channelClass) { + Class channelClass) { super(eventLoopGroup, channelClass); } /* * @return class name which is recognized by hbase-1.x to avoid ProtobufLogReader throwing error: - * IOException: Got unknown writer class: SecureAsyncProtobufLogWriter + * IOException: Got unknown writer class: SecureAsyncProtobufLogWriter */ @Override protected String getWriterClassName() { return "SecureProtobufLogWriter"; } + @Override protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { return super.buildSecureWALHeader(conf, builder); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java index e43d140826c0..863739c72f2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -23,20 +22,20 @@ import java.security.KeyException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Decryptor; import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.EncryptionTest; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecureProtobufLogReader extends ProtobufLogReader { @@ -59,7 +58,7 @@ public List getWriterClsNames() { @Override protected WALHdrContext readHeader(WALHeader.Builder builder, FSDataInputStream stream) - throws IOException { + throws IOException { WALHdrContext hdrCtxt = super.readHeader(builder, stream); WALHdrResult result = hdrCtxt.getResult(); // We need to unconditionally handle the case where the WAL has a key in @@ -89,8 +88,8 @@ protected WALHdrContext readHeader(WALHeader.Builder builder, FSDataInputStream } } if (key == null) { - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // Then, try the cluster master key key = EncryptionUtil.unwrapWALKey(conf, masterKeyName, keyBytes); @@ -100,8 +99,7 @@ protected WALHdrContext readHeader(WALHeader.Builder builder, FSDataInputStream if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = EncryptionUtil.unwrapWALKey(conf, alternateKeyName, keyBytes); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java index eb8c591a15e2..0928f148de0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.crypto.Encryptor; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -33,7 +32,7 @@ public class SecureProtobufLogWriter extends ProtobufLogWriter { @Override protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) - throws IOException { + throws IOException { return super.buildSecureWALHeader(conf, builder); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java index 6d2bd61a0234..4201dd07533d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java @@ -22,14 +22,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.commons.io.IOUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.io.ByteBufferWriterOutputStream; import org.apache.hadoop.hbase.io.crypto.Decryptor; @@ -37,6 +35,7 @@ import org.apache.hadoop.hbase.io.crypto.Encryptor; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A WALCellCodec that encrypts the WALedits. 
@@ -91,8 +90,7 @@ protected Cell parseCell() throws IOException { // encoder supports that just read the remainder in directly if (ivLength != this.iv.length) { - throw new IOException("Incorrect IV length: expected=" + iv.length + " have=" + - ivLength); + throw new IOException("Incorrect IV length: expected=" + iv.length + " have=" + ivLength); } IOUtils.readFully(in, this.iv); @@ -124,12 +122,12 @@ protected Cell parseCell() throws IOException { // Row int elemLen = StreamUtils.readRawVarint32(cin); - pos = Bytes.putShort(backingArray, pos, (short)elemLen); + pos = Bytes.putShort(backingArray, pos, (short) elemLen); IOUtils.readFully(cin, backingArray, pos, elemLen); pos += elemLen; // Family elemLen = StreamUtils.readRawVarint32(cin); - pos = Bytes.putByte(backingArray, pos, (byte)elemLen); + pos = Bytes.putByte(backingArray, pos, (byte) elemLen); IOUtils.readFully(cin, backingArray, pos, elemLen); pos += elemLen; // Qualifier diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 6be95391819b..3040655e813f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,41 +55,42 @@ class SequenceIdAccounting { /** * This lock ties all operations on {@link SequenceIdAccounting#flushingSequenceIds} and - * {@link #lowestUnflushedSequenceIds} Maps. {@link #lowestUnflushedSequenceIds} has the - * lowest outstanding sequence ids EXCEPT when flushing. When we flush, the current - * lowest set for the region/column family are moved (atomically because of this lock) to + * {@link #lowestUnflushedSequenceIds} Maps. {@link #lowestUnflushedSequenceIds} has the lowest + * outstanding sequence ids EXCEPT when flushing. When we flush, the current lowest set for the + * region/column family are moved (atomically because of this lock) to * {@link #flushingSequenceIds}. - * - *

<p>The two Maps are tied by this locking object EXCEPT when we go to update the lowest - * entry; see {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on - * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest - * sequence id if we find that there is no entry for the current column family. There will be no - * entry only if we just came up OR we have moved aside current set of lowest sequence ids - * because the current set are being flushed (by putting them into {@link #flushingSequenceIds}). - * This is how we pick up the next 'lowest' sequence id per region per column family to be used - * figuring what is in the next flush. + * <p>
+ * The two Maps are tied by this locking object EXCEPT when we go to update the lowest entry; see + * {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on + * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest sequence id + * if we find that there is no entry for the current column family. There will be no entry only if + * we just came up OR we have moved aside current set of lowest sequence ids because the current + * set are being flushed (by putting them into {@link #flushingSequenceIds}). This is how we pick + * up the next 'lowest' sequence id per region per column family to be used figuring what is in + * the next flush. */ private final Object tieLock = new Object(); /**
- * Map of encoded region names and family names to their OLDEST -- i.e. their first, - * the longest-lived, their 'earliest', the 'lowest' -- sequence id. - * - * <p>When we flush, the current lowest sequence ids get cleared and added to - * {@link #flushingSequenceIds}. The next append that comes in, is then added - * here to {@link #lowestUnflushedSequenceIds} as the next lowest sequenceid. - * - * <p>If flush fails, currently server is aborted so no need to restore previous sequence ids. - * <p>Needs to be concurrent Maps because we use putIfAbsent updating oldest.
+ * Map of encoded region names and family names to their OLDEST -- i.e. their first, the + * longest-lived, their 'earliest', the 'lowest' -- sequence id. + * <p>
+ * When we flush, the current lowest sequence ids get cleared and added to + * {@link #flushingSequenceIds}. The next append that comes in, is then added here to + * {@link #lowestUnflushedSequenceIds} as the next lowest sequenceid. + * <p>
+ * If flush fails, currently server is aborted so no need to restore previous sequence ids. + * <p>
    + * Needs to be concurrent Maps because we use putIfAbsent updating oldest. */ - private final ConcurrentMap> - lowestUnflushedSequenceIds = new ConcurrentHashMap<>(); + private final ConcurrentMap> lowestUnflushedSequenceIds = new ConcurrentHashMap<>(); /** * Map of encoded region names and family names to their lowest or OLDEST sequence/edit id * currently being flushed out to hfiles. Entries are moved here from - * {@link #lowestUnflushedSequenceIds} while the lock {@link #tieLock} is held - * (so movement between the Maps is atomic). + * {@link #lowestUnflushedSequenceIds} while the lock {@link #tieLock} is held (so movement + * between the Maps is atomic). */ private final Map> flushingSequenceIds = new HashMap<>(); @@ -108,8 +109,8 @@ class SequenceIdAccounting { /** * Returns the lowest unflushed sequence id for the region. - * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will - * return {@link HConstants#NO_SEQNUM} when none. + * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will return + * {@link HConstants#NO_SEQNUM} when none. */ long getLowestSequenceId(final byte[] encodedRegionName) { synchronized (this.tieLock) { @@ -150,7 +151,7 @@ long getLowestSequenceId(final byte[] encodedRegionName, final byte[] familyName /** * Reset the accounting of highest sequenceid by regionname. * @return Return the previous accounting Map of regions to the last sequence id written into - * each. + * each. */ Map resetHighest() { Map old = this.highestSequenceIds; @@ -160,15 +161,11 @@ Map resetHighest() { /** * We've been passed a new sequenceid for the region. Set it as highest seen for this region and - * if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing - * currently older. - * @param encodedRegionName - * @param families - * @param sequenceid - * @param lowest Whether to keep running account of oldest sequence id. + * if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing currently + * older. nnn * @param lowest Whether to keep running account of oldest sequence id. */ void update(byte[] encodedRegionName, Set families, long sequenceid, - final boolean lowest) { + final boolean lowest) { Long l = Long.valueOf(sequenceid); this.highestSequenceIds.put(encodedRegionName, l); if (lowest) { @@ -207,7 +204,7 @@ void onRegionClose(byte[] encodedRegionName) { * Update the store sequence id, e.g., upon executing in-memory compaction */ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceId, - boolean onlyIfGreater) { + boolean onlyIfGreater) { if (sequenceId == null) { return; } @@ -250,8 +247,8 @@ ConcurrentMap getOrCreateLowestSequenceIds(byte[] enco */ private static long getLowestSequenceId(Map sequenceids) { long lowest = HConstants.NO_SEQNUM; - for (Map.Entry entry : sequenceids.entrySet()){ - if (entry.getKey().toString().equals("METAFAMILY")){ + for (Map.Entry entry : sequenceids.entrySet()) { + if (entry.getKey().toString().equals("METAFAMILY")) { continue; } Long sid = entry.getValue(); @@ -263,9 +260,8 @@ private static long getLowestSequenceId(Map sequenceids) { } /** - * @param src - * @return New Map that has same keys as src but instead of a Map for a value, it - * instead has found the smallest sequence id and it returns that as the value instead. + * n * @return New Map that has same keys as src but instead of a Map for a value, it + * instead has found the smallest sequence id and it returns that as the value instead. 
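The accounting flow spelled out in the SequenceIdAccounting comments above can be illustrated with a short sketch. It is hypothetical and not part of the patch: it assumes same-package access (the class and its methods are package-private), the implicit no-arg constructor, and an invented class name; it uses only methods that appear in this diff.

package org.apache.hadoop.hbase.regionserver.wal;

import java.util.Collections;
import java.util.Set;
import org.apache.hadoop.hbase.util.Bytes;

public class SequenceIdAccountingSketch {
  public static void main(String[] args) {
    SequenceIdAccounting acct = new SequenceIdAccounting();
    byte[] region = Bytes.toBytes("region-a");
    Set<byte[]> families = Collections.singleton(Bytes.toBytes("cf"));

    // Record two appends; with lowest=true the first id is kept as the lowest unflushed id
    // for 'cf' (putIfAbsent), while the highest seen id keeps advancing.
    acct.update(region, families, 42L, true);
    acct.update(region, families, 43L, true);
    System.out.println(acct.getLowestSequenceId(region)); // expected: 42

    // Starting a flush moves the current lowest ids for the flushed families aside into the
    // flushing map, atomically under the tie lock described above.
    acct.startCacheFlush(region, families);
  }
}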
*/ private > Map flattenToLowestSequenceId(Map src) { if (src == null || src.isEmpty()) { @@ -283,19 +279,19 @@ private > Map flattenToLowestSequenceId(Map /** * @param encodedRegionName Region to flush. - * @param families Families to flush. May be a subset of all families in the region. - * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if - * we are flushing a subset of all families but there are no edits in those families not - * being flushed; in other words, this is effectively same as a flush of all of the region - * though we were passed a subset of regions. Otherwise, it returns the sequence id of the - * oldest/lowest outstanding edit. + * @param families Families to flush. May be a subset of all families in the region. + * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are + * flushing a subset of all families but there are no edits in those families not being + * flushed; in other words, this is effectively same as a flush of all of the region + * though we were passed a subset of regions. Otherwise, it returns the sequence id of the + * oldest/lowest outstanding edit. */ Long startCacheFlush(final byte[] encodedRegionName, final Set families) { - Map familytoSeq = new HashMap<>(); - for (byte[] familyName : families){ - familytoSeq.put(familyName,HConstants.NO_SEQNUM); + Map familytoSeq = new HashMap<>(); + for (byte[] familyName : families) { + familytoSeq.put(familyName, HConstants.NO_SEQNUM); } - return startCacheFlush(encodedRegionName,familytoSeq); + return startCacheFlush(encodedRegionName, familytoSeq); } Long startCacheFlush(final byte[] encodedRegionName, final Map familyToSeq) { @@ -311,7 +307,7 @@ Long startCacheFlush(final byte[] encodedRegionName, final Map fam for (Map.Entry entry : familyToSeq.entrySet()) { ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap((byte[]) entry.getKey()); Long seqId = null; - if(entry.getValue() == HConstants.NO_SEQNUM) { + if (entry.getValue() == HConstants.NO_SEQNUM) { seqId = m.remove(familyNameWrapper); } else { seqId = m.replace(familyNameWrapper, entry.getValue()); @@ -325,8 +321,8 @@ Long startCacheFlush(final byte[] encodedRegionName, final Map fam } if (oldSequenceIds != null && !oldSequenceIds.isEmpty()) { if (this.flushingSequenceIds.put(encodedRegionName, oldSequenceIds) != null) { - LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + - ", sequenceid=" + oldSequenceIds); + LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + + ", sequenceid=" + oldSequenceIds); } } if (m.isEmpty()) { @@ -398,7 +394,7 @@ void abortCacheFlush(final byte[] encodedRegionName) { flushing = this.flushingSequenceIds.remove(encodedRegionName); if (flushing != null) { Map unflushed = getOrCreateLowestSequenceIds(encodedRegionName); - for (Map.Entry e: flushing.entrySet()) { + for (Map.Entry e : flushing.entrySet()) { // Set into unflushed the 'old' oldest sequenceid and if any value in flushed with this // value, it will now be in tmpMap. 
tmpMap.put(e.getKey(), unflushed.put(e.getKey(), e.getValue())); @@ -412,9 +408,9 @@ void abortCacheFlush(final byte[] encodedRegionName) { for (Map.Entry e : flushing.entrySet()) { Long currentId = tmpMap.get(e.getKey()); if (currentId != null && currentId.longValue() < e.getValue().longValue()) { - String errorStr = Bytes.toString(encodedRegionName) + " family " - + e.getKey().toString() + " acquired edits out of order current memstore seq=" - + currentId + ", previous oldest unflushed id=" + e.getValue(); + String errorStr = Bytes.toString(encodedRegionName) + " family " + e.getKey().toString() + + " acquired edits out of order current memstore seq=" + currentId + + ", previous oldest unflushed id=" + e.getValue(); LOG.error(errorStr); Runtime.getRuntime().halt(1); } @@ -425,10 +421,10 @@ void abortCacheFlush(final byte[] encodedRegionName) { /** * See if passed sequenceids are lower -- i.e. earlier -- than any outstanding * sequenceids, sequenceids we are holding on to in this accounting instance. - * @param sequenceids Keyed by encoded region name. Cannot be null (doesn't make sense for it to - * be null). + * @param sequenceids Keyed by encoded region name. Cannot be null (doesn't make sense for it to + * be null). * @param keysBlocking An optional collection that is used to return the specific keys that are - * causing this method to return false. + * causing this method to return false. * @return true if all sequenceids are lower, older than, the old sequenceids in this instance. */ boolean areAllLower(Map sequenceids, Collection keysBlocking) { @@ -465,9 +461,8 @@ boolean areAllLower(Map sequenceids, Collection keysBlocki /** * Iterates over the given Map and compares sequence ids with corresponding entries in - * {@link #lowestUnflushedSequenceIds}. If a region in - * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in - * sequenceids then return it. + * {@link #lowestUnflushedSequenceIds}. If a region in {@link #lowestUnflushedSequenceIds} has a + * sequence id less than that passed in sequenceids then return it. * @param sequenceids Sequenceids keyed by encoded region name. * @return stores of regions found in this instance with sequence ids less than those passed in. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java index 862e91826b5f..90825d4884c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java @@ -93,9 +93,7 @@ class SyncFuture { /** * Call this method to clear old usage and get it ready for new deploy. - * - * @param txid the new transaction id - * @return this + * @param txid the new transaction id n */ SyncFuture reset(long txid, boolean forceSync) { if (t != null && t != Thread.currentThread()) { @@ -114,8 +112,8 @@ SyncFuture reset(long txid, boolean forceSync) { @Override public String toString() { - return "done=" + isDone() + ", txid=" + this.txid + " threadID=" + t.getId() + - " threadName=" + t.getName(); + return "done=" + isDone() + ", txid=" + this.txid + " threadID=" + t.getId() + " threadName=" + + t.getName(); } long getTxid() { @@ -137,7 +135,7 @@ Thread getThread() { /** * @param txid the transaction id at which this future 'completed'. - * @param t Can be null. Set if we are 'completing' on error (and this 't' is the error). + * @param t Can be null. 
Set if we are 'completing' on error (and this 't' is the error). * @return True if we successfully marked this outstanding future as completed/done. Returns false * if this future is already 'done' when this method called. */ @@ -152,7 +150,7 @@ boolean done(final long txid, final Throwable t) { // Something badly wrong. if (throwable == null) { this.throwable = - new IllegalStateException("done txid=" + txid + ", my txid=" + this.txid); + new IllegalStateException("done txid=" + txid + ", my txid=" + this.txid); } } // Mark done. @@ -164,15 +162,14 @@ boolean done(final long txid, final Throwable t) { } } - long get(long timeoutNs) throws InterruptedException, - ExecutionException, TimeoutIOException { + long get(long timeoutNs) throws InterruptedException, ExecutionException, TimeoutIOException { doneLock.lock(); try { while (doneTxid == NOT_DONE) { if (!doneCondition.await(timeoutNs, TimeUnit.NANOSECONDS)) { - throw new TimeoutIOException("Failed to get sync result after " - + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + " ms for txid=" + this.txid - + ", WAL system stuck?"); + throw new TimeoutIOException( + "Failed to get sync result after " + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + " ms for txid=" + this.txid + ", WAL system stuck?"); } } if (this.throwable != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java index de3188f08976..986b9ca036d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java @@ -21,22 +21,18 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; /** - * A cache of {@link SyncFuture}s. This class supports two methods - * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer()}. - * - * Usage pattern: - * SyncFuture sf = syncFutureCache.getIfPresentOrNew(); - * sf.reset(...); - * // Use the sync future - * finally: syncFutureCache.offer(sf); - * - * Offering the sync future back to the cache makes it eligible for reuse within the same thread - * context. Cache keyed by the accessing thread instance and automatically invalidated if it remains - * unused for {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. + * A cache of {@link SyncFuture}s. This class supports two methods + * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer()}. Usage pattern: + * SyncFuture sf = syncFutureCache.getIfPresentOrNew(); sf.reset(...); // Use the sync future + * finally: syncFutureCache.offer(sf); Offering the sync future back to the cache makes it eligible + * for reuse within the same thread context. Cache keyed by the accessing thread instance and + * automatically invalidated if it remains unused for + * {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. 
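The reformatted javadoc above squeezes the SyncFutureCache usage pattern onto one line; spelled out, it looks roughly like the sketch below. Illustrative only: it assumes same-package access (SyncFuture and its reset method are package-private), a caller-chosen txid, and an invented class name.

package org.apache.hadoop.hbase.regionserver.wal;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SyncFutureCacheUsageSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    SyncFutureCache cache = new SyncFutureCache(conf);

    SyncFuture future = cache.getIfPresentOrNew(); // reuse this thread's cached future, or make one
    try {
      future.reset(1L, false);                     // rearm for txid=1, no forced sync
      // ... hand the future to the WAL sync machinery and wait on it ...
    } finally {
      cache.offer(future);                         // return it so the same thread can reuse it later
    }
  }
}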
*/ @InterfaceAudience.Private public final class SyncFutureCache { @@ -47,9 +43,9 @@ public final class SyncFutureCache { public SyncFutureCache(final Configuration conf) { final int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); syncFutureCache = CacheBuilder.newBuilder().initialCapacity(handlerCount) - .expireAfterWrite(SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS, TimeUnit.MINUTES).build(); + .expireAfterWrite(SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS, TimeUnit.MINUTES).build(); } public SyncFuture getIfPresentOrNew() { @@ -71,4 +67,4 @@ public void clear() { syncFutureCache.invalidateAll(); } } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java index c109a1b4bdd0..901ada78015c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,8 +25,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Get notification of WAL events. The invocations are inline - * so make sure your implementation is fast else you'll slow hbase. + * Get notification of WAL events. The invocations are inline so make sure your implementation is + * fast else you'll slow hbase. */ @InterfaceAudience.Private public interface WALActionsListener { @@ -45,67 +44,75 @@ static enum RollRequestReason { }; /** - * The WAL is going to be rolled. The oldPath can be null if this is - * the first log file from the regionserver. + * The WAL is going to be rolled. The oldPath can be null if this is the first log file from the + * regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void preLogRoll(Path oldPath, Path newPath) throws IOException {} + default void preLogRoll(Path oldPath, Path newPath) throws IOException { + } /** - * The WAL has been rolled. The oldPath can be null if this is - * the first log file from the regionserver. + * The WAL has been rolled. The oldPath can be null if this is the first log file from the + * regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void postLogRoll(Path oldPath, Path newPath) throws IOException {} + default void postLogRoll(Path oldPath, Path newPath) throws IOException { + } /** * The WAL is going to be archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void preLogArchive(Path oldPath, Path newPath) throws IOException {} + default void preLogArchive(Path oldPath, Path newPath) throws IOException { + } /** * The WAL has been archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void postLogArchive(Path oldPath, Path newPath) throws IOException {} + default void postLogArchive(Path oldPath, Path newPath) throws IOException { + } /** * A request was made that the WAL be rolled. 
*/ - default void logRollRequested(RollRequestReason reason) {} + default void logRollRequested(RollRequestReason reason) { + } /** * The WAL is about to close. */ - default void logCloseRequested() {} + default void logCloseRequested() { + } /** - * Called before each write. - */ - default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {} + * Called before each write. + */ + default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { + } /** - * For notification post append to the writer. Used by metrics system at least. - * TODO: Combine this with above. - * @param entryLen approx length of cells in this append. + * For notification post append to the writer. Used by metrics system at least. TODO: Combine this + * with above. + * @param entryLen approx length of cells in this append. * @param elapsedTimeMillis elapsed time in milliseconds. - * @param logKey A WAL key - * @param logEdit A WAL edit containing list of cells. + * @param logKey A WAL key + * @param logEdit A WAL edit containing list of cells. * @throws IOException if any network or I/O error occurred */ default void postAppend(final long entryLen, final long elapsedTimeMillis, final WALKey logKey, - final WALEdit logEdit) throws IOException {} + final WALEdit logEdit) throws IOException { + } /** - * For notification post writer sync. Used by metrics system at least. - * @param timeInNanos How long the filesystem sync took in nanoseconds. - * @param handlerSyncs How many sync handler calls were released by this call to filesystem - * sync. + * For notification post writer sync. Used by metrics system at least. + * @param timeInNanos How long the filesystem sync took in nanoseconds. + * @param handlerSyncs How many sync handler calls were released by this call to filesystem sync. */ - default void postSync(final long timeInNanos, final int handlerSyncs) {} + default void postSync(final long timeInNanos, final int handlerSyncs) { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 31eccc7a18af..5b60b10e128b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -21,14 +21,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.BaseDecoder; import org.apache.hadoop.hbase.codec.BaseEncoder; import org.apache.hadoop.hbase.codec.Codec; @@ -43,20 +41,19 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - /** - * Compression in this class is lifted off Compressor/KeyValueCompression. - * This is a pure coincidence... they are independent and don't have to be compatible. 
- * - * This codec is used at server side for writing cells to WAL as well as for sending edits - * as part of the distributed splitting process. + * Compression in this class is lifted off Compressor/KeyValueCompression. This is a pure + * coincidence... they are independent and don't have to be compatible. This codec is used at server + * side for writing cells to WAL as well as for sending edits as part of the distributed splitting + * process. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, - HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.CONFIG }) public class WALCellCodec implements Codec { /** Configuration key for the class to use when encoding cells in the WAL */ public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; @@ -73,9 +70,9 @@ public WALCellCodec() { /** * Default constructor - all subclasses must implement a constructor with this signature * if they are to be dynamically loaded from the {@link Configuration}. - * @param conf configuration to configure this + * @param conf configuration to configure this * @param compression compression the codec should support, can be null to indicate no - * compression + * compression */ public WALCellCodec(Configuration conf, CompressionContext compression) { this.compression = compression; @@ -87,42 +84,41 @@ public static Class getWALCellCodecClass(Configuration conf) { /** * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and - * CompressionContext, if {@code cellCodecClsName} is specified. - * Otherwise Cell Codec classname is read from {@link Configuration}. - * Fully prepares the codec for use. - * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, - * uses a {@link WALCellCodec}. + * CompressionContext, if {@code cellCodecClsName} is specified. Otherwise Cell Codec classname is + * read from {@link Configuration}. Fully prepares the codec for use. + * @param conf {@link Configuration} to read for the user-specified codec. If none is + * specified, uses a {@link WALCellCodec}. * @param cellCodecClsName name of codec - * @param compression compression the codec should use + * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated */ public static WALCellCodec create(Configuration conf, String cellCodecClsName, - CompressionContext compression) throws UnsupportedOperationException { + CompressionContext compression) throws UnsupportedOperationException { if (cellCodecClsName == null) { cellCodecClsName = getWALCellCodecClass(conf).getName(); } - return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[] - { Configuration.class, CompressionContext.class }, new Object[] { conf, compression }); + return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, + new Class[] { Configuration.class, CompressionContext.class }, + new Object[] { conf, compression }); } /** - * Create and setup a {@link WALCellCodec} from the - * CompressionContext. - * Cell Codec classname is read from {@link Configuration}. - * Fully prepares the codec for use. - * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, - * uses a {@link WALCellCodec}. 
+ * Create and setup a {@link WALCellCodec} from the CompressionContext. Cell Codec classname is + * read from {@link Configuration}. Fully prepares the codec for use. + * @param conf {@link Configuration} to read for the user-specified codec. If none is + * specified, uses a {@link WALCellCodec}. * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated */ - public static WALCellCodec create(Configuration conf, - CompressionContext compression) throws UnsupportedOperationException { + public static WALCellCodec create(Configuration conf, CompressionContext compression) + throws UnsupportedOperationException { String cellCodecClsName = getWALCellCodecClass(conf).getName(); - return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[] - { Configuration.class, CompressionContext.class }, new Object[] { conf, compression }); + return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, + new Class[] { Configuration.class, CompressionContext.class }, + new Object[] { conf, compression }); } public interface ByteStringCompressor { @@ -152,6 +148,7 @@ static class BaosAndCompressor extends ByteArrayOutputStream implements ByteStri public BaosAndCompressor(CompressionContext compressionContext) { this.compressionContext = compressionContext; } + public ByteString toByteString() { // We need this copy to create the ByteString as the byte[] 'buf' is not immutable. We reuse // them. @@ -200,7 +197,7 @@ public byte[] uncompress(ByteString data, Enum dictIndex) { private static byte[] uncompressByteString(ByteString bs, Dictionary dict) throws IOException { InputStream in = bs.newInput(); - byte status = (byte)in.read(); + byte status = (byte) in.read(); if (status == Dictionary.NOT_IN_DICTIONARY) { byte[] arr = new byte[StreamUtils.readRawVarint32(in)]; int bytesRead = in.read(arr); @@ -211,7 +208,7 @@ private static byte[] uncompressByteString(ByteString bs, Dictionary dict) throw return arr; } else { // Status here is the higher-order byte of index of the dictionary entry. 
- short dictIdx = StreamUtils.toShort(status, (byte)in.read()); + short dictIdx = StreamUtils.toShort(status, (byte) in.read()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { throw new IOException("Missing dictionary entry for index " + dictIdx); @@ -224,6 +221,7 @@ static class CompressedKvEncoder extends BaseEncoder { private final CompressionContext compression; private final boolean hasValueCompression; private final boolean hasTagCompression; + public CompressedKvEncoder(OutputStream out, CompressionContext compression) { super(out); this.compression = compression; @@ -278,6 +276,7 @@ static class CompressedKvDecoder extends BaseDecoder { private final CompressionContext compression; private final boolean hasValueCompression; private final boolean hasTagCompression; + public CompressedKvDecoder(InputStream in, CompressionContext compression) { super(in); this.compression = compression; @@ -291,7 +290,7 @@ protected Cell parseCell() throws IOException { int vlength = StreamUtils.readRawVarint32(in); int tagsLength = StreamUtils.readRawVarint32(in); int length = 0; - if(tagsLength == 0) { + if (tagsLength == 0) { length = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE + keylength + vlength; } else { length = KeyValue.KEYVALUE_WITH_TAGS_INFRASTRUCTURE_SIZE + keylength + vlength + tagsLength; @@ -306,14 +305,14 @@ protected Cell parseCell() throws IOException { int elemLen = readIntoArray(backingArray, pos + Bytes.SIZEOF_SHORT, compression.getDictionary(CompressionContext.DictionaryIndex.ROW)); checkLength(elemLen, Short.MAX_VALUE); - pos = Bytes.putShort(backingArray, pos, (short)elemLen); + pos = Bytes.putShort(backingArray, pos, (short) elemLen); pos += elemLen; // family elemLen = readIntoArray(backingArray, pos + Bytes.SIZEOF_BYTE, compression.getDictionary(CompressionContext.DictionaryIndex.FAMILY)); checkLength(elemLen, Byte.MAX_VALUE); - pos = Bytes.putByte(backingArray, pos, (byte)elemLen); + pos = Bytes.putByte(backingArray, pos, (byte) elemLen); pos += elemLen; // qualifier @@ -329,7 +328,7 @@ protected Cell parseCell() throws IOException { if (tagsLength > 0) { typeValLen = typeValLen - tagsLength - KeyValue.TAGS_LENGTH_SIZE; } - pos = Bytes.putByte(backingArray, pos, (byte)in.read()); + pos = Bytes.putByte(backingArray, pos, (byte) in.read()); int valLen = typeValLen - 1; if (hasValueCompression) { readCompressedValue(in, backingArray, pos, valLen); @@ -351,7 +350,7 @@ protected Cell parseCell() throws IOException { } private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOException { - byte status = (byte)in.read(); + byte status = (byte) in.read(); if (status == Dictionary.NOT_IN_DICTIONARY) { // status byte indicating that data to be read is not in dictionary. // if this isn't in the dictionary, we need to add to the dictionary. @@ -361,7 +360,7 @@ private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOExcep return length; } else { // the status byte also acts as the higher order byte of the dictionary entry. 
- short dictIdx = StreamUtils.toShort(status, (byte)in.read()); + short dictIdx = StreamUtils.toShort(status, (byte) in.read()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { throw new IOException("Missing dictionary entry for index " + dictIdx); @@ -379,10 +378,10 @@ private static void checkLength(int len, int max) throws IOException { } private void readCompressedValue(InputStream in, byte[] outArray, int outOffset, - int expectedLength) throws IOException { + int expectedLength) throws IOException { int compressedLen = StreamUtils.readRawVarint32(in); - int read = compression.getValueCompressor().decompress(in, compressedLen, outArray, - outOffset, expectedLength); + int read = compression.getValueCompressor().decompress(in, compressedLen, outArray, outOffset, + expectedLength); if (read != expectedLength) { throw new IOException("ValueCompressor state error: short read"); } @@ -394,6 +393,7 @@ public static class EnsureKvEncoder extends BaseEncoder { public EnsureKvEncoder(OutputStream out) { super(out); } + @Override public void write(Cell cell) throws IOException { checkFlushed(); @@ -406,7 +406,8 @@ public void write(Cell cell) throws IOException { @Override public Decoder getDecoder(InputStream is) { return (compression == null) - ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression); + ? new KeyValueCodecWithTags.KeyValueDecoder(is) + : new CompressedKvDecoder(is, compression); } @Override @@ -416,8 +417,7 @@ public Decoder getDecoder(ByteBuff buf) { @Override public Encoder getEncoder(OutputStream os) { - os = (os instanceof ByteBufferWriter) ? os - : new ByteBufferWriterOutputStream(os); + os = (os instanceof ByteBufferWriter) ? os : new ByteBufferWriterOutputStream(os); if (compression == null) { return new EnsureKvEncoder(os); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java index ac6aad0a3815..15594743a20d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.hbase.regionserver.LogRoller; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.yetus.audience.InterfaceAudience; @@ -44,4 +43,4 @@ public WALClosedException() { public WALClosedException(String msg) { super(msg); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java index 40d6d0fc948a..c594122c29b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java @@ -1,6 +1,4 @@ - /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
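For the WALCellCodec changes just above, a minimal sketch of the factory described in its javadoc: a null codec class name falls back to the class configured under "hbase.regionserver.wal.codec" (WALCellCodec itself by default), and a null CompressionContext selects the plain KeyValue encoding. The class name and the in-memory round trip are invented for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

public class WalCellCodecSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Null class name: read the codec class from "hbase.regionserver.wal.codec".
    // Null CompressionContext: no dictionary or value compression.
    WALCellCodec codec = WALCellCodec.create(conf, null, null);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(out); // wrapped in a ByteBufferWriterOutputStream
    // ... encoder.write(cell) for each cell, then encoder.flush() ...

    Codec.Decoder decoder = codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    // ... decoder.advance() / decoder.current() to read the cells back ...
  }
}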
See the NOTICE file * distributed with this work for additional information @@ -9,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; import java.lang.reflect.InvocationTargetException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.RegionInfo; @@ -41,12 +37,11 @@ import org.slf4j.LoggerFactory; /** - * Implements the coprocessor environment and runtime support for coprocessors - * loaded within a {@link WAL}. + * Implements the coprocessor environment and runtime support for coprocessors loaded within a + * {@link WAL}. */ @InterfaceAudience.Private -public class WALCoprocessorHost - extends CoprocessorHost { +public class WALCoprocessorHost extends CoprocessorHost { private static final Logger LOG = LoggerFactory.getLogger(WALCoprocessorHost.class); /** @@ -66,18 +61,18 @@ public WAL getWAL() { /** * Constructor - * @param impl the coprocessor instance + * @param impl the coprocessor instance * @param priority chaining priority - * @param seq load sequence - * @param conf configuration - * @param wal WAL + * @param seq load sequence + * @param conf configuration + * @param wal WAL */ private WALEnvironment(final WALCoprocessor impl, final int priority, final int seq, - final Configuration conf, final WAL wal) { + final Configuration conf, final WAL wal) { super(impl, priority, seq, conf); this.wal = wal; - this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor( - impl.getClass().getName()); + this.metricRegistry = + MetricsCoprocessor.createRegistryForWALCoprocessor(impl.getClass().getName()); } @Override @@ -96,7 +91,7 @@ public void shutdown() { /** * Constructor - * @param log the write ahead log + * @param log the write ahead log * @param conf the configuration */ public WALCoprocessorHost(final WAL log, final Configuration conf) { @@ -114,13 +109,13 @@ public WALCoprocessorHost(final WAL log, final Configuration conf) { @Override public WALEnvironment createEnvironment(final WALCoprocessor instance, final int priority, - final int seq, final Configuration conf) { + final int seq, final Configuration conf) { return new WALEnvironment(instance, priority, seq, conf, this.wal); } @Override - public WALCoprocessor checkAndGetInstance(Class implClass) throws IllegalAccessException, - InstantiationException { + public WALCoprocessor checkAndGetInstance(Class implClass) + throws IllegalAccessException, InstantiationException { if (WALCoprocessor.class.isAssignableFrom(implClass)) { try { return implClass.asSubclass(WALCoprocessor.class).getDeclaredConstructor().newInstance(); @@ -129,23 +124,22 @@ public WALCoprocessor checkAndGetInstance(Class implClass) throws IllegalAcce } } else { LOG.error(implClass.getName() + " is not of type WALCoprocessor. 
Check the " - + "configuration " + CoprocessorHost.WAL_COPROCESSOR_CONF_KEY); + + "configuration " + CoprocessorHost.WAL_COPROCESSOR_CONF_KEY); return null; } } private ObserverGetter walObserverGetter = - WALCoprocessor::getWALObserver; + WALCoprocessor::getWALObserver; - abstract class WALObserverOperation extends - ObserverOperationWithoutResult { + abstract class WALObserverOperation extends ObserverOperationWithoutResult { public WALObserverOperation() { super(walObserverGetter); } } public void preWALWrite(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) - throws IOException { + throws IOException { // Not bypassable. if (this.coprocEnvironments.isEmpty()) { return; @@ -159,7 +153,7 @@ public void call(WALObserver oserver) throws IOException { } public void postWALWrite(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) - throws IOException { + throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() { @Override protected void call(WALObserver observer) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java index 2076dd4fb35b..9db6a4d9e918 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -49,9 +48,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor; /** - * Helper methods to ease Region Server integration with the Write Ahead Log (WAL). - * Note that methods in this class specifically should not require access to anything - * other than the API found in {@link WAL}. For internal use only. + * Helper methods to ease Region Server integration with the Write Ahead Log (WAL). Note that + * methods in this class specifically should not require access to anything other than the API found + * in {@link WAL}. For internal use only. */ @InterfaceAudience.Private public class WALUtil { @@ -115,12 +114,13 @@ public static WALKeyImpl writeRegionEventMarker(WAL wal, } /** - * Write a log marker that a bulk load has succeeded and is about to be committed. - * This write is for internal use only. Not for external client consumption. - * @param wal The log to write into. + * Write a log marker that a bulk load has succeeded and is about to be committed. This write is + * for internal use only. Not for external client consumption. + * @param wal The log to write into. * @param replicationScope The replication scope of the families in the HRegion - * @param hri A description of the region in the table that we are bulk loading into. - * @param desc A protocol buffers based description of the client's bulk loading request + * @param hri A description of the region in the table that we are bulk loading into. + * @param desc A protocol buffers based description of the client's bulk loading + * request * @return walKey with sequenceid filled out for this bulk load marker * @throws IOException We will throw an IOException if we can not append to the HLog. */ @@ -189,17 +189,17 @@ private static WALKeyImpl doFullMarkerAppendTransaction(final WAL wal, * @return Blocksize to use writing WALs. 
*/ public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir) - throws IOException { + throws IOException { return getWALBlockSize(conf, fs, dir, false); } /** * Public because of FSHLog. Should be package-private - * @param isRecoverEdits the created writer is for recovered edits or WAL. - * For recovered edits, it is true and for WAL it is false. + * @param isRecoverEdits the created writer is for recovered edits or WAL. For recovered edits, it + * is true and for WAL it is false. */ public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir, - boolean isRecoverEdits) throws IOException { + boolean isRecoverEdits) throws IOException { long defaultBlockSize = CommonFSUtils.getDefaultBlockSize(fs, dir) * 2; if (isRecoverEdits) { return conf.getLong("hbase.regionserver.recoverededits.blocksize", defaultBlockSize); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java index 56576a6cf3e1..1a39dd222a83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.io.IOException; import java.util.ArrayList; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService; @@ -38,30 +37,31 @@ public abstract class BaseReplicationEndpoint extends AbstractService implements ReplicationEndpoint { private static final Logger LOG = LoggerFactory.getLogger(BaseReplicationEndpoint.class); - public static final String REPLICATION_WALENTRYFILTER_CONFIG_KEY - = "hbase.replication.source.custom.walentryfilters"; + public static final String REPLICATION_WALENTRYFILTER_CONFIG_KEY = + "hbase.replication.source.custom.walentryfilters"; protected Context ctx; @Override public void init(Context context) throws IOException { this.ctx = context; - if (this.ctx != null){ + if (this.ctx != null) { ReplicationPeer peer = this.ctx.getReplicationPeer(); - if (peer != null){ + if (peer != null) { peer.registerPeerConfigListener(this); } else { - LOG.warn("Not tracking replication peer config changes for Peer Id " + this.ctx.getPeerId() + - " because there's no such peer"); + LOG.warn("Not tracking replication peer config changes for Peer Id " + this.ctx.getPeerId() + + " because there's no such peer"); } } } @Override /** - * No-op implementation for subclasses to override if they wish to execute logic if their config changes + * No-op implementation for subclasses to override if they wish to execute logic if their config + * changes */ - public void peerConfigUpdated(ReplicationPeerConfig rpc){ + public void peerConfigUpdated(ReplicationPeerConfig rpc) { } @@ -78,7 +78,8 @@ public WALEntryFilter getWALEntryfilter() { filters.add(tableCfFilter); } if (ctx != null && ctx.getPeerConfig() != null) { - String filterNameCSV = 
ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY); + String filterNameCSV = + ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY); if (filterNameCSV != null && !filterNameCSV.isEmpty()) { String[] filterNames = filterNameCSV.split(","); for (String filterName : filterNames) { @@ -94,14 +95,18 @@ public WALEntryFilter getWALEntryfilter() { return filters.isEmpty() ? null : new ChainWALEntryFilter(filters); } - /** Returns a WALEntryFilter for checking the scope. Subclasses can - * return null if they don't want this filter */ + /** + * Returns a WALEntryFilter for checking the scope. Subclasses can return null if they don't want + * this filter + */ protected WALEntryFilter getScopeWALEntryFilter() { return new ScopeWALEntryFilter(); } - /** Returns a WALEntryFilter for checking replication per table and CF. Subclasses can - * return null if they don't want this filter */ + /** + * Returns a WALEntryFilter for checking replication per table and CF. Subclasses can return null + * if they don't want this filter + */ protected WALEntryFilter getNamespaceTableCfWALEntryFilter() { return new NamespaceTableCfWALEntryFilter(ctx.getReplicationPeer()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java index 6814640dfe50..c06c6d19a654 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,10 +40,12 @@ public class BulkLoadCellFilter { private static final Logger LOG = LoggerFactory.getLogger(BulkLoadCellFilter.class); - private final ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + private final ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + /** * Filters the bulk load cell using the supplied predicate. - * @param cell The WAL cell to filter. + * @param cell The WAL cell to filter. * @param famPredicate Returns true of given family should be removed. * @return The filtered cell. 
*/ @@ -75,19 +77,13 @@ public Cell filterCell(Cell cell, Predicate famPredicate) { } else if (copiedStoresList.isEmpty()) { return null; } - BulkLoadDescriptor.Builder newDesc = - BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName()) - .setEncodedRegionName(bld.getEncodedRegionName()) - .setBulkloadSeqNum(bld.getBulkloadSeqNum()); + BulkLoadDescriptor.Builder newDesc = BulkLoadDescriptor.newBuilder() + .setTableName(bld.getTableName()).setEncodedRegionName(bld.getEncodedRegionName()) + .setBulkloadSeqNum(bld.getBulkloadSeqNum()); newDesc.addAllStores(copiedStoresList); BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build(); - return cellBuilder.clear() - .setRow(CellUtil.cloneRow(cell)) - .setFamily(WALEdit.METAFAMILY) - .setQualifier(WALEdit.BULK_LOAD) - .setTimestamp(cell.getTimestamp()) - .setType(cell.getTypeByte()) - .setValue(newBulkLoadDescriptor.toByteArray()) - .build(); + return cellBuilder.clear().setRow(CellUtil.cloneRow(cell)).setFamily(WALEdit.METAFAMILY) + .setQualifier(WALEdit.BULK_LOAD).setTimestamp(cell.getTimestamp()).setType(cell.getTypeByte()) + .setValue(newBulkLoadDescriptor.toByteArray()).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java index 492364780718..ee1c8afbdf28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -48,14 +48,13 @@ public WAL.Entry filter(WAL.Entry entry) { } /** - * To allow the empty entries to get filtered, we want to set this optional flag to decide - * if we want to filter the entries which have no cells or all cells got filtered - * though {@link WALCellFilter}. - * + * To allow the empty entries to get filtered, we want to set this optional flag to decide if we + * want to filter the entries which have no cells or all cells got filtered though + * {@link WALCellFilter}. * @param filterEmptyEntry flag */ @InterfaceAudience.Private public void setFilterEmptyEntry(final boolean filterEmptyEntry) { this.filterEmptyEntry = filterEmptyEntry; } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java index ae3c74ad4753..09f5b9083644 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.util.ArrayList; @@ -28,8 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A {@link WALEntryFilter} which contains multiple filters and applies them - * in chain order + * A {@link WALEntryFilter} which contains multiple filters and applies them in chain order */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public class ChainWALEntryFilter implements WALEntryFilter { @@ -37,7 +35,7 @@ public class ChainWALEntryFilter implements WALEntryFilter { private final WALEntryFilter[] filters; private WALCellFilter[] cellFilters; - public ChainWALEntryFilter(WALEntryFilter...filters) { + public ChainWALEntryFilter(WALEntryFilter... filters) { this.filters = filters; initCellFilters(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java index 5f92bbf3a65a..e05e79eab5a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,18 +18,16 @@ package org.apache.hadoop.hbase.replication; import java.util.UUID; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKeyImpl; -import org.apache.hadoop.hbase.wal.WAL.Entry; - +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Filters out entries with our peerClusterId (i.e. already replicated) - * and marks all other entries with our clusterID + * Filters out entries with our peerClusterId (i.e. already replicated) and marks all other entries + * with our clusterID */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) @InterfaceStability.Evolving @@ -40,22 +37,26 @@ public class ClusterMarkingEntryFilter implements WALEntryFilter { private ReplicationEndpoint replicationEndpoint; /** - * @param clusterId id of this cluster - * @param peerClusterId of the other cluster + * @param clusterId id of this cluster + * @param peerClusterId of the other cluster * @param replicationEndpoint ReplicationEndpoint which will handle the actual replication */ - public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId, ReplicationEndpoint replicationEndpoint) { + public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId, + ReplicationEndpoint replicationEndpoint) { this.clusterId = clusterId; this.peerClusterId = peerClusterId; this.replicationEndpoint = replicationEndpoint; } + @Override public Entry filter(Entry entry) { // don't replicate if the log entries have already been consumed by the cluster - if (replicationEndpoint.canReplicateToSameCluster() - || !entry.getKey().getClusterIds().contains(peerClusterId)) { + if ( + replicationEndpoint.canReplicateToSameCluster() + || !entry.getKey().getClusterIds().contains(peerClusterId) + ) { WALEdit edit = entry.getEdit(); - WALKeyImpl logKey = (WALKeyImpl)entry.getKey(); + WALKeyImpl logKey = (WALKeyImpl) entry.getKey(); if (edit != null && !edit.isEmpty()) { // Mark that the current cluster has the change diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 86786856f214..5b9c3da7cf8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import java.io.IOException; @@ -26,15 +25,15 @@ import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -48,8 +47,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * A {@link BaseReplicationEndpoint} for replication endpoints whose - * target cluster is an HBase cluster. + * A {@link BaseReplicationEndpoint} for replication endpoints whose target cluster is an HBase + * cluster. */ @InterfaceAudience.Private public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint @@ -65,15 +64,14 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private AsyncClusterConnection conn; /** - * Default maximum number of times a replication sink can be reported as bad before - * it will no longer be provided as a sink for replication without the pool of - * replication sinks being refreshed. + * Default maximum number of times a replication sink can be reported as bad before it will no + * longer be provided as a sink for replication without the pool of replication sinks being + * refreshed. */ public static final int DEFAULT_BAD_SINK_THRESHOLD = 3; /** - * Default ratio of the total number of peer cluster region servers to consider - * replicating to. + * Default ratio of the total number of peer cluster region servers to consider replicating to. */ public static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; @@ -94,8 +92,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * as protected for possible overridings. */ protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { - return ClusterConnectionFactory.createAsyncClusterConnection(conf, - null, User.getCurrent()); + return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent()); } @Override @@ -126,12 +123,13 @@ protected void disconnect() { } /** - * A private method used to re-establish a zookeeper session with a peer cluster. - * @param ke + * A private method used to re-establish a zookeeper session with a peer cluster. 
n */ private void reconnect(KeeperException ke) { - if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException - || ke instanceof AuthFailedException) { + if ( + ke instanceof ConnectionLossException || ke instanceof SessionExpiredException + || ke instanceof AuthFailedException + ) { String clusterKey = ctx.getPeerConfig().getClusterKey(); LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke); try { @@ -195,8 +193,8 @@ private void reloadZkWatcher() throws IOException { if (zkw != null) { zkw.close(); } - zkw = new ZKWatcher(ctx.getConfiguration(), - "connection to cluster: " + ctx.getPeerId(), this); + zkw = + new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); zkw.registerListener(new PeerRegionServerListener(this)); } } @@ -213,7 +211,7 @@ private void connectPeerCluster() throws IOException { @Override public void abort(String why, Throwable e) { LOG.error("The HBaseReplicationEndpoint corresponding to peer " + ctx.getPeerId() - + " was aborted for the following reason(s):" + why, e); + + " was aborted for the following reason(s):" + why, e); } @Override @@ -224,7 +222,6 @@ public boolean isAborted() { /** * Get the list of all the region servers from the specified peer - * * @return list of region server addresses or an empty list if the slave is unavailable */ protected List fetchSlavesAddresses() { @@ -282,11 +279,9 @@ protected synchronized SinkPeer getReplicationSink() throws IOException { } /** - * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it - * failed). If a single SinkPeer is reported as bad more than - * replication.bad.sink.threshold times, it will be removed + * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it failed). If a single + * SinkPeer is reported as bad more than replication.bad.sink.threshold times, it will be removed * from the pool of potential replication targets. - * * @param sinkPeer The SinkPeer that had a failed replication attempt on it */ protected synchronized void reportBadSink(SinkPeer sinkPeer) { @@ -301,10 +296,8 @@ protected synchronized void reportBadSink(SinkPeer sinkPeer) { } /** - * Report that a {@code SinkPeer} successfully replicated a chunk of data. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it + * Report that a {@code SinkPeer} successfully replicated a chunk of data. n * The SinkPeer that + * had a failed replication attempt on it */ protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) { badReportCounts.remove(sinkPeer.getServerName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java index 4fe04cd6ee5a..82ac9ebd1f32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.Cell; @@ -27,7 +26,6 @@ /** * Filter a WAL Entry by the peer config according to the table and family which it belongs to. - * * @see ReplicationPeerConfig#needToReplicate(TableName, byte[]) */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java index 81be5a3e3a00..8bf32baada22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -154,10 +154,10 @@ public List getParentRegionNames() { @Override public String toString() { - return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + - state + ", parentRegionNames=" + - parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + - "]"; + return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + state + + ", parentRegionNames=" + + parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + + "]"; } } @@ -203,8 +203,9 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co // TODO: we may look up a region which has already been split or merged so we need to check // whether the encoded name matches. Need to find a way to quit earlier when there is no // record for the given region, for now it will scan to the end of the table. - if (!Bytes.equals(encodedRegionName, - Bytes.toBytes(RegionInfo.encodeRegionName(regionName)))) { + if ( + !Bytes.equals(encodedRegionName, Bytes.toBytes(RegionInfo.encodeRegionName(regionName))) + ) { continue; } return getReplicationBarrierResult(result); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index 3fec8131d090..a0b550495073 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import java.io.IOException; @@ -23,29 +22,27 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.hbase.Abortable; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; +import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** - * ReplicationEndpoint is a plugin which implements replication - * to other HBase clusters, or other systems. ReplicationEndpoint implementation - * can be specified at the peer creation time by specifying it - * in the {@link ReplicationPeerConfig}. A ReplicationEndpoint is run in a thread - * in each region server in the same process. + * ReplicationEndpoint is a plugin which implements replication to other HBase clusters, or other + * systems. ReplicationEndpoint implementation can be specified at the peer creation time by + * specifying it in the {@link ReplicationPeerConfig}. A ReplicationEndpoint is run in a thread in + * each region server in the same process. *
    - * ReplicationEndpoint is closely tied to ReplicationSource in a producer-consumer - * relation. ReplicationSource is an HBase-private class which tails the logs and manages - * the queue of logs plus management and persistence of all the state for replication. - * ReplicationEndpoint on the other hand is responsible for doing the actual shipping - * and persisting of the WAL entries in the other cluster. + * ReplicationEndpoint is closely tied to ReplicationSource in a producer-consumer relation. + * ReplicationSource is an HBase-private class which tails the logs and manages the queue of logs + * plus management and persistence of all the state for replication. ReplicationEndpoint on the + * other hand is responsible for doing the actual shipping and persisting of the WAL entries in the + * other cluster. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationEndpoint extends ReplicationPeerConfigListener { @@ -67,9 +64,9 @@ class Context { @InterfaceAudience.Private public Context(final Server server, final Configuration localConf, final Configuration conf, - final FileSystem fs, final String peerId, final UUID clusterId, - final ReplicationPeer replicationPeer, final MetricsSource metrics, - final TableDescriptors tableDescriptors, final Abortable abortable) { + final FileSystem fs, final String peerId, final UUID clusterId, + final ReplicationPeer replicationPeer, final MetricsSource metrics, + final TableDescriptors tableDescriptors, final Abortable abortable) { this.server = server; this.localConf = localConf; this.conf = conf; @@ -134,14 +131,16 @@ public Abortable getAbortable() { */ void init(Context context) throws IOException; - /** Whether or not, the replication endpoint can replicate to it's source cluster with the same - * UUID */ + /** + * Whether or not, the replication endpoint can replicate to it's source cluster with the same + * UUID + */ boolean canReplicateToSameCluster(); /** - * Returns a UUID of the provided peer id. Every HBase cluster instance has a persisted - * associated UUID. If the replication is not performed to an actual HBase cluster (but - * some other system), the UUID returned has to uniquely identify the connected target system. + * Returns a UUID of the provided peer id. Every HBase cluster instance has a persisted associated + * UUID. If the replication is not performed to an actual HBase cluster (but some other system), + * the UUID returned has to uniquely identify the connected target system. * @return a UUID or null if the peer cluster does not exist or is not connected. */ UUID getPeerUUID(); @@ -162,6 +161,7 @@ static class ReplicateContext { int size; String walGroupId; int timeout; + @InterfaceAudience.Private public ReplicateContext() { } @@ -170,42 +170,46 @@ public ReplicateContext setEntries(List entries) { this.entries = entries; return this; } + public ReplicateContext setSize(int size) { this.size = size; return this; } + public ReplicateContext setWalGroupId(String walGroupId) { this.walGroupId = walGroupId; return this; } + public List getEntries() { return entries; } + public int getSize() { return size; } - public String getWalGroupId(){ + + public String getWalGroupId() { return walGroupId; } + public void setTimeout(int timeout) { this.timeout = timeout; } + public int getTimeout() { return this.timeout; } } /** - * Replicate the given set of entries (in the context) to the other cluster. - * Can block until all the given entries are replicated. 
Upon this method is returned, - * all entries that were passed in the context are assumed to be persisted in the - * target cluster. - * @param replicateContext a context where WAL entries and other - * parameters can be obtained. + * Replicate the given set of entries (in the context) to the other cluster. Can block until all + * the given entries are replicated. Upon this method is returned, all entries that were passed in + * the context are assumed to be persisted in the target cluster. + * @param replicateContext a context where WAL entries and other parameters can be obtained. */ boolean replicate(ReplicateContext replicateContext); - // The below methods are inspired by Guava Service. See // https://github.com/google/guava/wiki/ServiceExplained for overview of Guava Service. // Below we implement a subset only with different names on some methods so we can implement @@ -231,23 +235,24 @@ public int getTimeout() { /** * Waits for the {@link ReplicationEndpoint} to be up and running. - * * @throws IllegalStateException if the service reaches a state from which it is not possible to - * enter the (internal) running state. e.g. if the state is terminated when this method is - * called then this will throw an IllegalStateException. + * enter the (internal) running state. e.g. if the state is + * terminated when this method is called then this will throw an + * IllegalStateException. */ void awaitRunning(); /** - * Waits for the {@link ReplicationEndpoint} to to be up and running for no more - * than the given time. - * + * Waits for the {@link ReplicationEndpoint} to to be up and running for no more than the given + * time. * @param timeout the maximum time to wait - * @param unit the time unit of the timeout argument - * @throws TimeoutException if the service has not reached the given state within the deadline + * @param unit the time unit of the timeout argument + * @throws TimeoutException if the service has not reached the given state within the + * deadline * @throws IllegalStateException if the service reaches a state from which it is not possible to - * enter the (internal) running state. e.g. if the state is terminated when this method is - * called then this will throw an IllegalStateException. + * enter the (internal) running state. e.g. if the state is + * terminated when this method is called then this will throw an + * IllegalStateException. */ void awaitRunning(long timeout, TimeUnit unit) throws TimeoutException; @@ -260,25 +265,23 @@ public int getTimeout() { /** * Waits for the {@link ReplicationEndpoint} to reach the terminated (internal) state. - * * @throws IllegalStateException if the service FAILED. */ void awaitTerminated(); /** - * Waits for the {@link ReplicationEndpoint} to reach a terminal state for no - * more than the given time. - * + * Waits for the {@link ReplicationEndpoint} to reach a terminal state for no more than the given + * time. * @param timeout the maximum time to wait - * @param unit the time unit of the timeout argument - * @throws TimeoutException if the service has not reached the given state within the deadline + * @param unit the time unit of the timeout argument + * @throws TimeoutException if the service has not reached the given state within the + * deadline * @throws IllegalStateException if the service FAILED. */ void awaitTerminated(long timeout, TimeUnit unit) throws TimeoutException; /** * Returns the {@link Throwable} that caused this service to fail. 
- * * @throws IllegalStateException if this service's state isn't FAILED. */ Throwable failureCause(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java index edd567914dc7..7021bd27cfe7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSink; import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,17 +66,15 @@ public void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir WALFactory walFactory) throws IOException { this.server = server; this.conf = server.getConfiguration(); - this.statsPeriodInSecond = - this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); } @Override public void startReplicationService() throws IOException { this.replicationSink = new ReplicationSink(this.conf); - this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSinkStatistics", server, - (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); + this.server.getChoreService().scheduleChore(new ReplicationStatisticsChore( + "ReplicationSinkStatistics", server, (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java index f8722eb3da44..6dc41bcc014a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ private boolean hasGlobalScope(NavigableMap scopes, byte[] fami Integer scope = scopes.get(family); return scope != null && scope.intValue() == HConstants.REPLICATION_SCOPE_GLOBAL; } + @Override public Cell filterCell(Entry entry, Cell cell) { NavigableMap scopes = entry.getKey().getReplicationScopes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java index 3cda94a1c028..d71260cce5c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java @@ -15,8 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; + import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +27,6 @@ public class SystemTableWALEntryFilter implements WALEntryFilter { @Override public Entry filter(Entry entry) { - return entry.getKey().getTableName().isSystemTable()? null: entry; + return entry.getKey().getTableName().isSystemTable() ? null : entry; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java index 088827f4d2e3..229cec57e976 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java index 46b2f6cb4ddf..2e79fa35b0f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** * A filter for WAL entry cells before being sent over to replication. @@ -29,12 +29,12 @@ public interface WALCellFilter { /** - * Applies the filter, possibly returning a different Cell instance. - * If null is returned, the cell will be skipped. + * Applies the filter, possibly returning a different Cell instance. If null is returned, the cell + * will be skipped. * @param entry Entry which contains the cell - * @param cell Cell to filter - * @return a (possibly modified) Cell to use. Returning null will cause the cell - * to be skipped for replication. 
+ * @param cell Cell to filter + * @return a (possibly modified) Cell to use. Returning null will cause the cell to be skipped for + * replication. */ public Cell filterCell(Entry entry, Cell cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java index 23c1c60f2db1..8aa60f74ebba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java @@ -16,20 +16,22 @@ * limitations under the License. */ package org.apache.hadoop.hbase.replication; + import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; /** - * A Filter for WAL entries before being sent over to replication. Multiple - * filters might be chained together using {@link ChainWALEntryFilter}. - * Applied on the replication source side. - *
    There is also a filter that can be installed on the sink end of a replication stream. - * See {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain - * use-cases may need such a facility but better to filter here on the source side rather - * than later, after the edit arrives at the sink.
    + * A Filter for WAL entries before being sent over to replication. Multiple filters might be chained + * together using {@link ChainWALEntryFilter}. Applied on the replication source side. + *
    + * There is also a filter that can be installed on the sink end of a replication stream. See + * {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain use-cases + * may need such a facility but better to filter here on the source side rather than later, after + * the edit arrives at the sink. + *
    * @see org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter for filtering - * replication on the sink-side. + * replication on the sink-side. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface WALEntryFilter { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java index f06b29ccdeff..819e4c5e54ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,13 +90,13 @@ public boolean apply(FileStatus file) { @Override public void setConf(Configuration config) { // If either replication or replication of bulk load hfiles is disabled, keep all members null - if (!(config.getBoolean( - HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT))) { - LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY - + " is not enabled. Better to remove " - + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS - + " configuration."); + if ( + !(config.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) + ) { + LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + " is not enabled. Better to remove " + + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS + + " configuration."); return; } // Make my own Configuration. Then I'll have my own connection to zk that @@ -154,7 +154,7 @@ public boolean isFileDeletable(FileStatus fStat) { hfileRefsFromQueue = rqs.getAllHFileRefs(); } catch (ReplicationException e) { LOG.warn("Failed to read hfile references from zookeeper, skipping checking deletable " - + "file for " + fStat.getPath()); + + "file for " + fStat.getPath()); return false; } return !hfileRefsFromQueue.contains(fStat.getPath().getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index a7821f1894a1..92021681064a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Predicate; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; /** - * Implementation of a log cleaner that checks if a log is still scheduled for - * replication before deleting it when its TTL is over. + * Implementation of a log cleaner that checks if a log is still scheduled for replication before + * deleting it when its TTL is over. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { @@ -128,7 +128,7 @@ public void setConf(Configuration conf, ZKWatcher zk) { @InterfaceAudience.Private public void setConf(Configuration conf, ZKWatcher zk, - ReplicationQueueStorage replicationQueueStorage) { + ReplicationQueueStorage replicationQueueStorage) { super.setConf(conf); this.zkw = zk; this.queueStorage = replicationQueueStorage; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java index ddae7311225b..b9a7be813af8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java index f7040d6fc811..8b334dfb809b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.replication.regionserver; @@ -15,7 +22,6 @@ import java.net.URL; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -26,13 +32,13 @@ import org.slf4j.LoggerFactory; /** - * This will load all the xml configuration files for the source cluster replication ID from - * user configured replication configuration directory. + * This will load all the xml configuration files for the source cluster replication ID from user + * configured replication configuration directory. */ @InterfaceAudience.Private public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurationProvider { private static final Logger LOG = - LoggerFactory.getLogger(DefaultSourceFSConfigurationProvider.class); + LoggerFactory.getLogger(DefaultSourceFSConfigurationProvider.class); // Map containing all the source clusters configurations against their replication cluster id private final Map sourceClustersConfs = new HashMap<>(); @@ -40,7 +46,7 @@ public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurati @Override public Configuration getConf(Configuration sinkConf, String replicationClusterId) - throws IOException { + throws IOException { if (sourceClustersConfs.get(replicationClusterId) == null) { synchronized (this.sourceClustersConfs) { if (sourceClustersConfs.get(replicationClusterId) == null) { @@ -62,8 +68,7 @@ public Configuration getConf(Configuration sinkConf, String replicationClusterId File confDir = new File(replicationConfDir, replicationClusterId); LOG.info("Loading source cluster " + replicationClusterId - + " file system configurations from xml " - + "files under directory " + confDir); + + " file system configurations from xml " + "files under directory " + confDir); String[] listofConfFiles = FileUtil.list(confDir); for (String confFile : listofConfFiles) { if (new File(confDir, confFile).isFile() && confFile.endsWith(XML)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index dc4ee347e20a..b2dbd591fc9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,10 +60,9 @@ /** * Provides information about the existing states of replication, replication peers and queues. - * * Usage: hbase org.apache.hadoop.hbase.replication.regionserver.DumpReplicationQueues [args] - * Arguments: --distributed Polls each RS to dump information about the queue - * --hdfs Reports HDFS usage by the replication queues (note: can be overestimated). + * Arguments: --distributed Polls each RS to dump information about the queue --hdfs Reports HDFS + * usage by the replication queues (note: can be overestimated). */ @InterfaceAudience.Private public class DumpReplicationQueues extends Configured implements Tool { @@ -96,7 +95,7 @@ public DumpOptions(DumpOptions that) { this.distributed = that.distributed; } - boolean isHdfs () { + boolean isHdfs() { return hdfs; } @@ -104,7 +103,7 @@ boolean isDistributed() { return distributed; } - void setHdfs (boolean hdfs) { + void setHdfs(boolean hdfs) { this.hdfs = hdfs; } @@ -136,7 +135,7 @@ static DumpOptions parseOpts(Queue args) { printUsageAndExit("ERROR: Unrecognized option/command: " + cmd, -1); } // check that --distributed is present when --hdfs is in the arguments - if (!opts.isDistributed() && opts.isHdfs()) { + if (!opts.isDistributed() && opts.isHdfs()) { printUsageAndExit("ERROR: --hdfs option can only be used with --distributed: " + cmd, -1); } } @@ -144,10 +143,7 @@ static DumpOptions parseOpts(Queue args) { } /** - * Main - * - * @param args - * @throws Exception + * Main nn */ public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); @@ -190,10 +186,10 @@ protected static void printUsage(final String className, final String message) { System.err.println("General Options:"); System.err.println(" -h|--h|--help Show this help and exit."); System.err.println(" --distributed Poll each RS and print its own replication queue. " - + "Default only polls ZooKeeper"); + + "Default only polls ZooKeeper"); System.err.println(" --hdfs Use HDFS to calculate usage of WALs by replication." - + " It could be overestimated if replicating to multiple peers." - + " --distributed flag is also needed."); + + " It could be overestimated if replicating to multiple peers." 
+ + " --distributed flag is also needed."); } protected static void printUsageAndExit(final String message, final int exitCode) { @@ -206,9 +202,9 @@ private int dumpReplicationQueues(DumpOptions opts) throws Exception { Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); - ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + - EnvironmentEdgeManager.currentTime(), - new WarnOnlyAbortable(), true); + ZKWatcher zkw = + new ZKWatcher(conf, "DumpReplicationQueues" + EnvironmentEdgeManager.currentTime(), + new WarnOnlyAbortable(), true); try { // Our zk watcher @@ -216,7 +212,7 @@ private int dumpReplicationQueues(DumpOptions opts) throws Exception { List replicatedTableCFs = admin.listReplicatedTableCFs(); if (replicatedTableCFs.isEmpty()) { LOG.info("No tables with a configured replication peer were found."); - return(0); + return (0); } else { LOG.info("Replicated Tables: " + replicatedTableCFs); } @@ -232,8 +228,8 @@ private int dumpReplicationQueues(DumpOptions opts) throws Exception { if (opts.isDistributed()) { LOG.info("Found [--distributed], will poll each RegionServer."); - Set peerIds = peers.stream().map((peer) -> peer.getPeerId()) - .collect(Collectors.toSet()); + Set peerIds = + peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet()); System.out.println(dumpQueues(zkw, peerIds, opts.isHdfs())); System.out.println(dumpReplicationSummary()); } else { @@ -253,14 +249,14 @@ public String dumpReplicationSummary() { StringBuilder sb = new StringBuilder(); if (!deletedQueues.isEmpty()) { sb.append("Found " + deletedQueues.size() + " deleted queues" - + ", run hbck -fixReplication in order to remove the deleted replication queues\n"); + + ", run hbck -fixReplication in order to remove the deleted replication queues\n"); for (String deletedQueue : deletedQueues) { sb.append(" " + deletedQueue + "\n"); } } if (!deadRegionServers.isEmpty()) { sb.append("Found " + deadRegionServers.size() + " dead regionservers" - + ", restart one regionserver to transfer the queues of dead regionservers\n"); + + ", restart one regionserver to transfer the queues of dead regionservers\n"); for (String deadRs : deadRegionServers) { sb.append(" " + deadRs + "\n"); } @@ -268,7 +264,8 @@ public String dumpReplicationSummary() { if (!peersQueueSize.isEmpty()) { sb.append("Dumping all peers's number of WALs in replication queue\n"); for (Map.Entry entry : peersQueueSize.asMap().entrySet()) { - sb.append(" PeerId: " + entry.getKey() + " , sizeOfLogQueue: " + entry.getValue() + "\n"); + sb.append( + " PeerId: " + entry.getKey() + " , sizeOfLogQueue: " + entry.getValue() + "\n"); } } sb.append(" Total size of WALs on HDFS: " + StringUtils.humanSize(totalSizeOfWALs) + "\n"); @@ -332,8 +329,8 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, boolean hdfs) throw } private String formatQueue(ServerName regionserver, ReplicationQueueStorage queueStorage, - ReplicationQueueInfo queueInfo, String queueId, List wals, boolean isDeleted, - boolean hdfs) throws Exception { + ReplicationQueueInfo queueInfo, String queueId, List wals, boolean isDeleted, + boolean hdfs) throws Exception { StringBuilder sb = new StringBuilder(); List deadServers; @@ -354,23 +351,23 @@ private String formatQueue(ServerName regionserver, ReplicationQueueStorage queu for (String wal : wals) { long position = queueStorage.getWALPosition(regionserver, queueInfo.getPeerId(), wal); - sb.append(" Replication position for " + wal + ": " + (position > 0 ? 
position : "0" - + " (not started or nothing to replicate)") + "\n"); + sb.append(" Replication position for " + wal + ": " + + (position > 0 ? position : "0" + " (not started or nothing to replicate)") + "\n"); } if (hdfs) { FileSystem fs = FileSystem.get(getConf()); sb.append(" Total size of WALs on HDFS for this queue: " - + StringUtils.humanSize(getTotalWALSize(fs, wals, regionserver)) + "\n"); + + StringUtils.humanSize(getTotalWALSize(fs, wals, regionserver)) + "\n"); } return sb.toString(); } /** - * return total size in bytes from a list of WALs + * return total size in bytes from a list of WALs */ private long getTotalWALSize(FileSystem fs, List wals, ServerName server) - throws IOException { + throws IOException { long size = 0; FileStatus fileStatus; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 6dd60d14db0d..cec360a4c97e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; @@ -69,29 +68,28 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} - * implementation for replicating to another HBase cluster. - * For the slave cluster it selects a random number of peers - * using a replication ratio. For example, if replication ration = 0.1 - * and slave cluster has 100 region servers, 10 will be selected. + * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} implementation for replicating + * to another HBase cluster. For the slave cluster it selects a random number of peers using a + * replication ratio. For example, if replication ration = 0.1 and slave cluster has 100 region + * servers, 10 will be selected. *
    - * A stream is considered down when we cannot contact a region server on the - * peer cluster for more than 55 seconds by default. + * A stream is considered down when we cannot contact a region server on the peer cluster for more + * than 55 seconds by default. *
    */ @InterfaceAudience.Private public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint { private static final Logger LOG = - LoggerFactory.getLogger(HBaseInterClusterReplicationEndpoint.class); + LoggerFactory.getLogger(HBaseInterClusterReplicationEndpoint.class); private static final long DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER = 2; /** Drop edits for tables that been deleted from the replication source and target */ public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY = - "hbase.replication.drop.on.deleted.table"; + "hbase.replication.drop.on.deleted.table"; /** Drop edits for CFs that been deleted from the replication source and target */ public static final String REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY = - "hbase.replication.drop.on.deleted.columnfamily"; + "hbase.replication.drop.on.deleted.columnfamily"; // How long should we sleep for each retry private long sleepForRetries; @@ -103,7 +101,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private long maxTerminationWait; // Size limit for replication RPCs, in bytes private int replicationRpcLimit; - //Metrics for this source + // Metrics for this source private MetricsSource metrics; private boolean peersSelected = false; private String replicationClusterId = ""; @@ -116,7 +114,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private boolean dropOnDeletedTables; private boolean dropOnDeletedColumnFamilies; private boolean isSerial = false; - //Initialising as 0 to guarantee at least one logging message + // Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; @Override @@ -124,37 +122,33 @@ public void init(Context context) throws IOException { super.init(context); decorateConf(); this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); - this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", - maxRetriesMultiplier); + this.socketTimeoutMultiplier = + this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier); // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator // tasks to terminate when doStop() is called. 
long maxTerminationWaitMultiplier = this.conf.getLong( - "replication.source.maxterminationmultiplier", - DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); - this.maxTerminationWait = maxTerminationWaitMultiplier * - this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); + "replication.source.maxterminationmultiplier", DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); + this.maxTerminationWait = maxTerminationWaitMultiplier + * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); // per sink thread pool this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); this.exec = Threads.getBoundedCachedThreadPool(maxThreads, 60, TimeUnit.SECONDS, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build()); this.abortable = ctx.getAbortable(); // Set the size limit for replication RPCs to 95% of the max request size. // We could do with less slop if we have an accurate estimate of encoded size. Being // conservative for now. - this.replicationRpcLimit = (int)(0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, - RpcServer.DEFAULT_MAX_REQUEST_SIZE)); - this.dropOnDeletedTables = - this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false); - this.dropOnDeletedColumnFamilies = this.conf - .getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false); - - this.replicationBulkLoadDataEnabled = - conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); + this.replicationRpcLimit = + (int) (0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, RpcServer.DEFAULT_MAX_REQUEST_SIZE)); + this.dropOnDeletedTables = this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false); + this.dropOnDeletedColumnFamilies = + this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false); + + this.replicationBulkLoadDataEnabled = conf.getBoolean( + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); if (this.replicationBulkLoadDataEnabled) { replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID); } @@ -188,15 +182,15 @@ private void connectToPeers() { /** * Do the sleeping logic - * @param msg Why we sleep + * @param msg Why we sleep * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { - LOG.trace("{} {}, sleeping {} times {}", - logPeerId(), msg, sleepForRetries, sleepMultiplier); + LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, + sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { @@ -217,7 +211,7 @@ private List> createParallelBatches(final List entries) { int numSinks = Math.max(getNumSinks(), 1); int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks); List> entryLists = - Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); + Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); int[] sizes = new int[n]; 
for (Entry e : entries) { int index = Math.abs(Bytes.hashCode(e.getKey().getEncodedRegionName()) % n); @@ -239,7 +233,7 @@ private List> createSerialBatches(final List entries) { Map> regionEntries = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Entry e : entries) { regionEntries.computeIfAbsent(e.getKey().getEncodedRegionName(), key -> new ArrayList<>()) - .add(e); + .add(e); } return new ArrayList<>(regionEntries.values()); } @@ -299,7 +293,7 @@ List> filterNotExistTableEdits(final List> oldEntryList) List> entryList = new ArrayList<>(); Map existMap = new HashMap<>(); try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration()); - Admin localAdmin = localConn.getAdmin()) { + Admin localAdmin = localConn.getAdmin()) { for (List oldEntries : oldEntryList) { List entries = new ArrayList<>(); for (Entry e : oldEntries) { @@ -324,7 +318,7 @@ List> filterNotExistTableEdits(final List> oldEntryList) // and add a table filter there; but that would break the encapsulation, // so we're doing the filtering here. LOG.warn("Missing table detected at sink, local table also does not exist, " - + "filtering edits for table '{}'", tableName); + + "filtering edits for table '{}'", tableName); } } if (!entries.isEmpty()) { @@ -342,7 +336,7 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt List> entryList = new ArrayList<>(); Map> existColumnFamilyMap = new HashMap<>(); try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration()); - Admin localAdmin = localConn.getAdmin()) { + Admin localAdmin = localConn.getAdmin()) { for (List oldEntries : oldEntryList) { List entries = new ArrayList<>(); for (Entry e : oldEntries) { @@ -350,7 +344,7 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt if (!existColumnFamilyMap.containsKey(tableName)) { try { Set cfs = localAdmin.getDescriptor(tableName).getColumnFamilyNames().stream() - .map(Bytes::toString).collect(Collectors.toSet()); + .map(Bytes::toString).collect(Collectors.toSet()); existColumnFamilyMap.put(tableName, cfs); } catch (Exception ex) { LOG.warn("Exception getting cf names for local table {}", tableName, ex); @@ -384,8 +378,9 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt // and add a table filter there; but that would break the encapsulation, // so we're doing the filtering here. LOG.warn( - "Missing column family detected at sink, local column family also does not exist," - + " filtering edits for table '{}',column family '{}'", tableName, missingCFs); + "Missing column family detected at sink, local column family also does not exist," + + " filtering edits for table '{}',column family '{}'", + tableName, missingCFs); } } if (!entries.isEmpty()) { @@ -400,7 +395,7 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt } private long parallelReplicate(CompletionService pool, ReplicateContext replicateContext, - List> batches) throws IOException { + List> batches) throws IOException { int futures = 0; for (int i = 0; i < batches.size(); i++) { List entries = batches.get(i); @@ -433,8 +428,9 @@ private long parallelReplicate(CompletionService pool, ReplicateContext } catch (InterruptedException ie) { iox = new IOException(ie); } catch (ExecutionException ee) { - iox = ee.getCause() instanceof IOException? - (IOException)ee.getCause(): new IOException(ee.getCause()); + iox = ee.getCause() instanceof IOException + ? 
(IOException) ee.getCause() + : new IOException(ee.getCause()); } } if (iox != null) { @@ -459,12 +455,12 @@ public boolean replicate(ReplicateContext replicateContext) { int numSinks = getNumSinks(); if (numSinks == 0) { - if ((EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= - (maxRetriesMultiplier*1000)) { - LOG.warn( - "No replication sinks found, returning without replicating. " - + "The source should retry with the same set of edits. Not logging this again for " - + "the next {} seconds.", maxRetriesMultiplier); + if ( + (EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= (maxRetriesMultiplier * 1000) + ) { + LOG.warn("No replication sinks found, returning without replicating. " + + "The source should retry with the same set of edits. Not logging this again for " + + "the next {} seconds.", maxRetriesMultiplier); lastSinkFetchTime = EnvironmentEdgeManager.currentTime(); } sleepForRetries("No sinks available at peer", sleepMultiplier); @@ -496,13 +492,13 @@ public boolean replicate(ReplicateContext replicateContext) { } else if (dropOnDeletedColumnFamilies && isNoSuchColumnFamilyException(ioe)) { batches = filterNotExistColumnFamilyEdits(batches); if (batches.isEmpty()) { - LOG.warn("After filter not exist column family's edits, 0 edits to replicate, " + - "just return"); + LOG.warn("After filter not exist column family's edits, 0 edits to replicate, " + + "just return"); return true; } } else { LOG.warn("{} Peer encountered RemoteException, rechecking all sinks: ", logPeerId(), - ioe); + ioe); chooseSinks(); } } else { @@ -510,9 +506,10 @@ public boolean replicate(ReplicateContext replicateContext) { // This exception means we waited for more than 60s and nothing // happened, the cluster is alive and calling it right away // even for a test just makes things worse. - sleepForRetries("Encountered a SocketTimeoutException. Since the " + - "call to the remote cluster timed out, which is usually " + - "caused by a machine failure or a massive slowdown", + sleepForRetries( + "Encountered a SocketTimeoutException. Since the " + + "call to the remote cluster timed out, which is usually " + + "caused by a machine failure or a massive slowdown", this.socketTimeoutMultiplier); } else if (ioe instanceof ConnectException || ioe instanceof UnknownHostException) { LOG.warn("{} Peer is unavailable, rechecking all sinks: ", logPeerId(), ioe); @@ -544,16 +541,16 @@ protected void doStop() { } // Abort if the tasks did not terminate in time if (!exec.isTerminated()) { - String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. The " + - "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + - "Aborting to prevent Replication from deadlocking. See HBASE-16081."; + String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. The " + + "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + + "Aborting to prevent Replication from deadlocking. 
See HBASE-16081."; abortable.abort(errMsg, new IOException(errMsg)); } notifyStopped(); } protected int replicateEntries(List entries, int batchIndex, int timeout) - throws IOException { + throws IOException { SinkPeer sinkPeer = null; try { int entriesHashCode = System.identityHashCode(entries); @@ -588,7 +585,7 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) } private int serialReplicateRegionEntries(List entries, int batchIndex, int timeout) - throws IOException { + throws IOException { int batchSize = 0, index = 0; List batch = new ArrayList<>(); for (Entry entry : entries) { @@ -608,11 +605,12 @@ private int serialReplicateRegionEntries(List entries, int batchIndex, in } protected Callable createReplicator(List entries, int batchIndex, int timeout) { - return isSerial ? () -> serialReplicateRegionEntries(entries, batchIndex, timeout) - : () -> replicateEntries(entries, batchIndex, timeout); + return isSerial + ? () -> serialReplicateRegionEntries(entries, batchIndex, timeout) + : () -> replicateEntries(entries, batchIndex, timeout); } - private String logPeerId(){ + private String logPeerId() { return "[Source for peer " + this.ctx.getPeerId() + "]:"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java index 209537137d7b..2d0b4e32ced0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java @@ -69,11 +69,11 @@ public class HFileReplicator implements Closeable { /** Maximum number of threads to allow in pool to copy hfiles during replication */ public static final String REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY = - "hbase.replication.bulkload.copy.maxthreads"; + "hbase.replication.bulkload.copy.maxthreads"; public static final int REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT = 10; /** Number of hfiles to copy per thread during replication */ public static final String REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY = - "hbase.replication.bulkload.copy.hfiles.perthread"; + "hbase.replication.bulkload.copy.hfiles.perthread"; public static final int REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT = 10; private static final Logger LOG = LoggerFactory.getLogger(HFileReplicator.class); @@ -95,10 +95,10 @@ public class HFileReplicator implements Closeable { private int copiesPerThread; private List sourceClusterIds; - public HFileReplicator(Configuration sourceClusterConf, - String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath, - Map>>> tableQueueMap, Configuration conf, - AsyncClusterConnection connection, List sourceClusterIds) throws IOException { + public HFileReplicator(Configuration sourceClusterConf, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath, Map>>> tableQueueMap, + Configuration conf, AsyncClusterConnection connection, List sourceClusterIds) + throws IOException { this.sourceClusterConf = sourceClusterConf; this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath; this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath; @@ -111,16 +111,13 @@ public HFileReplicator(Configuration sourceClusterConf, fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); this.hbaseStagingDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME); - this.maxCopyThreads = - 
this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, - REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); + this.maxCopyThreads = this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, + REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); this.exec = Threads.getBoundedCachedThreadPool(maxCopyThreads, 60, TimeUnit.SECONDS, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath). - build()); - this.copiesPerThread = - conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, - REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath).build()); + this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, + REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); sinkFs = FileSystem.get(conf); } @@ -163,11 +160,11 @@ public Void replicate() throws IOException { } private void doBulkLoad(Configuration conf, TableName tableName, Path stagingDir, - Deque queue, int maxRetries) throws IOException { + Deque queue, int maxRetries) throws IOException { BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf); // Set the staging directory which will be used by BulkLoadHFilesTool for loading the data loader.setBulkToken(stagingDir.toString()); - //updating list of cluster ids where this bulkload event has already been processed + // updating list of cluster ids where this bulkload event has already been processed loader.setClusterIds(sourceClusterIds); for (int count = 0; !queue.isEmpty(); count++) { if (count != 0) { @@ -218,7 +215,7 @@ private Map copyHFilesToStagingDir() throws IOException { */ String sourceScheme = sourceClusterPath.toUri().getScheme(); String disableCacheName = - String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme }); + String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme }); sourceClusterConf.setBoolean(disableCacheName, true); sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf); @@ -226,12 +223,11 @@ private Map copyHFilesToStagingDir() throws IOException { User user = userProvider.getCurrent(); // For each table name in the map for (Entry>>> tableEntry : bulkLoadHFileMap - .entrySet()) { + .entrySet()) { String tableName = tableEntry.getKey(); // Create staging directory for each table - Path stagingDir = - createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName)); + Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName)); familyHFilePathsPairsList = tableEntry.getValue(); familyHFilePathsPairsListSize = familyHFilePathsPairsList.size(); @@ -253,9 +249,8 @@ private Map copyHFilesToStagingDir() throws IOException { int currentCopied = 0; // Copy the hfiles parallely while (totalNoOfHFiles > currentCopied + this.copiesPerThread) { - c = - new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, - currentCopied + this.copiesPerThread)); + c = new Copier(sourceFs, familyStagingDir, + hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread)); future = exec.submit(c); futures.add(future); currentCopied += this.copiesPerThread; @@ -263,9 +258,8 @@ private Map copyHFilesToStagingDir() throws IOException { int remaining = totalNoOfHFiles - currentCopied; if (remaining > 0) { - c = - new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, - currentCopied + remaining)); + c = new Copier(sourceFs, familyStagingDir, + 
hfilePaths.subList(currentCopied, currentCopied + remaining)); future = exec.submit(c); futures.add(future); } @@ -274,15 +268,14 @@ private Map copyHFilesToStagingDir() throws IOException { try { f.get(); } catch (InterruptedException e) { - InterruptedIOException iioe = - new InterruptedIOException( - "Failed to copy HFiles to local file system. This will be retried again " - + "by the source cluster."); + InterruptedIOException iioe = new InterruptedIOException( + "Failed to copy HFiles to local file system. This will be retried again " + + "by the source cluster."); iioe.initCause(e); throw iioe; } catch (ExecutionException e) { throw new IOException("Failed to copy HFiles to local file system. This will " - + "be retried again by the source cluster.", e); + + "be retried again by the source cluster.", e); } } } @@ -295,7 +288,7 @@ private Map copyHFilesToStagingDir() throws IOException { if (sourceFs != null) { sourceFs.close(); } - if(exec != null) { + if (exec != null) { exec.shutdown(); } } @@ -307,7 +300,7 @@ private Path createStagingDir(Path baseDir, User user, TableName tableName) thro int RANDOM_RADIX = 32; String doubleUnderScore = UNDERSCORE + UNDERSCORE; String randomDir = user.getShortName() + doubleUnderScore + tblName + doubleUnderScore - + (new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX)); + + (new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX)); return createStagingDir(baseDir, user, randomDir); } @@ -328,7 +321,7 @@ private class Copier implements Callable { private List hfiles; public Copier(FileSystem sourceFs, final Path stagingDir, final List hfiles) - throws IOException { + throws IOException { this.sourceFs = sourceFs; this.stagingDir = stagingDir; this.hfiles = hfiles; @@ -348,8 +341,7 @@ public Void call() throws IOException { // source will retry to replicate these data. } catch (FileNotFoundException e) { LOG.info("Failed to copy hfile from " + sourceHFilePath + " to " + localHFilePath - + ". Trying to copy from hfile archive directory.", - e); + + ". Trying to copy from hfile archive directory.", e); sourceHFilePath = new Path(sourceHFileArchiveDirPath, hfiles.get(i)); try { @@ -358,8 +350,7 @@ public Void call() throws IOException { // This will mean that the hfile does not exists any where in source cluster FS. So we // cannot do anything here just log and continue. LOG.debug("Failed to copy hfile from " + sourceHFilePath + " to " + localHFilePath - + ". Hence ignoring this hfile from replication..", - e1); + + ". Hence ignoring this hfile from replication..", e1); continue; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index dede79d138cc..f21532073e58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is for maintaining the various replication statistics for a sink and publishing them @@ -35,12 +34,11 @@ public class MetricsSink { public MetricsSink() { mss = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getSink(); + CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getSink(); } /** * Set the age of the last applied operation - * * @param timestamp The timestamp of the last operation applied. * @return the age that was set */ @@ -55,8 +53,8 @@ public long setAgeOfLastAppliedOp(long timestamp) { } /** - * Refreshing the age makes sure the value returned is the actual one and - * not the one set a replication time + * Refreshing the age makes sure the value returned is the actual one and not the one set a + * replication time * @return refreshed age */ public long refreshAgeOfLastAppliedOp() { @@ -64,9 +62,7 @@ public long refreshAgeOfLastAppliedOp() { } /** - * Convience method to change metrics when a batch of operations are applied. - * - * @param batchSize + * Convience method to change metrics when a batch of operations are applied. n */ public void applyBatch(long batchSize) { mss.incrAppliedBatches(1); @@ -75,7 +71,6 @@ public void applyBatch(long batchSize) { /** * Convience method to change metrics when a batch of operations are applied. - * * @param batchSize total number of mutations that are applied/replicated * @param hfileSize total number of hfiles that are applied/replicated */ @@ -85,8 +80,7 @@ public void applyBatch(long batchSize, long hfileSize) { } /** - * Get the Age of Last Applied Op - * @return ageOfLastAppliedOp + * Get the Age of Last Applied Op n */ public long getAgeOfLastAppliedOp() { return mss.getLastAppliedOpAge(); @@ -102,16 +96,14 @@ public long getTimestampOfLastAppliedOp() { } /** - * Gets the time stamp from when the Sink was initialized. - * @return startTimestamp + * Gets the time stamp from when the Sink was initialized. n */ public long getStartTimestamp() { return this.startTimestamp; } /** - * Gets the total number of OPs delivered to this sink. - * @return totalAplliedOps + * Gets the total number of OPs delivered to this sink. n */ public long getAppliedOps() { return this.mss.getSinkAppliedOps(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 3ab08065ca78..0331c277fa89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import java.util.HashMap; @@ -53,14 +52,12 @@ public class MetricsSource implements BaseSource { /** * Constructor used to register the metrics - * * @param id Name of the source this class is monitoring */ public MetricsSource(String id) { this.id = id; - singleSourceSource = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getSource(id); + singleSourceSource = CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getSource(id); globalSourceSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); singleSourceSourceByTable = new HashMap<>(); @@ -68,13 +65,13 @@ public MetricsSource(String id) { /** * Constructor for injecting custom (or test) MetricsReplicationSourceSources - * @param id Name of the source this class is monitoring + * @param id Name of the source this class is monitoring * @param singleSourceSource Class to monitor id-scoped metrics * @param globalSourceSource Class to monitor global-scoped metrics */ public MetricsSource(String id, MetricsReplicationSourceSource singleSourceSource, - MetricsReplicationGlobalSourceSource globalSourceSource, - Map singleSourceSourceByTable) { + MetricsReplicationGlobalSourceSource globalSourceSource, + Map singleSourceSourceByTable) { this.id = id; this.singleSourceSource = singleSourceSource; this.globalSourceSource = globalSourceSource; @@ -84,7 +81,7 @@ public MetricsSource(String id, MetricsReplicationSourceSource singleSourceSourc /** * Set the age of the last edit that was shipped * @param timestamp target write time of the edit - * @param walGroup which group we are setting + * @param walGroup which group we are setting */ public void setAgeOfLastShippedOp(long timestamp, String walGroup) { long age = EnvironmentEdgeManager.currentTime() - timestamp; @@ -96,7 +93,6 @@ public void setAgeOfLastShippedOp(long timestamp, String walGroup) { /** * Update the table level replication metrics per table - * * @param walEntries List of pairs of WAL entry and it's size */ public void updateTableLevelMetrics(List> walEntries) { @@ -109,9 +105,8 @@ public void updateTableLevelMetrics(List> walEntries) { // get the replication metrics source for table at the run time MetricsReplicationTableSource tableSource = this.getSingleSourceSourceByTable() - .computeIfAbsent(tableName, - t -> CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getTableSource(t)); + .computeIfAbsent(tableName, t -> CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)); tableSource.setLastShippedAge(age); tableSource.incrShippedBytes(entrySize); } @@ -124,16 +119,15 @@ public void updateTableLevelMetrics(List> walEntries) { */ public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) { long age = EnvironmentEdgeManager.currentTime() - timestamp; - this.getSingleSourceSourceByTable().computeIfAbsent( - tableName, t -> CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)) - .setLastShippedAge(age); + this.getSingleSourceSourceByTable() + .computeIfAbsent(tableName, t -> CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)) + .setLastShippedAge(age); } /** * get age of last shipped op of given wal group. 
If the walGroup is null, return 0 - * @param walGroup which group we are getting - * @return age + * @param walGroup which group we are getting n */ public long getAgeOfLastShippedOp(String walGroup) { return this.ageOfLastShippedOp.get(walGroup) == null ? 0 : ageOfLastShippedOp.get(walGroup); @@ -186,7 +180,6 @@ public void decrSourceInitializing() { /** * Add on the the number of log edits read - * * @param delta the number of log edits read. */ private void incrLogEditsRead(long delta) { @@ -201,7 +194,6 @@ public void incrLogEditsRead() { /** * Add on the number of log edits filtered - * * @param delta the number filtered. */ public void incrLogEditsFiltered(long delta) { @@ -216,7 +208,6 @@ public void incrLogEditsFiltered() { /** * Convience method to apply changes to metrics do to shipping a batch of logs. - * * @param batchSize the size of the batch that was shipped to sinks. */ public void shipBatch(long batchSize, int sizeInBytes) { @@ -234,7 +225,7 @@ public void shipBatch(long batchSize, int sizeInBytes) { * Gets the number of edits not eligible for replication this source queue logs so far. * @return logEditsFiltered non-replicable edits filtered from this queue logs. */ - public long getEditsFiltered(){ + public long getEditsFiltered() { return this.singleSourceSource.getEditsFiltered(); } @@ -242,7 +233,7 @@ public long getEditsFiltered(){ * Gets the number of edits eligible for replication read from this source queue logs so far. * @return replicableEdits total number of replicable edits read from this queue logs. */ - public long getReplicableEdits(){ + public long getReplicableEdits() { return this.singleSourceSource.getWALEditsRead() - this.singleSourceSource.getEditsFiltered(); } @@ -256,9 +247,8 @@ public long getOpsShipped() { /** * Convience method to apply changes to metrics do to shipping a batch of logs. - * * @param batchSize the size of the batch that was shipped to sinks. - * @param hfiles total number of hfiles shipped to sinks. + * @param hfiles total number of hfiles shipped to sinks. */ public void shipBatch(long batchSize, int sizeInBytes, long hfiles) { shipBatch(batchSize, sizeInBytes); @@ -285,33 +275,28 @@ public void clear() { } /** - * Get AgeOfLastShippedOp - * @return AgeOfLastShippedOp + * Get AgeOfLastShippedOp n */ public Long getAgeOfLastShippedOp() { return singleSourceSource.getLastShippedAge(); } /** - * Get the sizeOfLogQueue - * @return sizeOfLogQueue + * Get the sizeOfLogQueue n */ public int getSizeOfLogQueue() { return singleSourceSource.getSizeOfLogQueue(); } - /** - * Get the value of uncleanlyClosedWAL counter - * @return uncleanlyClosedWAL + * Get the value of uncleanlyClosedWAL counter n */ public long getUncleanlyClosedWALs() { return singleSourceSource.getUncleanlyClosedWALs(); } /** - * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one - * @return lastTimestampForAge + * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one n */ public long getTimestampOfLastShippedOp() { long lastTimestamp = 0L; @@ -332,9 +317,9 @@ public long getTimeStampNextToReplicate() { } /** - * TimeStamp of next edit targeted for replication. Used for calculating lag, - * as if this timestamp is greater than timestamp of last shipped, it means there's - * at least one edit pending replication. + * TimeStamp of next edit targeted for replication. 
Used for calculating lag, as if this timestamp + * is greater than timestamp of last shipped, it means there's at least one edit pending + * replication. * @param timeStampNextToReplicate timestamp of next edit in the queue that should be replicated. */ public void setTimeStampNextToReplicate(long timeStampNextToReplicate) { @@ -342,9 +327,9 @@ public void setTimeStampNextToReplicate(long timeStampNextToReplicate) { } public long getReplicationDelay() { - if(getTimestampOfLastShippedOp()>=timeStampNextToReplicate){ + if (getTimestampOfLastShippedOp() >= timeStampNextToReplicate) { return 0; - }else{ + } else { return EnvironmentEdgeManager.currentTime() - timeStampNextToReplicate; } } @@ -358,8 +343,7 @@ public int getSourceInitializing() { } /** - * Get the slave peer ID - * @return peerID + * Get the slave peer ID n */ public String getPeerID() { return id; @@ -420,8 +404,8 @@ public void incrFailedRecoveryQueue() { } /* - Sets the age of oldest log file just for source. - */ + * Sets the age of oldest log file just for source. + */ public void setOldestWalAge(long age) { singleSourceSource.setOldestWalAge(age); } @@ -505,8 +489,7 @@ public void setWALReaderEditsBufferUsage(long usageInBytes) { } /** - * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. - * @return + * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. n */ public long getWALReaderEditsBufferUsage() { return globalSourceSource.getWALReaderEditsBufferBytes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java index efafd09bedce..b55699331c2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,10 @@ @InterfaceAudience.Private public interface PeerActionListener { - static final PeerActionListener DUMMY = new PeerActionListener() {}; + static final PeerActionListener DUMMY = new PeerActionListener() { + }; default void peerSyncReplicationStateChange(String peerId, SyncReplicationState from, - SyncReplicationState to, int stage) {} + SyncReplicationState to, int stage) { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java index 2fe3110d7972..3df78c1d8313 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public interface PeerProcedureHandler { void updatePeerConfig(String peerId) throws ReplicationException, IOException; void transitSyncReplicationPeerState(String peerId, int stage, HRegionServer rs) - throws ReplicationException, IOException; + throws ReplicationException, IOException; void claimReplicationQueue(ServerName crashedServer, String queue) throws ReplicationException, IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java index a50d74a448b3..0187de14f806 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class PeerProcedureHandlerImpl implements PeerProcedureHandler { private final KeyLocker peersLock = new KeyLocker<>(); public PeerProcedureHandlerImpl(ReplicationSourceManager replicationSourceManager, - PeerActionListener peerActionListener) { + PeerActionListener peerActionListener) { this.replicationSourceManager = replicationSourceManager; this.peerActionListener = peerActionListener; } @@ -128,9 +128,11 @@ public void updatePeerConfig(String peerId) throws ReplicationException, IOExcep // disable it first and then enable it. PeerState newState = peers.refreshPeerState(peerId); // RS need to start work with the new replication config change - if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldConfig, newConfig) || - oldConfig.isSerial() != newConfig.isSerial() || - (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED))) { + if ( + !ReplicationUtils.isNamespacesAndTableCFsEqual(oldConfig, newConfig) + || oldConfig.isSerial() != newConfig.isSerial() + || (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED)) + ) { replicationSourceManager.refreshSources(peerId); } success = true; @@ -146,7 +148,7 @@ public void updatePeerConfig(String peerId) throws ReplicationException, IOExcep @Override public void transitSyncReplicationPeerState(String peerId, int stage, HRegionServer rs) - throws ReplicationException, IOException { + throws ReplicationException, IOException { ReplicationPeers replicationPeers = replicationSourceManager.getReplicationPeers(); Lock peerLock = peersLock.acquireLock(peerId); try { @@ -160,8 +162,8 @@ public void transitSyncReplicationPeerState(String peerId, int stage, HRegionSer SyncReplicationState newSyncReplicationState = peer.getNewSyncReplicationState(); if (stage == 0) { if (newSyncReplicationState != SyncReplicationState.NONE) { - LOG.warn("The new sync replication state for peer {} has already been set to {}, " + - "this should be a retry, give up", peerId, newSyncReplicationState); + LOG.warn("The new sync replication state for peer {} has already been set to {}, " + + "this should be a retry, give up", peerId, newSyncReplicationState); return; } // refresh the peer state first, as when we transit to STANDBY, we may need to disable the @@ -186,8 +188,8 @@ public void transitSyncReplicationPeerState(String peerId, int stage, 
HRegionSer } else { if (newSyncReplicationState == SyncReplicationState.NONE) { LOG.warn( - "The new sync replication state for peer {} has already been clear, and the " + - "current state is {}, this should be a retry, give up", + "The new sync replication state for peer {} has already been clear, and the " + + "current state is {}, this should be a retry, give up", peerId, newSyncReplicationState); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 526c3e3ec16d..024248a3f8c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -36,8 +36,8 @@ import org.slf4j.LoggerFactory; /** - * Class that handles the recovered source of a replication stream, which is transfered from - * another dead region server. This will be closed when all logs are pushed to peer cluster. + * Class that handles the recovered source of a replication stream, which is transfered from another + * dead region server. This will be closed when all logs are pushed to peer cluster. */ @InterfaceAudience.Private public class RecoveredReplicationSource extends ReplicationSource { @@ -48,9 +48,9 @@ public class RecoveredReplicationSource extends ReplicationSource { @Override public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, - String peerClusterZnode, UUID clusterId, WALFileLengthProvider walFileLengthProvider, - MetricsSource metrics) throws IOException { + ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, + String peerClusterZnode, UUID clusterId, WALFileLengthProvider walFileLengthProvider, + MetricsSource metrics) throws IOException { super.init(conf, fs, manager, queueStorage, replicationPeer, server, peerClusterZnode, clusterId, walFileLengthProvider, metrics); this.actualPeerId = this.replicationQueueInfo.getPeerId(); @@ -86,11 +86,10 @@ public void locateRecoveredPaths(String walGroupId) throws IOException { LOG.info("NB dead servers : " + deadRegionServers.size()); final Path walDir = CommonFSUtils.getWALRootDir(conf); for (ServerName curDeadServerName : deadRegionServers) { - final Path deadRsDirectory = - new Path(walDir, AbstractFSWALProvider.getWALDirectoryName(curDeadServerName - .getServerName())); - Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()), new Path( - deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) }; + final Path deadRsDirectory = new Path(walDir, + AbstractFSWALProvider.getWALDirectoryName(curDeadServerName.getServerName())); + Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()), + new Path(deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) }; for (Path possibleLogLocation : locs) { LOG.info("Possible location " + possibleLogLocation.toUri().toString()); if (manager.getFs().exists(possibleLogLocation)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java index a9c1fa4a423f..4f2bafcf156d 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,19 +27,19 @@ import org.slf4j.LoggerFactory; /** - * Used by a {@link RecoveredReplicationSource}. + * Used by a {@link RecoveredReplicationSource}. */ @InterfaceAudience.Private public class RecoveredReplicationSourceShipper extends ReplicationSourceShipper { private static final Logger LOG = - LoggerFactory.getLogger(RecoveredReplicationSourceShipper.class); + LoggerFactory.getLogger(RecoveredReplicationSourceShipper.class); protected final RecoveredReplicationSource source; private final ReplicationQueueStorage replicationQueues; public RecoveredReplicationSourceShipper(Configuration conf, String walGroupId, - ReplicationSourceLogQueue logQueue, RecoveredReplicationSource source, - ReplicationQueueStorage queueStorage) { + ReplicationSourceLogQueue logQueue, RecoveredReplicationSource source, + ReplicationQueueStorage queueStorage) { super(conf, walGroupId, logQueue, source); this.source = source; this.replicationQueues = queueStorage; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java index 0c07b1125b9e..094a61dcdd1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java index 9ad0af2286e4..643eeb7d3ba8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.yetus.audience.InterfaceAudience; @@ -27,16 +26,16 @@ */ @InterfaceAudience.Private public class RejectReplicationRequestStateChecker - implements BiPredicate { + implements BiPredicate { private static final RejectReplicationRequestStateChecker INST = - new RejectReplicationRequestStateChecker(); + new RejectReplicationRequestStateChecker(); @Override public boolean test(SyncReplicationState state, SyncReplicationState newState) { return state == SyncReplicationState.ACTIVE || state == SyncReplicationState.DOWNGRADE_ACTIVE - || newState == SyncReplicationState.ACTIVE - || newState == SyncReplicationState.DOWNGRADE_ACTIVE; + || newState == SyncReplicationState.ACTIVE + || newState == SyncReplicationState.DOWNGRADE_ACTIVE; } public static RejectReplicationRequestStateChecker get() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java index 8e68f0fe3ed0..a70d112ef2dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ */ @InterfaceAudience.Private public class RejectRequestsFromClientStateChecker - implements BiPredicate { + implements BiPredicate { private static final RejectRequestsFromClientStateChecker INST = new RejectRequestsFromClientStateChecker(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index fa4167b16789..af854ef08dd6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public class ReplaySyncReplicationWALCallable extends BaseRSProcedureCallable { private static final Logger LOG = LoggerFactory.getLogger(ReplaySyncReplicationWALCallable.class); private static final String REPLAY_SYNC_REPLICATION_WAL_BATCH_SIZE = - "hbase.replay.sync.replication.wal.batch.size"; + "hbase.replay.sync.replication.wal.batch.size"; private static final long DEFAULT_REPLAY_SYNC_REPLICATION_WAL_BATCH_SIZE = 8 * 1024 * 1024; @@ -102,11 +102,11 @@ private void replayWAL(String wal) throws IOException { List entries = readWALEntries(reader); while (!entries.isEmpty()) { Pair pair = ReplicationProtobufUtil - .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); + .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); ReplicateWALEntryRequest request = pair.getFirst(); - rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), - pair.getSecond(), request.getReplicationClusterId(), - request.getSourceBaseNamespaceDirPath(), request.getSourceHFileArchiveDirPath()); + rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), pair.getSecond(), + request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), + request.getSourceHFileArchiveDirPath()); // Read next entries. entries = readWALEntries(reader); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 4cf2b495fa1a..ea28a20c56b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -53,8 +53,7 @@ */ @InterfaceAudience.Private public class Replication implements ReplicationSourceService { - private static final Logger LOG = - LoggerFactory.getLogger(Replication.class); + private static final Logger LOG = LoggerFactory.getLogger(Replication.class); private boolean isReplicationForBulkLoadDataEnabled; private ReplicationSourceManager replicationManager; private ReplicationQueueStorage queueStorage; @@ -78,25 +77,27 @@ public Replication() { @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALFactory walFactory) throws IOException { + WALFactory walFactory) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = ReplicationUtils.isReplicationForBulkLoadDataEnabled(this.conf); if (this.isReplicationForBulkLoadDataEnabled) { - if (conf.get(HConstants.REPLICATION_CLUSTER_ID) == null - || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty()) { - throw new IllegalArgumentException(HConstants.REPLICATION_CLUSTER_ID - + " cannot be null/empty when " + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY - + " is set to true."); + if ( + conf.get(HConstants.REPLICATION_CLUSTER_ID) == null + || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty() + ) { + throw new IllegalArgumentException( + HConstants.REPLICATION_CLUSTER_ID + " cannot be null/empty when " + + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + " is set to true."); } } try { this.queueStorage = - ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf); + ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf); this.replicationPeers = - 
ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf); + ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf); this.replicationPeers.init(); } catch (Exception e) { throw new IOException("Failed replication handler create", e); @@ -109,14 +110,14 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir } SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); + .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, conf, this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); this.syncReplicationPeerInfoProvider = - new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping); + new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping); PeerActionListener peerActionListener = PeerActionListener.DUMMY; // Get the user-space WAL provider - WALProvider walProvider = walFactory != null? walFactory.getWALProvider(): null; + WALProvider walProvider = walFactory != null ? walFactory.getWALProvider() : null; if (walProvider != null) { walProvider .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); @@ -130,14 +131,13 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir // repeat the action we have done in the first refresh, otherwise when the second refresh // comes we will be in trouble, such as NPE. replicationPeers.getAllPeerIds().stream().map(replicationPeers::getPeer) - .filter(p -> p.getPeerConfig().isSyncReplication()) - .filter(p -> p.getNewSyncReplicationState() != SyncReplicationState.NONE) - .forEach(p -> syncWALProvider.peerSyncReplicationStateChange(p.getId(), - p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); + .filter(p -> p.getPeerConfig().isSyncReplication()) + .filter(p -> p.getNewSyncReplicationState() != SyncReplicationState.NONE) + .forEach(p -> syncWALProvider.peerSyncReplicationStateChange(p.getId(), + p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); } } - this.statsPeriodInSecond = - this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); this.peerProcedureHandler = @@ -158,15 +158,13 @@ public void stopReplicationService() { } /** - * If replication is enabled and this cluster is a master, - * it starts + * If replication is enabled and this cluster is a master, it starts */ @Override public void startReplicationService() throws IOException { this.replicationManager.init(); - this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSourceStatistics", server, - (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); + this.server.getChoreService().scheduleChore(new ReplicationStatisticsChore( + "ReplicationSourceStatistics", server, (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); LOG.info("{} started", this.server.toString()); } @@ -179,7 +177,7 @@ public ReplicationSourceManager getReplicationManager() { } void addHFileRefsToQueue(TableName tableName, byte[] family, List> pairs) - throws IOException { + throws IOException { try { this.replicationManager.addHFileRefs(tableName, family, pairs); } catch 
(IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index 6fb21dcfbcc0..00306dd1702a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -7,23 +7,20 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.replication.regionserver; +import java.util.ArrayList; import java.util.Date; import java.util.List; -import java.util.ArrayList; - import org.apache.hadoop.hbase.util.Strings; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; @@ -47,12 +44,12 @@ public ReplicationLoad() { /** * buildReplicationLoad - * @param sources List of ReplicationSource instances for which metrics should be reported + * @param sources List of ReplicationSource instances for which metrics should be reported * @param sinkMetrics metrics of the replication sink */ public void buildReplicationLoad(final List sources, - final MetricsSink sinkMetrics) { + final MetricsSink sinkMetrics) { if (sinkMetrics != null) { // build the SinkLoad @@ -81,7 +78,7 @@ public void buildReplicationLoad(final List sources, long timeStampOfNextToReplicate = sm.getTimeStampNextToReplicate(); long replicationLag = sm.getReplicationDelay(); ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild = - ClusterStatusProtos.ReplicationLoadSource.newBuilder(); + ClusterStatusProtos.ReplicationLoadSource.newBuilder(); rLoadSourceBuild.setPeerID(peerId); rLoadSourceBuild.setAgeOfLastShippedOp(ageOfLastShippedOp); rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue); @@ -90,12 +87,12 @@ public void buildReplicationLoad(final List sources, rLoadSourceBuild.setTimeStampOfNextToReplicate(timeStampOfNextToReplicate); rLoadSourceBuild.setEditsRead(editsRead); rLoadSourceBuild.setOPsShipped(oPsShipped); - if (source instanceof ReplicationSource){ - ReplicationSource replSource = (ReplicationSource)source; + if (source instanceof ReplicationSource) { + ReplicationSource replSource = (ReplicationSource) source; rLoadSourceBuild.setRecovered(replSource.getReplicationQueueInfo().isQueueRecovered()); rLoadSourceBuild.setQueueId(replSource.getReplicationQueueInfo().getQueueId()); rLoadSourceBuild.setRunning(replSource.isWorkerRunning()); - rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate>0); + rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate > 0); } 
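The replicationLag value attached to each ReplicationLoadSource above comes from MetricsSource.getReplicationDelay(), whose rule is small enough to restate as a standalone sketch (class and method names here are illustrative, not the HBase implementation): once the last shipped edit is at least as recent as the next edit waiting to be replicated, the reported lag is zero; otherwise it is the age of that waiting edit.

    public final class ReplicationLagSketch {
      // Mirrors the rule in MetricsSource.getReplicationDelay(): no pending edit means no lag.
      static long replicationDelay(long lastShippedTs, long nextToReplicateTs, long nowMs) {
        if (lastShippedTs >= nextToReplicateTs) {
          return 0L;
        }
        return nowMs - nextToReplicateTs;
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // An edit written 30 s ago is still waiting to ship: lag is about 30 s.
        System.out.println(replicationDelay(now - 60_000, now - 30_000, now));
        // The shipper has caught up: lag is 0.
        System.out.println(replicationDelay(now, now - 30_000, now));
      }
    }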
this.replicationLoadSourceEntries.add(rLoadSourceBuild.build()); @@ -109,15 +106,13 @@ public void buildReplicationLoad(final List sources, public String sourceToString() { StringBuilder sb = new StringBuilder(); - for (ClusterStatusProtos.ReplicationLoadSource rls : - this.replicationLoadSourceEntries) { + for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceEntries) { sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID()); sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp()); sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue()); - sb = - Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", - (new Date(rls.getTimeStampOfLastShippedOp()).toString())); + sb = Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", + (new Date(rls.getTimeStampOfLastShippedOp()).toString())); sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag()); } @@ -132,12 +127,10 @@ public String sinkToString() { if (this.replicationLoadSink == null) return null; StringBuilder sb = new StringBuilder(); - sb = - Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", - this.replicationLoadSink.getAgeOfLastAppliedOp()); - sb = - Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp", - (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); + sb = Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", + this.replicationLoadSink.getAgeOfLastAppliedOp()); + sb = Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp", + (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java index b7e437f46241..541021f4d5da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; import java.util.List; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -53,24 +50,26 @@ public Optional getRegionObserver() { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="NPE should never happen; if it does it is a bigger issue") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "NPE should never happen; if it does it is a bigger issue") public void preCommitStoreFile(final ObserverContext ctx, - final byte[] family, final List> pairs) throws IOException { + final byte[] family, final List> pairs) throws IOException { RegionCoprocessorEnvironment env = ctx.getEnvironment(); Configuration c = env.getConfiguration(); - if (pairs == null || pairs.isEmpty() || - !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) { + if ( + pairs == null || pairs.isEmpty() + || !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT) + ) { LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded " - + "data replication."); + + "data replication."); return; } // This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is // just going to break. This is all private. Not allowed. Regions shouldn't assume they are // hosted in a RegionServer. TODO: fix. - RegionServerServices rss = ((HasRegionServerServices)env).getRegionServerServices(); - Replication rep = (Replication)((HRegionServer)rss).getReplicationSourceService(); + RegionServerServices rss = ((HasRegionServerServices) env).getRegionServerServices(); + Replication rep = (Replication) ((HRegionServer) rss).getReplicationSourceService(); rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java index 81ec0d9129cd..bbf03620fff1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationRuntimeException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,8 @@ /** * This exception is thrown when a replication source is terminated and source threads got - * interrupted. - * - * It is inherited from RuntimeException so that it can skip all the following processing logic - * and be propagated to the most top level and handled there. + * interrupted. It is inherited from RuntimeException so that it can skip all the following + * processing logic and be propagated to the most top level and handled there. 
*/ @InterfaceAudience.Private public class ReplicationRuntimeException extends RuntimeException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index d1ee0220a9d8..6d03f082b28f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,10 +54,12 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; @@ -65,17 +67,17 @@ /** *
- * This class is responsible for replicating the edits coming - * from another cluster. - *
- * This replication process is currently waiting for the edits to be applied - * before the method can return. This means that the replication of edits - * is synchronized (after reading from WALs in ReplicationSource) and that a - * single region server cannot receive edits from two sources at the same time - *
+ * This class is responsible for replicating the edits coming from another cluster. + *
+ *
+ * This replication process is currently waiting for the edits to be applied before the method can + * return. This means that the replication of edits is synchronized (after reading from WALs in + * ReplicationSource) and that a single region server cannot receive edits from two sources at the + * same time + *
+ *
* This class uses the native HBase client in order to replicate entries. *
    - * * TODO make this class more like ReplicationSource wrt log handling */ @InterfaceAudience.Private @@ -104,11 +106,10 @@ public class ReplicationSink { * @param conf conf object * @throws IOException thrown when HDFS goes bad or bad file name */ - public ReplicationSink(Configuration conf) - throws IOException { + public ReplicationSink(Configuration conf) throws IOException { this.conf = HBaseConfiguration.create(conf); - rowSizeWarnThreshold = conf.getInt( - HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); + rowSizeWarnThreshold = + conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); decorateConf(); this.metrics = new MetricsSink(); this.walEntrySinkFilter = setupWALEntrySinkFilter(); @@ -116,21 +117,22 @@ public ReplicationSink(Configuration conf) DefaultSourceFSConfigurationProvider.class.getCanonicalName()); try { Class c = - Class.forName(className).asSubclass(SourceFSConfigurationProvider.class); + Class.forName(className).asSubclass(SourceFSConfigurationProvider.class); this.provider = c.getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new IllegalArgumentException( - "Configured source fs configuration provider class " + className + " throws error.", e); + "Configured source fs configuration provider class " + className + " throws error.", e); } } private WALEntrySinkFilter setupWALEntrySinkFilter() throws IOException { Class walEntryFilterClass = - this.conf.getClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, null); + this.conf.getClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, null); WALEntrySinkFilter filter = null; try { - filter = walEntryFilterClass == null? null: - (WALEntrySinkFilter)walEntryFilterClass.getDeclaredConstructor().newInstance(); + filter = walEntryFilterClass == null + ? null + : (WALEntrySinkFilter) walEntryFilterClass.getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.warn("Failed to instantiate " + walEntryFilterClass); } @@ -141,14 +143,14 @@ private WALEntrySinkFilter setupWALEntrySinkFilter() throws IOException { } /** - * decorate the Configuration object to make replication more receptive to delays: - * lessen the timeout and numTries. + * decorate the Configuration object to make replication more receptive to delays: lessen the + * timeout and numTries. */ private void decorateConf() { this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - this.conf.getInt("replication.sink.client.retries.number", 4)); + this.conf.getInt("replication.sink.client.retries.number", 4)); this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - this.conf.getInt("replication.sink.client.ops.timeout", 10000)); + this.conf.getInt("replication.sink.client.ops.timeout", 10000)); String replicationCodec = this.conf.get(HConstants.REPLICATION_CODEC_CONF_KEY); if (StringUtils.isNotEmpty(replicationCodec)) { this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec); @@ -162,16 +164,16 @@ private void decorateConf() { /** * Replicate this array of entries directly into the local cluster using the native client. Only * operates against raw protobuf type saving on a conversion from pb to pojo. 
- * @param replicationClusterId Id which will uniquely identify source cluster FS client - * configurations in the replication configuration directory + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace - * directory - * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory + * directory + * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory * @throws IOException If failed to replicate the data */ public void replicateEntries(List entries, final CellScanner cells, - String replicationClusterId, String sourceBaseNamespaceDirPath, - String sourceHFileArchiveDirPath) throws IOException { + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { if (entries.isEmpty()) { return; } @@ -211,7 +213,7 @@ public void replicateEntries(List entries, final CellScanner cells, // Handle bulk load hfiles replication if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); - if(bld.getReplicate()) { + if (bld.getReplicate()) { if (bulkLoadsPerClusters == null) { bulkLoadsPerClusters = new HashMap<>(); } @@ -225,10 +227,9 @@ public void replicateEntries(List entries, final CellScanner cells, // Handle wal replication if (isNewRowOrType(previousCell, cell)) { // Create new mutation - mutation = - CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()); + mutation = CellUtil.isDelete(cell) + ? 
new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); List clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size()); for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) { clusterIds.add(toUUID(clusterId)); @@ -258,16 +259,16 @@ public void replicateEntries(List entries, final CellScanner cells, LOG.debug("Finished replicating mutations."); } - if(bulkLoadsPerClusters != null) { - for (Entry, Map>>>> entry : - bulkLoadsPerClusters.entrySet()) { + if (bulkLoadsPerClusters != null) { + for (Entry, + Map>>>> entry : bulkLoadsPerClusters.entrySet()) { Map>>> bulkLoadHFileMap = entry.getValue(); if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) { LOG.debug("Replicating {} bulk loaded data", entry.getKey().toString()); Configuration providerConf = this.provider.getConf(this.conf, replicationClusterId); try (HFileReplicator hFileReplicator = new HFileReplicator(providerConf, - sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, - getConnection(), entry.getKey())) { + sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, + getConnection(), entry.getKey())) { hFileReplicator.replicate(); LOG.debug("Finished replicating {} bulk loaded data", entry.getKey().toString()); } @@ -286,8 +287,8 @@ public void replicateEntries(List entries, final CellScanner cells, } private void buildBulkLoadHFileMap( - final Map>>> bulkLoadHFileMap, TableName table, - BulkLoadDescriptor bld) throws IOException { + final Map>>> bulkLoadHFileMap, TableName table, + BulkLoadDescriptor bld) throws IOException { List storesList = bld.getStoresList(); int storesSize = storesList.size(); for (int j = 0; j < storesSize; j++) { @@ -304,7 +305,7 @@ private void buildBulkLoadHFileMap( List>> familyHFilePathsList = bulkLoadHFileMap.get(tableName); if (familyHFilePathsList != null) { boolean foundFamily = false; - for (Pair> familyHFilePathsPair : familyHFilePathsList) { + for (Pair> familyHFilePathsPair : familyHFilePathsList) { if (Bytes.equals(familyHFilePathsPair.getFirst(), family)) { // Found family already present, just add the path to the existing list familyHFilePathsPair.getSecond().add(pathToHfileFromNS); @@ -325,15 +326,15 @@ private void buildBulkLoadHFileMap( } private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS, - List>> familyHFilePathsList) { + List>> familyHFilePathsList) { List hfilePaths = new ArrayList<>(1); hfilePaths.add(pathToHfileFromNS); familyHFilePathsList.add(new Pair<>(family, hfilePaths)); } private void addNewTableEntryInMap( - final Map>>> bulkLoadHFileMap, byte[] family, - String pathToHfileFromNS, String tableName) { + final Map>>> bulkLoadHFileMap, byte[] family, + String pathToHfileFromNS, String tableName) { List hfilePaths = new ArrayList<>(1); hfilePaths.add(pathToHfileFromNS); Pair> newFamilyHFilePathsPair = new Pair<>(family, hfilePaths); @@ -343,19 +344,19 @@ private void addNewTableEntryInMap( } private String getHFilePath(TableName table, BulkLoadDescriptor bld, String storeFile, - byte[] family) { + byte[] family) { return new StringBuilder(100).append(table.getNamespaceAsString()).append(Path.SEPARATOR) - .append(table.getQualifierAsString()).append(Path.SEPARATOR) - .append(Bytes.toString(bld.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR) - .append(Bytes.toString(family)).append(Path.SEPARATOR).append(storeFile).toString(); + 
.append(table.getQualifierAsString()).append(Path.SEPARATOR) + .append(Bytes.toString(bld.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR) + .append(Bytes.toString(family)).append(Path.SEPARATOR).append(storeFile).toString(); } /** * @return True if we have crossed over onto a new row or type */ private boolean isNewRowOrType(final Cell previousCell, final Cell cell) { - return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || - !CellUtil.matchingRows(previousCell, cell); + return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(previousCell, cell); } private java.util.UUID toUUID(final HBaseProtos.UUID uuid) { @@ -363,12 +364,11 @@ private java.util.UUID toUUID(final HBaseProtos.UUID uuid) { } /** - * Simple helper to a map from key to (a list of) values - * TODO: Make a general utility method + * Simple helper to a map from key to (a list of) values TODO: Make a general utility method * @return the list of values corresponding to key1 and key2 */ - private List addToHashMultiMap(Map>> map, K1 key1, - K2 key2, V value) { + private List addToHashMultiMap(Map>> map, K1 key1, K2 key2, + V value) { Map> innerMap = map.computeIfAbsent(key1, k -> new HashMap<>()); List values = innerMap.computeIfAbsent(key2, k -> new ArrayList<>()); values.add(value); @@ -393,15 +393,14 @@ public void stopReplicationSinkServices() { } } - /** * Do the changes and handle the pool - * @param tableName table to insert into - * @param allRows list of actions + * @param tableName table to insert into + * @param allRows list of actions * @param batchRowSizeThreshold rowSize threshold for batch mutation */ private void batch(TableName tableName, Collection> allRows, int batchRowSizeThreshold) - throws IOException { + throws IOException { if (allRows.isEmpty()) { return; } @@ -446,19 +445,19 @@ private AsyncClusterConnection getConnection() throws IOException { /** * Get a string representation of this sink's metrics - * @return string with the total replicated edits count and the date - * of the last edit that was applied + * @return string with the total replicated edits count and the date of the last edit that was + * applied */ public String getStats() { long total = this.totalReplicatedEdits.get(); - return total == 0 ? "" - : "Sink: " + "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + - ", total replicated edits: " + total; + return total == 0 + ? "" + : "Sink: " + "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + + ", total replicated edits: " + total; } /** - * Get replication Sink Metrics - * @return MetricsSink + * Get replication Sink Metrics n */ public MetricsSink getSinkMetrics() { return this.metrics; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 34409030a8c0..ed138abfad73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -70,14 +70,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Class that handles the source of a replication stream. - * Currently does not handle more than 1 slave cluster. - * For each slave cluster it selects a random number of peers - * using a replication ratio. 
For example, if replication ration = 0.1 - * and slave cluster has 100 region servers, 10 will be selected. + * Class that handles the source of a replication stream. Currently does not handle more than 1 + * slave cluster. For each slave cluster it selects a random number of peers using a replication + * ratio. For example, if replication ration = 0.1 and slave cluster has 100 region servers, 10 will + * be selected. *
- * A stream is considered down when we cannot contact a region server on the - * peer cluster for more than 55 seconds by default. + * A stream is considered down when we cannot contact a region server on the peer cluster for more + * than 55 seconds by default. *
    */ @InterfaceAudience.Private @@ -116,12 +115,12 @@ public class ReplicationSource implements ReplicationSourceInterface { private volatile ReplicationEndpoint replicationEndpoint; private boolean abortOnError; - //This is needed for the startup loop to identify when there's already - //an initialization happening (but not finished yet), - //so that it doesn't try submit another initialize thread. - //NOTE: this should only be set to false at the end of initialize method, prior to return. + // This is needed for the startup loop to identify when there's already + // an initialization happening (but not finished yet), + // so that it doesn't try submit another initialize thread. + // NOTE: this should only be set to false at the end of initialize method, prior to return. private AtomicBoolean startupOngoing = new AtomicBoolean(false); - //Flag that signalizes uncaught error happening while starting up the source + // Flag that signalizes uncaught error happening while starting up the source // and a retry should be attempted private AtomicBoolean retryStartup = new AtomicBoolean(false); @@ -136,7 +135,7 @@ public class ReplicationSource implements ReplicationSourceInterface { private long currentBandwidth; private WALFileLengthProvider walFileLengthProvider; protected final ConcurrentHashMap workerThreads = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private AtomicLong totalBufferUsed; @@ -148,17 +147,17 @@ public class ReplicationSource implements ReplicationSourceInterface { private Thread initThread; /** - * WALs to replicate. - * Predicate that returns 'true' for WALs to replicate and false for WALs to skip. + * WALs to replicate. Predicate that returns 'true' for WALs to replicate and false for WALs to + * skip. */ private final Predicate filterInWALs; /** - * Base WALEntry filters for this class. Unmodifiable. Set on construction. - * Filters *out* edits we do not want replicated, passed on to replication endpoints. - * This is the basic set. Down in #initializeWALEntryFilter this set is added to the end of - * the WALEntry filter chain. These are put after those that we pick up from the configured - * endpoints and other machinations to create the final {@link #walEntryFilter}. + * Base WALEntry filters for this class. Unmodifiable. Set on construction. Filters *out* edits we + * do not want replicated, passed on to replication endpoints. This is the basic set. Down in + * #initializeWALEntryFilter this set is added to the end of the WALEntry filter chain. These are + * put after those that we pick up from the configured endpoints and other machinations to create + * the final {@link #walEntryFilter}. * @see WALEntryFilter */ private final List baseFilterOutWALEntries; @@ -170,10 +169,11 @@ public class ReplicationSource implements ReplicationSourceInterface { } /** - * @param replicateWAL Pass a filter to run against WAL Path; filter *in* WALs to Replicate; - * i.e. return 'true' if you want to replicate the content of the WAL. + * @param replicateWAL Pass a filter to run against WAL Path; filter *in* WALs to + * Replicate; i.e. return 'true' if you want to replicate the + * content of the WAL. * @param baseFilterOutWALEntries Base set of filters you want applied always; filters *out* - * WALEntries so they never make it out of this ReplicationSource. + * WALEntries so they never make it out of this ReplicationSource. 
*/ ReplicationSource(Predicate replicateWAL, List baseFilterOutWALEntries) { this.filterInWALs = replicateWAL; @@ -182,19 +182,19 @@ public class ReplicationSource implements ReplicationSourceInterface { /** * Instantiation method used by region servers - * @param conf configuration to use - * @param fs file system to use - * @param manager replication manager to ping to - * @param server the server for this region server - * @param queueId the id of our replication queue + * @param conf configuration to use + * @param fs file system to use + * @param manager replication manager to ping to + * @param server the server for this region server + * @param queueId the id of our replication queue * @param clusterId unique UUID for the cluster - * @param metrics metrics for replication source + * @param metrics metrics for replication source */ @Override public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, - String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, - MetricsSource metrics) throws IOException { + ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, + String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, + MetricsSource metrics) throws IOException { this.server = server; this.conf = HBaseConfiguration.create(conf); this.waitOnEndpointSeconds = @@ -223,8 +223,7 @@ public void init(Configuration conf, FileSystem fs, ReplicationSourceManager man this.totalBufferUsed = manager.getTotalBufferUsed(); this.walFileLengthProvider = walFileLengthProvider; - this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", - true); + this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", true); LOG.info("queueId={}, ReplicationSource: {}, currentBandwidth={}", queueId, replicationPeer.getId(), this.currentBandwidth); @@ -268,7 +267,7 @@ public Map> getQueues() { @Override public void addHFileRefs(TableName tableName, byte[] family, List> pairs) - throws ReplicationException { + throws ReplicationException { String peerId = replicationPeer.getId(); if (replicationPeer.getPeerConfig().needToReplicate(tableName, family)) { this.queueStorage.addHFileRefs(peerId, pairs); @@ -280,7 +279,7 @@ public void addHFileRefs(TableName tableName, byte[] family, List this.uncaughtException(t, e, this.manager, this.getPeerId())); + walReader, Thread.currentThread().getName() + ".replicationSource.wal-reader." 
+ + walGroupId + "," + queueId, + (t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); worker.setWALReader(walReader); - worker.startup((t,e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); + worker.startup((t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); return worker; } }); @@ -380,14 +377,10 @@ public Map getWalGroupStatus() { LOG.warn("{} No replication ongoing, waiting for new log", logPeerId()); } ReplicationStatus.ReplicationStatusBuilder statusBuilder = ReplicationStatus.newBuilder(); - statusBuilder.withPeerId(this.getPeerId()) - .withQueueSize(queueSize) - .withWalGroup(walGroupId) - .withCurrentPath(currentPath) - .withCurrentPosition(shipper.getCurrentPosition()) - .withFileSize(fileSize) - .withAgeOfLastShippedOp(ageOfLastShippedOp) - .withReplicationDelay(replicationDelay); + statusBuilder.withPeerId(this.getPeerId()).withQueueSize(queueSize).withWalGroup(walGroupId) + .withCurrentPath(currentPath).withCurrentPosition(shipper.getCurrentPosition()) + .withFileSize(fileSize).withAgeOfLastShippedOp(ageOfLastShippedOp) + .withReplicationDelay(replicationDelay); sourceReplicationStatus.put(this.getPeerId() + "=>" + walGroupId, statusBuilder.build()); } return sourceReplicationStatus; @@ -415,9 +408,9 @@ protected ReplicationSourceShipper createNewShipper(String walGroupId) { private ReplicationSourceWALReader createNewWALReader(String walGroupId, long startPosition) { return replicationPeer.getPeerConfig().isSerial() ? new SerialReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, - this, walGroupId) - : new ReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, - this, walGroupId); + this, walGroupId) + : new ReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, this, + walGroupId); } /** @@ -428,15 +421,14 @@ WALEntryFilter getWalEntryFilter() { return walEntryFilter; } - protected final void uncaughtException(Thread t, Throwable e, - ReplicationSourceManager manager, String peerId) { + protected final void uncaughtException(Thread t, Throwable e, ReplicationSourceManager manager, + String peerId) { OOMEChecker.exitIfOOME(e, getClass().getSimpleName()); - LOG.error("Unexpected exception in {} currentPath={}", - t.getName(), getCurrentPath(), e); - if(abortOnError){ + LOG.error("Unexpected exception in {} currentPath={}", t.getName(), getCurrentPath(), e); + if (abortOnError) { server.abort("Unexpected exception in " + t.getName(), e); } - if(manager != null){ + if (manager != null) { while (true) { try { LOG.info("Refreshing replication sources now due to previous error on thread: {}", @@ -445,8 +437,7 @@ protected final void uncaughtException(Thread t, Throwable e, break; } catch (IOException e1) { LOG.error("Replication sources refresh failed.", e1); - sleepForRetries("Sleeping before try refreshing sources again", - maxRetriesMultiplier); + sleepForRetries("Sleeping before try refreshing sources again", maxRetriesMultiplier); } } } @@ -496,19 +487,19 @@ private long getCurrentBandwidth() { /** * Do the sleeping logic - * @param msg Why we sleep + * @param msg Why we sleep * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ protected boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { - LOG.trace("{} {}, sleeping {} times {}", - logPeerId(), msg, sleepForRetries, sleepMultiplier); + LOG.trace("{} {}, sleeping {} 
times {}", logPeerId(), msg, sleepForRetries, + sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); } Thread.currentThread().interrupt(); @@ -559,7 +550,7 @@ private void initialize() { for (;;) { peerClusterId = replicationEndpoint.getPeerUUID(); if (this.isSourceActive() && peerClusterId == null) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("{} Could not connect to Peer ZK. Sleeping for {} millis", logPeerId(), (this.sleepForRetries * sleepMultiplier)); } @@ -571,17 +562,16 @@ private void initialize() { } } - if(!this.isSourceActive()) { + if (!this.isSourceActive()) { retryStartup.set(!this.abortOnError); setSourceStartupStatus(false); throw new IllegalStateException("Source should be active."); } - LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", - logPeerId(), this.replicationQueueInfo.getQueueId(), logQueue.getNumQueues(), clusterId, - peerClusterId); + LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", logPeerId(), + this.replicationQueueInfo.getQueueId(), logQueue.getNumQueues(), clusterId, peerClusterId); initializeWALEntryFilter(peerClusterId); // Start workers - for (String walGroupId: logQueue.getQueues().keySet()) { + for (String walGroupId : logQueue.getQueues().keySet()) { tryStartNewShipper(walGroupId); } setSourceStartupStatus(false); @@ -605,22 +595,21 @@ public ReplicationSourceInterface startup() { setSourceStartupStatus(true); initThread = new Thread(this::initialize); Threads.setDaemonThreadRunning(initThread, - Thread.currentThread().getName() + ".replicationSource," + this.queueId, - (t,e) -> { - //if first initialization attempt failed, and abortOnError is false, we will - //keep looping in this thread until initialize eventually succeeds, - //while the server main startup one can go on with its work. + Thread.currentThread().getName() + ".replicationSource," + this.queueId, (t, e) -> { + // if first initialization attempt failed, and abortOnError is false, we will + // keep looping in this thread until initialize eventually succeeds, + // while the server main startup one can go on with its work. 
sourceRunning = false; uncaughtException(t, e, null, null); retryStartup.set(!this.abortOnError); do { - if(retryStartup.get()) { + if (retryStartup.get()) { this.sourceRunning = true; setSourceStartupStatus(true); retryStartup.set(false); try { initialize(); - } catch(Throwable error){ + } catch (Throwable error) { setSourceStartupStatus(false); uncaughtException(t, error, null, null); retryStartup.set(!this.abortOnError); @@ -646,13 +635,12 @@ public void terminate(String reason, Exception cause, boolean clearMetrics) { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, - boolean join) { + public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { - LOG.error(String.format("%s Closing source %s because an error occurred: %s", - logPeerId(), this.queueId, reason), cause); + LOG.error(String.format("%s Closing source %s because an error occurred: %s", logPeerId(), + this.queueId, reason), cause); } this.sourceRunning = false; if (initThread != null && Thread.currentThread() != initThread) { @@ -666,7 +654,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, for (ReplicationSourceShipper worker : workers) { worker.stopWorker(); - if(worker.entryReader != null) { + if (worker.entryReader != null) { worker.entryReader.setReaderRunning(false); } } @@ -694,8 +682,8 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, } } if (!server.isAborted() && !server.isStopped()) { - //If server is running and worker is already stopped but there was still entries batched, - //we need to clear buffer used for non processed entries + // If server is running and worker is already stopped but there was still entries batched, + // we need to clear buffer used for non processed entries worker.clearWALEntryBatch(); } } @@ -748,9 +736,9 @@ public ReplicationQueueInfo getReplicationQueueInfo() { return replicationQueueInfo; } - public boolean isWorkerRunning(){ - for(ReplicationSourceShipper worker : this.workerThreads.values()){ - if(worker.isActive()){ + public boolean isWorkerRunning() { + for (ReplicationSourceShipper worker : this.workerThreads.values()) { + if (worker.isActive()) { return worker.isActive(); } } @@ -761,7 +749,7 @@ public boolean isWorkerRunning(){ public String getStats() { StringBuilder sb = new StringBuilder(); sb.append("Total replicated edits: ").append(totalReplicatedEdits) - .append(", current progress: \n"); + .append(", current progress: \n"); for (Map.Entry entry : workerThreads.entrySet()) { String walGroupId = entry.getKey(); ReplicationSourceShipper worker = entry.getValue(); @@ -770,7 +758,7 @@ public String getStats() { sb.append("walGroup [").append(walGroupId).append("]: "); if (currentPath != null) { sb.append("currently replicating from: ").append(currentPath).append(" at position: ") - .append(position).append("\n"); + .append(position).append("\n"); } else { sb.append("no replication ongoing, waiting for new log"); } @@ -784,7 +772,7 @@ public MetricsSource getSourceMetrics() { } @Override - //offsets totalBufferUsed by deducting shipped batchSize. + // offsets totalBufferUsed by deducting shipped batchSize. 
public void postShipEdits(List entries, int batchSize) { if (throttler.isEnabled()) { throttler.addPushSize(batchSize); @@ -823,7 +811,7 @@ void removeWorker(ReplicationSourceShipper worker) { workerThreads.remove(worker.walGroupId, worker); } - public String logPeerId(){ + public String logPeerId() { return "peerId=" + this.getPeerId() + ","; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java index b05590279e9b..331f2269cf9d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,30 +24,30 @@ import org.slf4j.LoggerFactory; /** - * Constructs a {@link ReplicationSourceInterface} - * Note, not used to create specialized ReplicationSources + * Constructs a {@link ReplicationSourceInterface} Note, not used to create specialized + * ReplicationSources */ @InterfaceAudience.Private public final class ReplicationSourceFactory { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSourceFactory.class); - private ReplicationSourceFactory() {} + private ReplicationSourceFactory() { + } static ReplicationSourceInterface create(Configuration conf, String queueId) { ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queueId); boolean isQueueRecovered = replicationQueueInfo.isQueueRecovered(); ReplicationSourceInterface src; try { - String defaultReplicationSourceImpl = - isQueueRecovered ? RecoveredReplicationSource.class.getCanonicalName() - : ReplicationSource.class.getCanonicalName(); + String defaultReplicationSourceImpl = isQueueRecovered + ? RecoveredReplicationSource.class.getCanonicalName() + : ReplicationSource.class.getCanonicalName(); Class c = Class.forName( conf.get("replication.replicationsource.implementation", defaultReplicationSourceImpl)); src = c.asSubclass(ReplicationSourceInterface.class).getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.warn("Passed replication source implementation throws errors, " - + "defaulting to ReplicationSource", - e); + + "defaulting to ReplicationSource", e); src = isQueueRecovered ? new RecoveredReplicationSource() : new ReplicationSource(); } return src; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 27e4b79c141b..6b09e8a1de91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,15 +43,15 @@ public interface ReplicationSourceInterface { /** * Initializer for the source - * @param conf the configuration to use - * @param fs the file system to use + * @param conf the configuration to use + * @param fs the file system to use * @param manager the manager to use - * @param server the server for this region server + * @param server the server for this region server */ void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, - String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, - MetricsSource metrics) throws IOException; + ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, + String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, + MetricsSource metrics) throws IOException; /** * Add a log to the list of logs to replicate @@ -63,13 +62,13 @@ void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, /** * Add hfile names to the queue to be replicated. * @param tableName Name of the table these files belongs to - * @param family Name of the family these files belong to - * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which - * will be added in the queue for replication} + * @param family Name of the family these files belong to + * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir + * which will be added in the queue for replication} * @throws ReplicationException If failed to add hfile references */ void addHFileRefs(TableName tableName, byte[] family, List> pairs) - throws ReplicationException; + throws ReplicationException; /** * Start the replication @@ -85,14 +84,14 @@ void addHFileRefs(TableName tableName, byte[] family, List> pai /** * End the replication * @param reason why it's terminating - * @param cause the error that's causing it + * @param cause the error that's causing it */ void terminate(String reason, Exception cause); /** * End the replication - * @param reason why it's terminating - * @param cause the error that's causing it + * @param reason why it's terminating + * @param cause the error that's causing it * @param clearMetrics removes all metrics about this Source */ void terminate(String reason, Exception cause, boolean clearMetrics); @@ -105,7 +104,6 @@ void addHFileRefs(TableName tableName, byte[] family, List> pai /** * Get the queue id that the source is replicating to - * * @return queue id */ String getQueueId(); @@ -125,8 +123,7 @@ default String getPeerId() { ReplicationPeer getPeer(); /** - * Get a string representation of the current statistics - * for this source + * Get a string representation of the current statistics for this source * @return printable stats */ String getStats(); @@ -144,6 +141,7 @@ default boolean isPeerEnabled() { default boolean isSyncReplication() { return getPeer().getPeerConfig().isSyncReplication(); } + /** * @return active or not */ @@ -177,7 +175,7 @@ default boolean isSyncReplication() { /** * Call this after the shipper thread ship some entries to peer cluster. 
- * @param entries pushed + * @param entries pushed * @param batchSize entries size pushed */ void postShipEdits(List entries, int batchSize); @@ -210,10 +208,10 @@ default boolean isRecovered() { ReplicationQueueStorage getReplicationQueueStorage(); /** - * Log the current position to storage. Also clean old logs from the replication queue. - * Use to bypass the default call to - * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, - * WALEntryBatch)} whem implementation does not need to persist state to backing storage. + * Log the current position to storage. Also clean old logs from the replication queue. Use to + * bypass the default call to + * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, WALEntryBatch)} + * whem implementation does not need to persist state to backing storage. * @param entryBatch the wal entry batch we just shipped * @return The instance of queueStorage used by this ReplicationSource. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java index 4d89edef5fdc..a0e6f1b8d1fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java @@ -50,7 +50,7 @@ public class ReplicationSourceLogQueue { private ReplicationSource source; public ReplicationSourceLogQueue(Configuration conf, MetricsSource metrics, - ReplicationSource source) { + ReplicationSource source) { this.conf = conf; this.metrics = metrics; this.source = source; @@ -60,7 +60,7 @@ public ReplicationSourceLogQueue(Configuration conf, MetricsSource metrics, /** * Enqueue the wal - * @param wal wal to be enqueued + * @param wal wal to be enqueued * @param walGroupId Key for the wal in @queues map * @return boolean whether this is the first time we are seeing this walGroupId. */ @@ -85,9 +85,9 @@ public boolean enqueueLog(Path wal, String walGroupId) { // This will wal a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { - LOG.warn("{} WAL group {} queue size: {} exceeds value of " + - "replication.source.log.queue.warn {}", source.logPeerId(), walGroupId, queueSize, - logQueueWarnThreshold); + LOG.warn( + "{} WAL group {} queue size: {} exceeds value of " + "replication.source.log.queue.warn {}", + source.logPeerId(), walGroupId, queueSize, logQueueWarnThreshold); } return exists; } @@ -116,9 +116,8 @@ public Map> getQueues() { } /** - * Return queue for the given walGroupId - * Please don't add or remove elements from the returned queue. - * Use @enqueueLog and @remove methods respectively. + * Return queue for the given walGroupId Please don't add or remove elements from the returned + * queue. Use @enqueueLog and @remove methods respectively. * @param walGroupId walGroupId */ public PriorityBlockingQueue getQueue(String walGroupId) { @@ -156,7 +155,7 @@ public void clear(String walGroupId) { } /* - Returns the age of oldest wal. + * Returns the age of oldest wal. */ long getOldestWalAge() { long now = EnvironmentEdgeManager.currentTime(); @@ -171,8 +170,8 @@ long getOldestWalAge() { } /* - Get the oldest wal timestamp from all the queues. - */ + * Get the oldest wal timestamp from all the queues. 
+ */ private long getOldestWalTimestamp() { long oldestWalTimestamp = Long.MAX_VALUE; for (Map.Entry> entry : queues.entrySet()) { @@ -180,8 +179,8 @@ private long getOldestWalTimestamp() { Path path = queue.peek(); // Can path ever be null ? if (path != null) { - oldestWalTimestamp = Math.min(oldestWalTimestamp, - AbstractFSWALProvider.WALStartTimeComparator.getTS(path)); + oldestWalTimestamp = + Math.min(oldestWalTimestamp, AbstractFSWALProvider.WALStartTimeComparator.getTS(path)); } } return oldestWalTimestamp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index faa654dcf282..2973db521bd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -125,10 +125,9 @@ public class ReplicationSourceManager { private final List oldsources; /** - * Storage for queues that need persistance; e.g. Replication state so can be recovered - * after a crash. queueStorage upkeep is spread about this class and passed - * to ReplicationSource instances for these to do updates themselves. Not all ReplicationSource - * instances keep state. + * Storage for queues that need persistance; e.g. Replication state so can be recovered after a + * crash. queueStorage upkeep is spread about this class and passed to ReplicationSource instances + * for these to do updates themselves. Not all ReplicationSource instances keep state. */ private final ReplicationQueueStorage queueStorage; @@ -180,18 +179,17 @@ public class ReplicationSourceManager { /** * Creates a replication manager and sets the watch on all the other registered region servers * @param queueStorage the interface for manipulating replication queues - * @param conf the configuration to use - * @param server the server for this region server - * @param fs the file system to use - * @param logDir the directory that contains all wal directories of live RSs - * @param oldLogDir the directory where old logs are archived + * @param conf the configuration to use + * @param server the server for this region server + * @param fs the file system to use + * @param logDir the directory that contains all wal directories of live RSs + * @param oldLogDir the directory where old logs are archived */ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, - ReplicationPeers replicationPeers, Configuration conf, - Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID clusterId, - WALFactory walFactory, - SyncReplicationPeerMappingManager syncReplicationPeerMappingManager, - MetricsReplicationGlobalSourceSource globalMetrics) throws IOException { + ReplicationPeers replicationPeers, Configuration conf, Server server, FileSystem fs, + Path logDir, Path oldLogDir, UUID clusterId, WALFactory walFactory, + SyncReplicationPeerMappingManager syncReplicationPeerMappingManager, + MetricsReplicationGlobalSourceSource globalMetrics) throws IOException { this.sources = new ConcurrentHashMap<>(); this.queueStorage = queueStorage; this.replicationPeers = replicationPeers; @@ -213,8 +211,8 @@ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, int nbWorkers = conf.getInt("replication.executor.workers", 1); // use a short 100ms sleep since this could be done inline with a RS startup // even if we 
fail, other region servers can take care of it - this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, - TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); + this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>()); ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); tfb.setNameFormat("ReplicationExecutor-%d"); tfb.setDaemon(true); @@ -226,7 +224,7 @@ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, this.maxRetriesMultiplier = this.conf.getInt("replication.source.sync.maxretriesmultiplier", 60); this.totalBufferLimit = conf.getLong(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, - HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT); + HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT); this.globalMetrics = globalMetrics; } @@ -294,8 +292,8 @@ public void removePeer(String peerId) { removeRecoveredSource(src); } } - LOG.info( - "Number of deleted recovered sources for " + peerId + ": " + oldSourcesToDelete.size()); + LOG + .info("Number of deleted recovered sources for " + peerId + ": " + oldSourcesToDelete.size()); // Now close the normal source for this peer ReplicationSourceInterface srcToRemove = this.sources.get(peerId); if (srcToRemove != null) { @@ -322,14 +320,14 @@ public void removePeer(String peerId) { * @see #createCatalogReplicationSource(RegionInfo) for creating a ReplicationSource for meta. */ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer replicationPeer) - throws IOException { + throws IOException { ReplicationSourceInterface src = ReplicationSourceFactory.create(conf, queueId); // Init the just created replication source. Pass the default walProvider's wal file length // provider. Presumption is we replicate user-space Tables only. For hbase:meta region replica // replication, see #createCatalogReplicationSource(). - WALFileLengthProvider walFileLengthProvider = - this.walFactory.getWALProvider() != null? - this.walFactory.getWALProvider().getWALFileLengthProvider() : p -> OptionalLong.empty(); + WALFileLengthProvider walFileLengthProvider = this.walFactory.getWALProvider() != null + ? this.walFactory.getWALProvider().getWALFileLengthProvider() + : p -> OptionalLong.empty(); src.init(conf, fs, this, queueStorage, replicationPeer, server, queueId, clusterId, walFileLengthProvider, new MetricsSource(queueId)); return src; @@ -344,8 +342,10 @@ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer */ void addSource(String peerId) throws IOException { ReplicationPeer peer = replicationPeers.getPeer(peerId); - if (ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME - .equals(peer.getPeerConfig().getReplicationEndpointImpl())) { + if ( + ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME + .equals(peer.getPeerConfig().getReplicationEndpointImpl()) + ) { // we do not use this endpoint for region replication any more, see HBASE-26233 LOG.info("Legacy region replication peer found, skip adding: {}", peer.getPeerConfig()); return; @@ -394,8 +394,8 @@ void addSource(String peerId) throws IOException { * @param peerId the id of the sync replication peer */ public void drainSources(String peerId) throws IOException, ReplicationException { - String terminateMessage = "Sync replication peer " + peerId + - " is transiting to STANDBY. Will close the previous replication source and open a new one"; + String terminateMessage = "Sync replication peer " + peerId + + " is transiting to STANDBY. 
Will close the previous replication source and open a new one"; ReplicationPeer peer = replicationPeers.getPeer(peerId); assert peer.getPeerConfig().isSyncReplication(); ReplicationSourceInterface src = createSource(peerId, peer); @@ -458,8 +458,8 @@ public void drainSources(String peerId) throws IOException, ReplicationException * @param peerId the id of the replication peer */ public void refreshSources(String peerId) throws IOException { - String terminateMessage = "Peer " + peerId + - " state or config changed. Will close the previous replication source and open a new one"; + String terminateMessage = "Peer " + peerId + + " state or config changed. Will close the previous replication source and open a new one"; ReplicationPeer peer = replicationPeers.getPeer(peerId); ReplicationSourceInterface src = createSource(peerId, peer); // synchronized on latestPaths to avoid missing the new log @@ -482,7 +482,7 @@ public void refreshSources(String peerId) throws IOException { synchronized (this.oldsources) { List previousQueueIds = new ArrayList<>(); for (Iterator iter = this.oldsources.iterator(); iter - .hasNext();) { + .hasNext();) { ReplicationSourceInterface oldSource = iter.next(); if (oldSource.getPeerId().equals(peerId)) { previousQueueIds.add(oldSource.getQueueId()); @@ -563,9 +563,11 @@ private void interruptOrAbortWhenFail(ReplicationQueueOperation op) { try { op.exec(); } catch (ReplicationException e) { - if (e.getCause() != null && e.getCause() instanceof KeeperException.SystemErrorException - && e.getCause().getCause() != null && e.getCause() - .getCause() instanceof InterruptedException) { + if ( + e.getCause() != null && e.getCause() instanceof KeeperException.SystemErrorException + && e.getCause().getCause() != null + && e.getCause().getCause() instanceof InterruptedException + ) { // ReplicationRuntimeException(a RuntimeException) is thrown out here. The reason is // that thread is interrupted deep down in the stack, it should pass the following // processing logic and propagate to the most top layer which can handle this exception @@ -606,11 +608,11 @@ private void abortAndThrowIOExceptionWhenFail(ReplicationQueueOperation op) thro /** * This method will log the current position to storage. And also clean old logs from the * replication queue. - * @param source the replication source + * @param source the replication source * @param entryBatch the wal entry batch we just shipped */ public void logPositionAndCleanOldLogs(ReplicationSourceInterface source, - WALEntryBatch entryBatch) { + WALEntryBatch entryBatch) { String fileName = entryBatch.getLastWalPath().getName(); interruptOrAbortWhenFail(() -> this.queueStorage.setWALPosition(server.getServerName(), source.getQueueId(), fileName, entryBatch.getLastWalPosition(), entryBatch.getLastSeqIds())); @@ -620,9 +622,9 @@ public void logPositionAndCleanOldLogs(ReplicationSourceInterface source, /** * Cleans a log file and all older logs from replication queue. Called when we are sure that a log * file is closed and has no more entries. 
- * @param log Path to the log + * @param log Path to the log * @param inclusive whether we should also remove the given log file - * @param source the replication source + * @param source the replication source */ void cleanOldLogs(String log, boolean inclusive, ReplicationSourceInterface source) { String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log); @@ -663,7 +665,7 @@ void cleanOldLogs(String log, boolean inclusive, ReplicationSourceInterface sour } private void removeRemoteWALs(String peerId, String remoteWALDir, Collection wals) - throws IOException { + throws IOException { Path remoteWALDirForPeer = ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId); FileSystem fs = ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir); for (String wal : wals) { @@ -694,9 +696,9 @@ private void cleanOldLogs(NavigableSet wals, ReplicationSourceInterface // Filter out the wals need to be removed from the remote directory. Its name should be the // special format, and also, the peer id in its name should match the peer id for the // replication source. - List remoteWals = wals.stream().filter(w -> SyncReplicationWALProvider - .getSyncReplicationPeerIdFromWALName(w).map(peerId::equals).orElse(false)) - .collect(Collectors.toList()); + List remoteWals = + wals.stream().filter(w -> SyncReplicationWALProvider.getSyncReplicationPeerIdFromWALName(w) + .map(peerId::equals).orElse(false)).collect(Collectors.toList()); LOG.debug("Removing {} logs from remote dir {} in the list: {}", remoteWals.size(), remoteWALDir, remoteWals); if (!remoteWals.isEmpty()) { @@ -712,8 +714,10 @@ private void cleanOldLogs(NavigableSet wals, ReplicationSourceInterface // skip the following operations return; } - if (ReplicationUtils.sleepForRetries("Failed to delete remote wals", sleepForRetries, - sleepMultiplier, maxRetriesMultiplier)) { + if ( + ReplicationUtils.sleepForRetries("Failed to delete remote wals", sleepForRetries, + sleepMultiplier, maxRetriesMultiplier) + ) { sleepMultiplier++; } } @@ -780,8 +784,8 @@ public void postLogRoll(Path newLog) throws IOException { // This only updates the sources we own, not the recovered ones for (ReplicationSourceInterface source : this.sources.values()) { source.enqueueLog(newLog); - LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", - newLog, source.getQueueId()); + LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", newLog, + source.getQueueId()); } } @@ -789,8 +793,8 @@ void claimQueue(ServerName deadRS, String queue) { // Wait a bit before transferring the queues, we may be shutting down. // This sleep may not be enough in some cases. try { - Thread.sleep(sleepBeforeFailover + - (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover)); + Thread.sleep(sleepBeforeFailover + + (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover)); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting before transferring a queue."); Thread.currentThread().interrupt(); @@ -818,10 +822,9 @@ void claimQueue(ServerName deadRS, String queue) { claimedQueue = queueStorage.claimQueue(deadRS, queue, server.getServerName()); } catch (ReplicationException e) { LOG.error( - "ReplicationException: cannot claim dead region ({})'s " + - "replication queue. Znode : ({})" + - " Possible solution: check if znode size exceeds jute.maxBuffer value. " + - " If so, increase it for both client and server side.", + "ReplicationException: cannot claim dead region ({})'s " + "replication queue. 
Znode : ({})" + + " Possible solution: check if znode size exceeds jute.maxBuffer value. " + + " If so, increase it for both client and server side.", deadRS, queueStorage.getRsNode(deadRS), e); server.abort("Failed to claim queue from dead regionserver.", e); return; @@ -837,8 +840,10 @@ void claimQueue(ServerName deadRS, String queue) { abortWhenFail(() -> queueStorage.removeQueue(server.getServerName(), queueId)); return; } - if (server instanceof ReplicationSyncUp.DummyServer && - peer.getPeerState().equals(PeerState.DISABLED)) { + if ( + server instanceof ReplicationSyncUp.DummyServer + && peer.getPeerState().equals(PeerState.DISABLED) + ) { LOG.warn( "Peer {} is disabled. ReplicationSyncUp tool will skip " + "replicating data to this peer.", peerId); @@ -868,9 +873,11 @@ void claimQueue(ServerName deadRS, String queue) { if (peer.getPeerConfig().isSyncReplication()) { Pair stateAndNewState = peer.getSyncReplicationStateAndNewState(); - if ((stateAndNewState.getFirst().equals(SyncReplicationState.STANDBY) && - stateAndNewState.getSecond().equals(SyncReplicationState.NONE)) || - stateAndNewState.getSecond().equals(SyncReplicationState.STANDBY)) { + if ( + (stateAndNewState.getFirst().equals(SyncReplicationState.STANDBY) + && stateAndNewState.getSecond().equals(SyncReplicationState.NONE)) + || stateAndNewState.getSecond().equals(SyncReplicationState.STANDBY) + ) { src.terminate("Sync replication peer is in STANDBY state"); deleteQueue(queueId); return; @@ -980,8 +987,8 @@ public AtomicLong getTotalBufferUsed() { } /** - * Returns the maximum size in bytes of edits held in memory which are pending replication - * across all sources inside this RegionServer. + * Returns the maximum size in bytes of edits held in memory which are pending replication across + * all sources inside this RegionServer. */ public long getTotalBufferLimit() { return totalBufferLimit; @@ -1027,7 +1034,7 @@ public String getStats() { // Print stats that apply across all Replication Sources stats.append("Global stats: "); stats.append("WAL Edits Buffer Used=").append(getTotalBufferUsed().get()).append("B, Limit=") - .append(getTotalBufferLimit()).append("B\n"); + .append(getTotalBufferLimit()).append("B\n"); for (ReplicationSourceInterface source : this.sources.values()) { stats.append("Normal source for cluster " + source.getPeerId() + ": "); stats.append(source.getStats() + "\n"); @@ -1040,7 +1047,7 @@ public String getStats() { } public void addHFileRefs(TableName tableName, byte[] family, List> pairs) - throws IOException { + throws IOException { for (ReplicationSourceInterface source : this.sources.values()) { throwIOExceptionWhenFail(() -> source.addHFileRefs(tableName, family, pairs)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index 33869dbf7c7f..1c9bed8bb584 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,10 @@ import static org.apache.hadoop.hbase.replication.ReplicationUtils.getAdaptiveTimeout; import static org.apache.hadoop.hbase.replication.ReplicationUtils.sleepForRetries; + import java.io.IOException; import java.util.List; import java.util.concurrent.atomic.LongAccumulator; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -36,6 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -51,7 +52,7 @@ public class ReplicationSourceShipper extends Thread { public enum WorkerState { RUNNING, STOPPED, - FINISHED, // The worker is done processing a queue + FINISHED, // The worker is done processing a queue } private final Configuration conf; @@ -77,7 +78,7 @@ public enum WorkerState { private final int shipEditsTimeout; public ReplicationSourceShipper(Configuration conf, String walGroupId, - ReplicationSourceLogQueue logQueue, ReplicationSource source) { + ReplicationSourceLogQueue logQueue, ReplicationSource source) { this.conf = conf; this.walGroupId = walGroupId; this.logQueue = logQueue; @@ -108,8 +109,8 @@ public final void run() { } try { WALEntryBatch entryBatch = entryReader.poll(getEntriesTimeout); - LOG.debug("Shipper from source {} got entry batch from reader: {}", - source.getQueueId(), entryBatch); + LOG.debug("Shipper from source {} got entry batch from reader: {}", source.getQueueId(), + entryBatch); if (entryBatch == null) { continue; } @@ -150,15 +151,15 @@ protected void postFinish() { } /** - * get batchEntry size excludes bulk load file sizes. - * Uses ReplicationSourceWALReader's static method. + * get batchEntry size excludes bulk load file sizes. Uses ReplicationSourceWALReader's static + * method. 
*/ private int getBatchEntrySizeExcludeBulkLoad(WALEntryBatch entryBatch) { int totalSize = 0; - for(Entry entry : entryBatch.getWalEntries()) { + for (Entry entry : entryBatch.getWalEntries()) { totalSize += ReplicationSourceWALReader.getEntrySizeExcludeBulkLoad(entry); } - return totalSize; + return totalSize; } /** @@ -173,8 +174,8 @@ private void shipEdits(WALEntryBatch entryBatch) { } int currentSize = (int) entryBatch.getHeapSize(); int sizeExcludeBulkLoad = getBatchEntrySizeExcludeBulkLoad(entryBatch); - source.getSourceMetrics().setTimeStampNextToReplicate(entries.get(entries.size() - 1) - .getKey().getWriteTime()); + source.getSourceMetrics() + .setTimeStampNextToReplicate(entries.get(entries.size() - 1).getKey().getWriteTime()); while (isActive()) { try { try { @@ -189,7 +190,7 @@ private void shipEdits(WALEntryBatch entryBatch) { // create replicateContext here, so the entries can be GC'd upon return from this call // stack ReplicationEndpoint.ReplicateContext replicateContext = - new ReplicationEndpoint.ReplicateContext(); + new ReplicationEndpoint.ReplicateContext(); replicateContext.setEntries(entries).setSize(currentSize); replicateContext.setWalGroupId(walGroupId); replicateContext.setTimeout(getAdaptiveTimeout(this.shipEditsTimeout, sleepMultiplier)); @@ -212,10 +213,10 @@ private void shipEdits(WALEntryBatch entryBatch) { // Log and clean up WAL logs updateLogPosition(entryBatch); - //offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size) - //this sizeExcludeBulkLoad has to use same calculation that when calling - //acquireBufferQuota() in ReplicationSourceWALReader because they maintain - //same variable: totalBufferUsed + // offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size) + // this sizeExcludeBulkLoad has to use same calculation that when calling + // acquireBufferQuota() in ReplicationSourceWALReader because they maintain + // same variable: totalBufferUsed source.postShipEdits(entries, sizeExcludeBulkLoad); // FIXME check relationship between wal group and overall source.getSourceMetrics().shipBatch(entryBatch.getNbOperations(), currentSize, @@ -225,15 +226,17 @@ private void shipEdits(WALEntryBatch entryBatch) { source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize()); if (LOG.isTraceEnabled()) { - LOG.debug("Replicated {} entries or {} operations in {} ms", - entries.size(), entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000); + LOG.debug("Replicated {} entries or {} operations in {} ms", entries.size(), + entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000); } break; } catch (Exception ex) { LOG.warn("{} threw unknown exception:", source.getReplicationEndpoint().getClass().getName(), ex); - if (sleepForRetries("ReplicationEndpoint threw exception", sleepForRetries, sleepMultiplier, - maxRetriesMultiplier)) { + if ( + sleepForRetries("ReplicationEndpoint threw exception", sleepForRetries, sleepMultiplier, + maxRetriesMultiplier) + ) { sleepMultiplier++; } } @@ -270,8 +273,10 @@ private boolean updateLogPosition(WALEntryBatch batch) { // record on zk, so let's call it. The last wal position maybe zero if end of file is true and // there is no entry in the batch. It is OK because that the queue storage will ignore the zero // position and the file will be removed soon in cleanOldLogs. 
- if (batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) || - batch.getLastWalPosition() != currentPosition) { + if ( + batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) + || batch.getLastWalPosition() != currentPosition + ) { source.logPositionAndCleanOldLogs(batch); updated = true; } @@ -328,28 +333,26 @@ public boolean isFinished() { } /** - * Attempts to properly update ReplicationSourceManager.totalBufferUser, - * in case there were unprocessed entries batched by the reader to the shipper, - * but the shipper didn't manage to ship those because the replication source is being terminated. - * In that case, it iterates through the batched entries and decrease the pending - * entries size from ReplicationSourceManager.totalBufferUser + * Attempts to properly update ReplicationSourceManager.totalBufferUser, in case + * there were unprocessed entries batched by the reader to the shipper, but the shipper didn't + * manage to ship those because the replication source is being terminated. In that case, it + * iterates through the batched entries and decrease the pending entries size from + * ReplicationSourceManager.totalBufferUser *

    - * NOTES - * 1) This method should only be called upon replication source termination. - * It blocks waiting for both shipper and reader threads termination, - * to make sure no race conditions - * when updating ReplicationSourceManager.totalBufferUser. - * - * 2) It does not attempt to terminate reader and shipper threads. Those must - * have been triggered interruption/termination prior to calling this method. + * NOTES 1) This method should only be called upon replication source termination. It + * blocks waiting for both shipper and reader threads termination, to make sure no race conditions + * when updating ReplicationSourceManager.totalBufferUser. 2) It does not + * attempt to terminate reader and shipper threads. Those must have been triggered + * interruption/termination prior to calling this method. */ void clearWALEntryBatch() { long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout; - while(this.isAlive() || this.entryReader.isAlive()){ + while (this.isAlive() || this.entryReader.isAlive()) { try { if (EnvironmentEdgeManager.currentTime() >= timeout) { - LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " - + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", + LOG.warn( + "Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " + + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive()); return; } else { @@ -358,11 +361,11 @@ void clearWALEntryBatch() { } } catch (InterruptedException e) { LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. " - + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e); + + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e); return; } } - LongAccumulator totalToDecrement = new LongAccumulator((a,b) -> a + b, 0); + LongAccumulator totalToDecrement = new LongAccumulator((a, b) -> a + b, 0); entryReader.entryBatchQueue.forEach(w -> { entryReader.entryBatchQueue.remove(w); w.getWalEntries().forEach(e -> { @@ -370,12 +373,12 @@ void clearWALEntryBatch() { totalToDecrement.accumulate(entrySizeExcludeBulkLoad); }); }); - if( LOG.isTraceEnabled()) { + if (LOG.isTraceEnabled()) { LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.", totalToDecrement.longValue()); } - long newBufferUsed = source.getSourceManager().getTotalBufferUsed() - .addAndGet(-totalToDecrement.longValue()); + long newBufferUsed = + source.getSourceManager().getTotalBufferUsed().addAndGet(-totalToDecrement.longValue()); source.getSourceManager().getGlobalMetrics().setWALReaderEditsBufferBytes(newBufferUsed); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java index f3311eedbe06..6e5da0feffb1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit log /** * Utility method used to set the correct scopes on each log key. Doesn't set a scope on keys from * compaction WAL edits and if the scope is local. - * @param logKey Key that may get scoped according to its edits + * @param logKey Key that may get scoped according to its edits * @param logEdit Edits used to lookup the scopes */ static void scopeWALEdits(WALKey logKey, WALEdit logEdit, Configuration conf) { @@ -70,8 +70,9 @@ static void scopeWALEdits(WALKey logKey, WALEdit logEdit, Configuration conf) { return; } // For replay, or if all the cells are markers, do not need to store replication scope. - if (logEdit.isReplay() || - logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c))) { + if ( + logEdit.isReplay() || logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c)) + ) { ((WALKeyImpl) logKey).clearReplicationScope(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index c61494e12c6a..d95e21241e6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,6 +41,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -72,7 +72,7 @@ class ReplicationSourceWALReader extends Thread { private final int maxRetriesMultiplier; private final boolean eofAutoRecovery; - //Indicates whether this particular worker is running + // Indicates whether this particular worker is running private boolean isReaderRunning = true; private AtomicLong totalBufferUsed; @@ -82,16 +82,16 @@ class ReplicationSourceWALReader extends Thread { /** * Creates a reader worker for a given WAL queue. Reads WAL entries off a given queue, batches the * entries, and puts them on a batch queue. 
- * @param fs the files system to use - * @param conf configuration to use - * @param logQueue The WAL queue to read off of + * @param fs the files system to use + * @param conf configuration to use + * @param logQueue The WAL queue to read off of * @param startPosition position in the first WAL to start reading from - * @param filter The filter to use while reading - * @param source replication source + * @param filter The filter to use while reading + * @param source replication source */ public ReplicationSourceWALReader(FileSystem fs, Configuration conf, - ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, - ReplicationSource source, String walGroupId) { + ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, + ReplicationSource source, String walGroupId) { this.logQueue = logQueue; this.currentPosition = startPosition; this.fs = fs; @@ -99,7 +99,7 @@ public ReplicationSourceWALReader(FileSystem fs, Configuration conf, this.filter = filter; this.source = source; this.replicationBatchSizeCapacity = - this.conf.getLong("replication.source.size.capacity", 1024 * 1024 * 64); + this.conf.getLong("replication.source.size.capacity", 1024 * 1024 * 64); this.replicationBatchCountCapacity = this.conf.getInt("replication.source.nb.capacity", 25000); // memory used will be batchSizeCapacity * (nb.batches + 1) // the +1 is for the current thread reading before placing onto the queue @@ -113,11 +113,10 @@ public ReplicationSourceWALReader(FileSystem fs, Configuration conf, this.eofAutoRecovery = conf.getBoolean("replication.source.eof.autorecovery", false); this.entryBatchQueue = new LinkedBlockingQueue<>(batchCount); this.walGroupId = walGroupId; - LOG.info("peerClusterZnode=" + source.getQueueId() - + ", ReplicationSourceWALReaderThread : " + source.getPeerId() - + " inited, replicationBatchSizeCapacity=" + replicationBatchSizeCapacity - + ", replicationBatchCountCapacity=" + replicationBatchCountCapacity - + ", replicationBatchQueueCapacity=" + batchCount); + LOG.info("peerClusterZnode=" + source.getQueueId() + ", ReplicationSourceWALReaderThread : " + + source.getPeerId() + " inited, replicationBatchSizeCapacity=" + replicationBatchSizeCapacity + + ", replicationBatchCountCapacity=" + replicationBatchCountCapacity + + ", replicationBatchQueueCapacity=" + batchCount); } @Override @@ -126,9 +125,8 @@ public void run() { while (isReaderRunning()) { // we only loop back here if something fatal happened to our stream WALEntryBatch batch = null; try (WALEntryStream entryStream = - new WALEntryStream(logQueue, conf, currentPosition, - source.getWALFileLengthProvider(), source.getServerWALsBelongTo(), - source.getSourceMetrics(), walGroupId)) { + new WALEntryStream(logQueue, conf, currentPosition, source.getWALFileLengthProvider(), + source.getServerWALsBelongTo(), source.getSourceMetrics(), walGroupId)) { while (isReaderRunning()) { // loop here to keep reusing stream while we can batch = null; if (!source.isPeerEnabled()) { @@ -179,7 +177,7 @@ protected final boolean addEntryToBatch(WALEntryBatch batch, Entry entry) { return false; } LOG.debug("updating TimeStampOfLastAttempted to {}, from entry {}, for source queue: {}", - entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId()); + entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId()); long entrySize = getEntrySizeIncludeBulkLoad(entry); long entrySizeExcludeBulkLoad = getEntrySizeExcludeBulkLoad(entry); batch.addEntry(entry, entrySize); @@ -187,8 +185,8 @@ 
protected final boolean addEntryToBatch(WALEntryBatch batch, Entry entry) { boolean totalBufferTooLarge = acquireBufferQuota(entrySizeExcludeBulkLoad); // Stop if too many entries or too big - return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity || - batch.getNbEntries() >= replicationBatchCountCapacity; + return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity + || batch.getNbEntries() >= replicationBatchCountCapacity; } protected static final boolean switched(WALEntryStream entryStream, Path path) { @@ -257,24 +255,25 @@ private WALEntryBatch tryAdvanceStreamAndCreateWALBatch(WALEntryStream entryStre } /** - * This is to handle the EOFException from the WAL entry stream. EOFException should - * be handled carefully because there are chances of data loss because of never replicating - * the data. Thus we should always try to ship existing batch of entries here. - * If there was only one log in the queue before EOF, we ship the empty batch here - * and since reader is still active, in the next iteration of reader we will - * stop the reader. + * This is to handle the EOFException from the WAL entry stream. EOFException should be handled + * carefully because there are chances of data loss because of never replicating the data. Thus we + * should always try to ship existing batch of entries here. If there was only one log in the + * queue before EOF, we ship the empty batch here and since reader is still active, in the next + * iteration of reader we will stop the reader. *

    - * If there was more than one log in the queue before EOF, we ship the existing batch - * and reset the wal patch and position to the log with EOF, so shipper can remove - * logs from replication queue + * If there was more than one log in the queue before EOF, we ship the existing batch and reset + * the wal patch and position to the log with EOF, so shipper can remove logs from replication + * queue * @return true only the IOE can be handled */ private boolean handleEofException(Exception e, WALEntryBatch batch) { PriorityBlockingQueue queue = logQueue.getQueue(walGroupId); // Dump the log even if logQueue size is 1 if the source is from recovered Source // since we don't add current log to recovered source queue so it is safe to remove. - if ((e instanceof EOFException || e.getCause() instanceof EOFException) && - (source.isRecovered() || queue.size() > 1) && this.eofAutoRecovery) { + if ( + (e instanceof EOFException || e.getCause() instanceof EOFException) + && (source.isRecovered() || queue.size() > 1) && this.eofAutoRecovery + ) { Path path = queue.peek(); try { if (!fs.exists(path)) { @@ -325,12 +324,12 @@ public Path getCurrentPath() { return logQueue.getQueue(walGroupId).peek(); } - //returns false if we've already exceeded the global quota + // returns false if we've already exceeded the global quota private boolean checkQuota() { // try not to go over total quota if (totalBufferUsed.get() > totalBufferQuota) { LOG.warn("peer={}, can't read more edits from WAL as buffer usage {}B exceeds limit {}B", - this.source.getPeerId(), totalBufferUsed.get(), totalBufferQuota); + this.source.getPeerId(), totalBufferUsed.get(), totalBufferQuota); Threads.sleep(sleepForRetries); return false; } @@ -366,7 +365,7 @@ public WALEntryBatch poll(long timeout) throws InterruptedException { private long getEntrySizeIncludeBulkLoad(Entry entry) { WALEdit edit = entry.getEdit(); - return getEntrySizeExcludeBulkLoad(entry) + sizeOfStoreFilesIncludeBulkLoad(edit); + return getEntrySizeExcludeBulkLoad(entry) + sizeOfStoreFilesIncludeBulkLoad(edit); } public static long getEntrySizeExcludeBulkLoad(Entry entry) { @@ -375,7 +374,6 @@ public static long getEntrySizeExcludeBulkLoad(Entry entry) { return edit.heapSize() + key.estimatedSerializedSizeOf(); } - private void updateBatchStats(WALEntryBatch batch, Entry entry, long entrySize) { WALEdit edit = entry.getEdit(); batch.incrementHeapSize(entrySize); @@ -409,7 +407,7 @@ private Pair countDistinctRowKeysAndHFiles(WALEdit edit) { } } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. " - + "Then its hfiles count will not be added into metric.", e); + + "Then its hfiles count will not be added into metric.", e); } } @@ -441,12 +439,12 @@ private int sizeOfStoreFilesIncludeBulkLoad(WALEdit edit) { int totalStores = stores.size(); for (int j = 0; j < totalStores; j++) { totalStoreFilesSize = - (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes()); + (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes()); } } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. 
" - + "Size of HFiles part of cell will not be considered in replication " - + "request size calculation.", e); + + "Size of HFiles part of cell will not be considered in replication " + + "request size calculation.", e); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java index 10d6cd59d4ae..2161cc35ed99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 0e938ecf2026..daf9081234d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -71,8 +71,9 @@ public static void main(String[] args) throws Exception { private Set getLiveRegionServers(ZKWatcher zkw) throws KeeperException { List rsZNodes = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode); - return rsZNodes == null ? Collections.emptySet() : - rsZNodes.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); + return rsZNodes == null + ? Collections.emptySet() + : rsZNodes.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); } // When using this tool, usually the source cluster is unhealthy, so we should try to claim the @@ -106,8 +107,7 @@ public boolean isAborted() { }; Configuration conf = getConf(); try (ZKWatcher zkw = new ZKWatcher(conf, - "syncupReplication" + EnvironmentEdgeManager.currentTime(), - abortable, true)) { + "syncupReplication" + EnvironmentEdgeManager.currentTime(), abortable, true)) { Path walRootDir = CommonFSUtils.getWALRootDir(conf); FileSystem fs = CommonFSUtils.getWALFileSystem(conf); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java index 7f73030699e8..3e4bb77b23fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** - * Per-peer per-node throttling controller for replication: enabled if - * bandwidth > 0, a cycle = 100ms, by throttling we guarantee data pushed - * to peer within each cycle won't exceed 'bandwidth' bytes + * Per-peer per-node throttling controller for replication: enabled if bandwidth > 0, a cycle = + * 100ms, by throttling we guarantee data pushed to peer within each cycle won't exceed 'bandwidth' + * bytes */ @InterfaceAudience.Private public class ReplicationThrottler { @@ -33,8 +33,7 @@ public class ReplicationThrottler { private long cycleStartTick; /** - * ReplicationThrottler constructor - * If bandwidth less than 1, throttling is disabled + * ReplicationThrottler constructor If bandwidth less than 1, throttling is disabled * @param bandwidth per cycle(100ms) */ public ReplicationThrottler(final double bandwidth) { @@ -55,9 +54,8 @@ public boolean isEnabled() { } /** - * Get how long the caller should sleep according to the current size and - * current cycle's total push size and start tick, return the sleep interval - * for throttling control. + * Get how long the caller should sleep according to the current size and current cycle's total + * push size and start tick, return the sleep interval for throttling control. * @param size is the size of edits to be pushed * @return sleep interval for throttling control */ @@ -69,11 +67,11 @@ public long getNextSleepInterval(final int size) { long sleepTicks = 0; long now = EnvironmentEdgeManager.currentTime(); // 1. if cyclePushSize exceeds bandwidth, we need to sleep some - // following cycles to amortize, this case can occur when a single push - // exceeds the bandwidth - if ((double)this.cyclePushSize > bandwidth) { - double cycles = Math.ceil((double)this.cyclePushSize / bandwidth); - long shouldTillTo = this.cycleStartTick + (long)(cycles * 100); + // following cycles to amortize, this case can occur when a single push + // exceeds the bandwidth + if ((double) this.cyclePushSize > bandwidth) { + double cycles = Math.ceil((double) this.cyclePushSize / bandwidth); + long shouldTillTo = this.cycleStartTick + (long) (cycles * 100); if (shouldTillTo > now) { sleepTicks = shouldTillTo - now; } else { @@ -82,16 +80,15 @@ public long getNextSleepInterval(final int size) { } this.cyclePushSize = 0; } else { - long nextCycleTick = this.cycleStartTick + 100; //a cycle is 100ms + long nextCycleTick = this.cycleStartTick + 100; // a cycle is 100ms if (now >= nextCycleTick) { // 2. switch to next cycle if the current cycle has passed this.cycleStartTick = now; this.cyclePushSize = 0; - } else if (this.cyclePushSize > 0 && - (double)(this.cyclePushSize + size) >= bandwidth) { + } else if (this.cyclePushSize > 0 && (double) (this.cyclePushSize + size) >= bandwidth) { // 3. delay the push to next cycle if exceeds throttling bandwidth. 
- // enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case - // where a cycle's first push size(currentSize) > bandwidth + // enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case + // where a cycle's first push size(currentSize) > bandwidth sleepTicks = nextCycleTick - now; this.cyclePushSize = 0; } @@ -101,8 +98,7 @@ public long getNextSleepInterval(final int size) { /** * Add current size to the current cycle's total push size - * @param size is the current size added to the current cycle's - * total push size + * @param size is the current size added to the current cycle's total push size */ public void addPushSize(final int size) { if (this.enabled) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java index fdc1e5414d00..714d77f72ec4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,6 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; @@ -50,12 +51,11 @@ *

    * We record all the open sequence number for a region in a special family in meta, which is called * 'rep_barrier', so there will be a sequence of open sequence number (b1, b2, b3, ...). We call - * [bn, bn+1) a range, and it is obvious that a region will always be on the same RS within a - * range. + * [bn, bn+1) a range, and it is obvious that a region will always be on the same RS within a range. *

    * When split and merge, we will also record the parent for the generated region(s) in the special - * family in meta. And also, we will write an extra 'open sequence number' for the parent - * region(s), which is the max sequence id of the region plus one. + * family in meta. And also, we will write an extra 'open sequence number' for the parent region(s), + * which is the max sequence id of the region plus one. *

    *

    *

    @@ -170,8 +170,8 @@ private boolean isParentFinished(byte[] regionName) throws IOException { // if a region is in OPENING state and we are in the last range, it is not safe to say we can push // even if the previous range is finished. private boolean isLastRangeAndOpening(ReplicationBarrierResult barrierResult, int index) { - return index == barrierResult.getBarriers().length && - barrierResult.getState() == RegionState.State.OPENING; + return index == barrierResult.getBarriers().length + && barrierResult.getState() == RegionState.State.OPENING; } private void recordCanPush(String encodedNameAsString, long seqId, long[] barriers, int index) { @@ -264,7 +264,7 @@ public boolean canPush(Entry entry, Cell firstCellInEdit) throws IOException { } public void waitUntilCanPush(Entry entry, Cell firstCellInEdit) - throws IOException, InterruptedException { + throws IOException, InterruptedException { byte[] row = CellUtil.cloneRow(firstCellInEdit); while (!canPush(entry, row)) { LOG.debug("Can not push {}, wait", entry); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java index 1de4c998546e..1a8bbf74a2c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,8 +43,8 @@ public class SerialReplicationSourceWALReader extends ReplicationSourceWALReader private final SerialReplicationChecker checker; public SerialReplicationSourceWALReader(FileSystem fs, Configuration conf, - ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, - ReplicationSource source, String walGroupId) { + ReplicationSourceLogQueue logQueue, long startPosition, WALEntryFilter filter, + ReplicationSource source, String walGroupId) { super(fs, conf, logQueue, startPosition, filter, source, walGroupId); checker = new SerialReplicationChecker(conf, source); } @@ -108,7 +108,7 @@ protected void readWALEntries(WALEntryStream entryStream, WALEntryBatch batch) } private void removeEntryFromStream(WALEntryStream entryStream, WALEntryBatch batch) - throws IOException { + throws IOException { entryStream.next(); firstCellInEntryBeforeFiltering = null; batch.setLastWalPosition(entryStream.getPosition()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java index b578587193dd..88d86c3217d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java @@ -1,17 +1,23 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. 
The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; @@ -29,12 +35,12 @@ public interface SourceFSConfigurationProvider { /** * Returns the source cluster file system configuration for the given source cluster replication * ID. - * @param sinkConf sink cluster configuration + * @param sinkConf sink cluster configuration * @param replicationClusterId unique ID which identifies the source cluster * @return source cluster file system configuration * @throws IOException for invalid directory or for a bad disk. */ public Configuration getConf(Configuration sinkConf, String replicationClusterId) - throws IOException; + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java index c78fe40b028f..d09c821b9edc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java index cfe525ac5d3c..66ec3fdd7d74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.util.Optional; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; @@ -44,5 +43,5 @@ public interface SyncReplicationPeerInfoProvider { * Will call the checker with current sync replication state and new sync replication state. */ boolean checkState(TableName table, - BiPredicate checker); + BiPredicate checker); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java index 170441b45c1f..7efd3741ae0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.util.Optional; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationPeerImpl; import org.apache.hadoop.hbase.replication.ReplicationPeers; @@ -35,7 +34,7 @@ class SyncReplicationPeerInfoProviderImpl implements SyncReplicationPeerInfoProv private final SyncReplicationPeerMappingManager mapping; SyncReplicationPeerInfoProviderImpl(ReplicationPeers replicationPeers, - SyncReplicationPeerMappingManager mapping) { + SyncReplicationPeerMappingManager mapping) { this.replicationPeers = replicationPeers; this.mapping = mapping; } @@ -54,11 +53,13 @@ public Optional> getPeerIdAndRemoteWALDir(TableName table) return Optional.empty(); } Pair states = - peer.getSyncReplicationStateAndNewState(); - if ((states.getFirst() == SyncReplicationState.ACTIVE && - states.getSecond() == SyncReplicationState.NONE) || - (states.getFirst() == SyncReplicationState.DOWNGRADE_ACTIVE && - states.getSecond() == SyncReplicationState.ACTIVE)) { + peer.getSyncReplicationStateAndNewState(); + if ( + (states.getFirst() == SyncReplicationState.ACTIVE + && states.getSecond() == SyncReplicationState.NONE) + || (states.getFirst() == SyncReplicationState.DOWNGRADE_ACTIVE + && states.getSecond() == SyncReplicationState.ACTIVE) + ) { return Optional.of(Pair.newPair(peerId, peer.getPeerConfig().getRemoteWALDir())); } else { return Optional.empty(); @@ -67,7 +68,7 @@ public Optional> getPeerIdAndRemoteWALDir(TableName table) @Override public boolean checkState(TableName table, - BiPredicate checker) { + BiPredicate checker) { String peerId = mapping.getPeerId(table); if (peerId == null) { return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java index 5d19f7224463..9a50ef433cdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java index 8301dff26d61..65575aba5a6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,6 @@ class WALEntryBatch { this.lastWalPath = lastWalPath; } - static WALEntryBatch endOfFile(Path lastWalPath) { WALEntryBatch batch = new WALEntryBatch(0, lastWalPath); batch.setLastWalPosition(-1L); @@ -174,9 +173,9 @@ public void setLastSeqId(String region, long sequenceId) { @Override public String toString() { - return "WALEntryBatch [walEntries=" + walEntriesWithSize + ", lastWalPath=" + lastWalPath + - ", lastWalPosition=" + lastWalPosition + ", nbRowKeys=" + nbRowKeys + ", nbHFiles=" + - nbHFiles + ", heapSize=" + heapSize + ", lastSeqIds=" + lastSeqIds + ", endOfFile=" + - endOfFile + "]"; + return "WALEntryBatch [walEntries=" + walEntriesWithSize + ", lastWalPath=" + lastWalPath + + ", lastWalPosition=" + lastWalPosition + ", nbRowKeys=" + nbRowKeys + ", nbHFiles=" + + nbHFiles + ", heapSize=" + heapSize + ", lastSeqIds=" + lastSeqIds + ", endOfFile=" + + endOfFile + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java index f93f8b058b27..861f2d720077 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This exception should be thrown from any wal filter when the filter is expected - * to recover from the failures and it wants the replication to backup till it fails. - * There is special handling in replication wal reader to catch this exception and - * retry. + * This exception should be thrown from any wal filter when the filter is expected to recover from + * the failures and it wants the replication to backup till it fails. There is special handling in + * replication wal reader to catch this exception and retry. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public class WALEntryFilterRetryableException extends RuntimeException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java index 6f6ae1f8bd0f..2714d346d979 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,15 +26,16 @@ /** * Implementations are installed on a Replication Sink called from inside * ReplicationSink#replicateEntries to filter replicated WALEntries based off WALEntry attributes. - * Currently only table name and replication write time are exposed (WALEntry is a private, - * internal class so we cannot pass it here). 
To install, set - * hbase.replication.sink.walentryfilter to the name of the implementing - * class. Implementing class must have a no-param Constructor. - *

    This filter is of limited use. It is better to filter on the replication source rather than - * here after the edits have been shipped on the replication sink. That said, applications such - * as the hbase-indexer want to filter out any edits that were made before replication was enabled. + * Currently only table name and replication write time are exposed (WALEntry is a private, internal + * class so we cannot pass it here). To install, set + * hbase.replication.sink.walentryfilter to the name of the implementing class. + * Implementing class must have a no-param Constructor. + *

    + * This filter is of limited use. It is better to filter on the replication source rather than here + * after the edits have been shipped on the replication sink. That said, applications such as the + * hbase-indexer want to filter out any edits that were made before replication was enabled. * @see org.apache.hadoop.hbase.replication.WALEntryFilter for filtering on the replication - * source-side. + * source-side. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) @InterfaceStability.Evolving @@ -45,13 +46,12 @@ public interface WALEntrySinkFilter { public static final String WAL_ENTRY_FILTER_KEY = "hbase.replication.sink.walentrysinkfilter"; /** - * Called after Construction. - * Use passed Connection to keep any context the filter might need. + * Called after Construction. Use passed Connection to keep any context the filter might need. */ void init(AsyncConnection conn); /** - * @param table Table edit is destined for. + * @param table Table edit is destined for. * @param writeTime Time at which the edit was created on the source. * @return True if we are to filter out the edit. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 488355c3a2b2..041a10b8f316 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,17 +73,18 @@ class WALEntryStream implements Closeable { /** * Create an entry stream over the given queue at the given start position - * @param logQueue the queue of WAL paths - * @param conf the {@link Configuration} to use to create {@link Reader} for this stream - * @param startPosition the position in the first WAL to start reading at + * @param logQueue the queue of WAL paths + * @param conf the {@link Configuration} to use to create {@link Reader} for this + * stream + * @param startPosition the position in the first WAL to start reading at * @param walFileLengthProvider provides the length of the WAL file - * @param serverName the server name which all WALs belong to - * @param metrics the replication metrics + * @param serverName the server name which all WALs belong to + * @param metrics the replication metrics * @throws IOException throw IO exception from stream */ - public WALEntryStream(ReplicationSourceLogQueue logQueue, Configuration conf, - long startPosition, WALFileLengthProvider walFileLengthProvider, ServerName serverName, - MetricsSource metrics, String walGroupId) throws IOException { + public WALEntryStream(ReplicationSourceLogQueue logQueue, Configuration conf, long startPosition, + WALFileLengthProvider walFileLengthProvider, ServerName serverName, MetricsSource metrics, + String walGroupId) throws IOException { this.logQueue = logQueue; this.fs = CommonFSUtils.getWALFileSystem(conf); this.conf = conf; @@ -109,7 +109,7 @@ public boolean hasNext() throws IOException { * Returns the next WAL entry in this stream but does not advance. */ public Entry peek() throws IOException { - return hasNext() ? currentEntry: null; + return hasNext() ? 
currentEntry : null; } /** @@ -148,7 +148,7 @@ private String getCurrentPathStat() { StringBuilder sb = new StringBuilder(); if (currentPath != null) { sb.append("currently replicating from: ").append(currentPath).append(" at position: ") - .append(currentPositionOfEntry).append("\n"); + .append(currentPositionOfEntry).append("\n"); } else { sb.append("no replication ongoing, waiting for new log"); } @@ -224,15 +224,15 @@ private boolean checkAllBytesParsed() throws IOException { if (currentPositionOfReader < stat.getLen()) { final long skippedBytes = stat.getLen() - currentPositionOfReader; // See the commits in HBASE-25924/HBASE-25932 for context. - LOG.warn("Reached the end of WAL {}. It was not closed cleanly," + - " so we did not parse {} bytes of data.", currentPath, skippedBytes); + LOG.warn("Reached the end of WAL {}. It was not closed cleanly," + + " so we did not parse {} bytes of data.", currentPath, skippedBytes); metrics.incrUncleanlyClosedWALs(); metrics.incrBytesSkippedInUncleanlyClosedWALs(skippedBytes); } } else if (currentPositionOfReader + trailerSize < stat.getLen()) { LOG.warn( - "Processing end of WAL {} at position {}, which is too far away from" + - " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", + "Processing end of WAL {} at position {}, which is too far away from" + + " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat()); setPosition(0); resetReader(); @@ -242,8 +242,8 @@ private boolean checkAllBytesParsed() throws IOException { } } if (LOG.isTraceEnabled()) { - LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + - (stat == null ? "N/A" : stat.getLen())); + LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + + (stat == null ? "N/A" : stat.getLen())); } metrics.incrCompletedWAL(); return true; @@ -268,8 +268,8 @@ private boolean readNextEntryAndRecordReaderPosition() throws IOException { // See HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted // data, so we need to make sure that we do not read beyond the committed file length. if (LOG.isDebugEnabled()) { - LOG.debug("The provider tells us the valid length for " + currentPath + " is " + - fileLength.getAsLong() + ", but we have advanced to " + readerPos); + LOG.debug("The provider tells us the valid length for " + currentPath + " is " + + fileLength.getAsLong() + ", but we have advanced to " + readerPos); } resetReader(); return true; @@ -341,12 +341,12 @@ private void openReader(Path path) throws IOException { } } catch (FileNotFoundException fnfe) { handleFileNotFound(path, fnfe); - } catch (RemoteException re) { + } catch (RemoteException re) { IOException ioe = re.unwrapRemoteException(FileNotFoundException.class); if (!(ioe instanceof FileNotFoundException)) { throw ioe; } - handleFileNotFound(path, (FileNotFoundException)ioe); + handleFileNotFound(path, (FileNotFoundException) ioe); } catch (LeaseNotRecoveredException lnre) { // HBASE-15019 the WAL was not closed due to some hiccup. 
LOG.warn("Try to recover the WAL lease " + path, lnre); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java index c60faa9e5db8..b0550cc37cf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.util.OptionalLong; - import org.apache.hadoop.fs.Path; import org.apache.yetus.audience.InterfaceAudience; /** * Used by replication to prevent replicating unacked log entries. See - * https://issues.apache.org/jira/browse/HBASE-14004 for more details. - * WALFileLengthProvider exists because we do not want to reference WALFactory and WALProvider - * directly in the replication code so in the future it will be easier to decouple them. - * Each walProvider will have its own implementation. + * https://issues.apache.org/jira/browse/HBASE-14004 for more details. WALFileLengthProvider exists + * because we do not want to reference WALFactory and WALProvider directly in the replication code + * so in the future it will be easier to decouple them. Each walProvider will have its own + * implementation. */ @InterfaceAudience.Private @FunctionalInterface diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java index 8ed250538c60..bba8600f89ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -110,7 +110,8 @@ public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { throw new DoNotRetryIOException("RSGroup is disabled"); } @@ -130,7 +131,7 @@ public void renameRSGroup(String oldName, String newName) throws IOException { @Override public void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { + throws IOException { throw new DoNotRetryIOException("RSGroup is disabled"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java index 3c03abc95949..daa7aa56dcb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,8 +56,8 @@ protected Optional modify(MasterProcedureEnv env, TableDescript RSGroupInfo group = env.getMasterServices().getRSGroupInfoManager().getRSGroupForTable(current.getTableName()); if (group == null) { - LOG.debug("RSGroup for table {} is empty when migrating, usually this should not happen" + - " unless we have removed the RSGroup, ignore...", current.getTableName()); + LOG.debug("RSGroup for table {} is empty when migrating, usually this should not happen" + + " unless we have removed the RSGroup, ignore...", current.getTableName()); return Optional.empty(); } return Optional diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java index 4c291547110a..711aa7176b09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -165,7 +165,8 @@ public void removeRSGroup(String name) throws IOException { * Balance regions in the given RegionServer group. * @return BalanceResponse details about the balancer run */ - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { try { BalanceRSGroupRequest req = ProtobufUtil.createBalanceRSGroupRequest(groupName, request); return ProtobufUtil.toBalanceResponse(stub.balanceRSGroup(null, req)); @@ -212,8 +213,8 @@ public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { /** * Move given set of servers and tables to the specified target RegionServer group. - * @param servers set of servers to move - * @param tables set of tables to move + * @param servers set of servers to move + * @param tables set of tables to move * @param targetGroup the target group name * @throws IOException if moving the server and tables fail */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 574d6e79edb4..291a342405a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java index b8b2a4f3206f..35824fd36332 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,9 +39,11 @@ import org.apache.hadoop.hbase.procedure2.Procedure; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos; @@ -74,9 +76,8 @@ * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. This class calls * {@link RSGroupInfoManagerImpl} for actual work, converts result to protocol buffer response, * handles exceptions if any occurred and then calls the {@code RpcCallback} with the response. - * - * @deprecated Keep it here only for compatibility with {@link RSGroupAdminClient}, - * using {@link org.apache.hadoop.hbase.master.MasterRpcServices} instead. + * @deprecated Keep it here only for compatibility with {@link RSGroupAdminClient}, using + * {@link org.apache.hadoop.hbase.master.MasterRpcServices} instead. */ @Deprecated class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { @@ -90,7 +91,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { RSGroupAdminServiceImpl() { } - void initialize(MasterServices masterServices){ + void initialize(MasterServices masterServices) { this.master = masterServices; this.rsGroupInfoManager = masterServices.getRSGroupInfoManager(); } @@ -102,7 +103,7 @@ private RSGroupInfo fillTables(RSGroupInfo rsGroupInfo) throws IOException { @Override public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest request, - RpcCallback done) { + RpcCallback done) { GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder(); String groupName = request.getRSGroupName(); LOG.info( @@ -126,7 +127,7 @@ public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest reque @Override public void getRSGroupInfoOfTable(RpcController controller, GetRSGroupInfoOfTableRequest request, - RpcCallback done) { + RpcCallback done) { GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder(); TableName tableName = ProtobufUtil.toTableName(request.getTableName()); LOG.info( @@ -157,14 +158,14 @@ public void getRSGroupInfoOfTable(RpcController controller, GetRSGroupInfoOfTabl @Override public void moveServers(RpcController controller, MoveServersRequest request, - RpcCallback done) { + RpcCallback done) { MoveServersResponse.Builder builder = MoveServersResponse.newBuilder(); Set
<Address>
    hostPorts = Sets.newHashSet(); for (HBaseProtos.ServerName el : request.getServersList()) { hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + - request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); @@ -187,7 +188,7 @@ private void moveTablesAndWait(Set tables, String targetGroup) throws continue; } TableDescriptor newTd = - TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); + TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); procIds.add(master.modifyTable(tableName, newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); } for (long procId : procIds) { @@ -202,14 +203,14 @@ private void moveTablesAndWait(Set tables, String targetGroup) throws @Override public void moveTables(RpcController controller, MoveTablesRequest request, - RpcCallback done) { + RpcCallback done) { MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder(); Set tables = new HashSet<>(request.getTableNameList().size()); for (HBaseProtos.TableName tableName : request.getTableNameList()) { tables.add(ProtobufUtil.toTableName(tableName)); } - LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " + - request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " + + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup()); @@ -226,7 +227,7 @@ public void moveTables(RpcController controller, MoveTablesRequest request, @Override public void addRSGroup(RpcController controller, AddRSGroupRequest request, - RpcCallback done) { + RpcCallback done) { AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder(); LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); try { @@ -245,7 +246,7 @@ public void addRSGroup(RpcController controller, AddRSGroupRequest request, @Override public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request, - RpcCallback done) { + RpcCallback done) { RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder(); LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); try { @@ -264,17 +265,17 @@ public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request @Override public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request, - RpcCallback done) { + RpcCallback done) { BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request); - BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder() - .setBalanceRan(false); + BalanceRSGroupResponse.Builder builder = + BalanceRSGroupResponse.newBuilder().setBalanceRan(false); LOG.info( master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); try { if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost() - .preBalanceRSGroup(request.getRSGroupName(), balanceRequest); + master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(), + balanceRequest); } BalanceResponse response = @@ -282,8 +283,8 @@ public void 
balanceRSGroup(RpcController controller, BalanceRSGroupRequest reque ProtobufUtil.populateBalanceRSGroupResponse(builder, response); if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost() - .postBalanceRSGroup(request.getRSGroupName(), balanceRequest, response); + master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), + balanceRequest, response); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -293,7 +294,7 @@ public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest reque @Override public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request, - RpcCallback done) { + RpcCallback done) { ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); LOG.info(master.getClientIdAuditPrefix() + " list rsgroup"); try { @@ -301,7 +302,7 @@ public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest r master.getMasterCoprocessorHost().preListRSGroups(); } List rsGroupInfos = rsGroupInfoManager.listRSGroups().stream() - .map(RSGroupInfo::new).collect(Collectors.toList()); + .map(RSGroupInfo::new).collect(Collectors.toList()); Map name2Info = new HashMap<>(); for (RSGroupInfo rsGroupInfo : rsGroupInfos) { name2Info.put(rsGroupInfo.getName(), rsGroupInfo); @@ -328,10 +329,10 @@ public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest r @Override public void getRSGroupInfoOfServer(RpcController controller, - GetRSGroupInfoOfServerRequest request, RpcCallback done) { + GetRSGroupInfoOfServerRequest request, RpcCallback done) { GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder(); Address hp = - Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); + Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); try { if (master.getMasterCoprocessorHost() != null) { @@ -352,7 +353,7 @@ public void getRSGroupInfoOfServer(RpcController controller, @Override public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request, - RpcCallback done) { + RpcCallback done) { MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder(); Set
    hostPorts = Sets.newHashSet(); for (HBaseProtos.ServerName el : request.getServersList()) { @@ -362,8 +363,8 @@ public void moveServersAndTables(RpcController controller, MoveServersAndTablesR for (HBaseProtos.TableName tableName : request.getTableNameList()) { tables.add(ProtobufUtil.toTableName(tableName)); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " + - tables + " to rsgroup" + request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " + + tables + " to rsgroup" + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables, @@ -383,7 +384,7 @@ public void moveServersAndTables(RpcController controller, MoveServersAndTablesR @Override public void removeServers(RpcController controller, RemoveServersRequest request, - RpcCallback done) { + RpcCallback done) { RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder(); Set
    servers = Sets.newHashSet(); for (HBaseProtos.ServerName el : request.getServersList()) { @@ -407,11 +408,11 @@ public void removeServers(RpcController controller, RemoveServersRequest request @Override public void renameRSGroup(RpcController controller, RenameRSGroupRequest request, - RpcCallback done) { + RpcCallback done) { String oldRSGroup = request.getOldRsgroupName(); String newRSGroup = request.getNewRsgroupName(); - LOG.info("{} rename rsgroup from {} to {}", - master.getClientIdAuditPrefix(), oldRSGroup, newRSGroup); + LOG.info("{} rename rsgroup from {} to {}", master.getClientIdAuditPrefix(), oldRSGroup, + newRSGroup); RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 07b32053cbd1..f539d1700ab3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; @@ -80,11 +79,9 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer { private volatile LoadBalancer internalBalancer; /** - * Set this key to {@code true} to allow region fallback. - * Fallback to the default rsgroup first, then fallback to any group if no online servers in - * default rsgroup. - * Please keep balancer switch on at the same time, which is relied on to correct misplaced - * regions + * Set this key to {@code true} to allow region fallback. Fallback to the default rsgroup first, + * then fallback to any group if no online servers in default rsgroup. Please keep balancer switch + * on at the same time, which is relied on to correct misplaced regions */ public static final String FALLBACK_GROUP_ENABLE_KEY = "hbase.rsgroup.fallback.enable"; @@ -94,7 +91,8 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer { * Used by reflection in {@link org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory}. 
*/ @InterfaceAudience.Private - public RSGroupBasedLoadBalancer() {} + public RSGroupBasedLoadBalancer() { + } // must be called after calling initialize @Override @@ -104,8 +102,8 @@ public synchronized void updateClusterMetrics(ClusterMetrics sm) { } @Override - public synchronized void updateBalancerLoadInfo(Map>> - loadOfAllTable){ + public synchronized void + updateBalancerLoadInfo(Map>> loadOfAllTable) { internalBalancer.updateBalancerLoadInfo(loadOfAllTable); } @@ -118,17 +116,17 @@ public void setMasterServices(MasterServices masterServices) { */ @Override public synchronized List balanceCluster( - Map>> loadOfAllTable) throws IOException { + Map>> loadOfAllTable) throws IOException { if (!isOnline()) { throw new ConstraintException( - RSGroupInfoManager.class.getSimpleName() + " is not online, unable to perform balance"); + RSGroupInfoManager.class.getSimpleName() + " is not online, unable to perform balance"); } // Calculate correct assignments and a list of RegionPlan for mis-placed regions - Pair>>, List> - correctedStateAndRegionPlans = correctAssignments(loadOfAllTable); + Pair>>, + List> correctedStateAndRegionPlans = correctAssignments(loadOfAllTable); Map>> correctedLoadOfAllTable = - correctedStateAndRegionPlans.getFirst(); + correctedStateAndRegionPlans.getFirst(); List regionPlans = correctedStateAndRegionPlans.getSecond(); RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); // Add RegionPlan @@ -140,10 +138,10 @@ public synchronized List balanceCluster( LOG.debug("Balancing RSGroup={}", rsgroup.getName()); Map>> loadOfTablesInGroup = new HashMap<>(); for (Map.Entry>> entry : correctedLoadOfAllTable - .entrySet()) { + .entrySet()) { TableName tableName = entry.getKey(); RSGroupInfo targetRSGInfo = RSGroupUtil - .getRSGroupInfo(masterServices, rsGroupInfoManager, tableName).orElse(defaultInfo); + .getRSGroupInfo(masterServices, rsGroupInfoManager, tableName).orElse(defaultInfo); if (targetRSGInfo.getName().equals(rsgroup.getName())) { loadOfTablesInGroup.put(tableName, entry.getValue()); } @@ -169,15 +167,15 @@ public synchronized List balanceCluster( @Override @NonNull public Map> roundRobinAssignment(List regions, - List servers) throws IOException { + List servers) throws IOException { Map> assignments = Maps.newHashMap(); List, List>> pairs = - generateGroupAssignments(regions, servers); + generateGroupAssignments(regions, servers); for (Pair, List> pair : pairs) { Map> result = - this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); + this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); result.forEach((server, regionInfos) -> assignments - .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); } return assignments; } @@ -185,19 +183,19 @@ public Map> roundRobinAssignment(List r @Override @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { try { Map> assignments = new TreeMap<>(); List, List>> pairs = - generateGroupAssignments(Lists.newArrayList(regions.keySet()), servers); + generateGroupAssignments(Lists.newArrayList(regions.keySet()), servers); for (Pair, List> pair : pairs) { List regionList = pair.getFirst(); Map currentAssignmentMap = Maps.newTreeMap(); regionList.forEach(r -> currentAssignmentMap.put(r, regions.get(r))); Map> pairResult = - this.internalBalancer.retainAssignment(currentAssignmentMap, 
pair.getSecond()); + this.internalBalancer.retainAssignment(currentAssignmentMap, pair.getSecond()); pairResult.forEach((server, rs) -> assignments - .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); } return assignments; } catch (IOException e) { @@ -206,24 +204,24 @@ public Map> retainAssignment(Map servers) throws IOException { + public ServerName randomAssignment(RegionInfo region, List servers) + throws IOException { List, List>> pairs = - generateGroupAssignments(Lists.newArrayList(region), servers); + generateGroupAssignments(Lists.newArrayList(region), servers); List filteredServers = pairs.iterator().next().getSecond(); return this.internalBalancer.randomAssignment(region, filteredServers); } private List, List>> generateGroupAssignments( - List regions, List servers) throws HBaseIOException { + List regions, List servers) throws HBaseIOException { try { ListMultimap regionMap = ArrayListMultimap.create(); ListMultimap serverMap = ArrayListMultimap.create(); RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); for (RegionInfo region : regions) { String groupName = - RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable()) - .orElse(defaultInfo).getName(); + RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable()) + .orElse(defaultInfo).getName(); regionMap.put(groupName, region); } for (String groupKey : regionMap.keySet()) { @@ -250,18 +248,19 @@ private List, List>> generateGroupAssignments( } candidates = getFallBackCandidates(servers); } - candidates = (candidates == null || candidates.isEmpty()) ? - Lists.newArrayList(BOGUS_SERVER_NAME) : candidates; + candidates = (candidates == null || candidates.isEmpty()) + ? Lists.newArrayList(BOGUS_SERVER_NAME) + : candidates; result.add(Pair.newPair(fallbackRegions, candidates)); } return result; - } catch(IOException e) { + } catch (IOException e) { throw new HBaseIOException("Failed to generate group assignments", e); } } private List filterOfflineServers(RSGroupInfo RSGroupInfo, - List onlineServers) { + List onlineServers) { if (RSGroupInfo != null) { return filterServers(RSGroupInfo.getServers(), onlineServers); } else { @@ -278,7 +277,7 @@ private List filterOfflineServers(RSGroupInfo RSGroupInfo, *
<p/>
    * TODO: consider using HashSet to pursue O(1) for contains() throughout the calling chain if * needed. - * @param servers the servers + * @param servers the servers * @param onlineServers List of servers which are online. * @return the list */ @@ -293,29 +292,30 @@ private List filterServers(Set
<Address>
    servers, List on } private Pair>>, List> - correctAssignments(Map>> existingAssignments) - throws IOException { + correctAssignments(Map>> existingAssignments) + throws IOException { // To return Map>> correctAssignments = new HashMap<>(); List regionPlansForMisplacedRegions = new ArrayList<>(); RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); for (Map.Entry>> assignments : existingAssignments - .entrySet()) { + .entrySet()) { TableName tableName = assignments.getKey(); Map> clusterLoad = assignments.getValue(); RSGroupInfo targetRSGInfo = null; Map> correctServerRegion = new TreeMap<>(); try { targetRSGInfo = RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, tableName) - .orElse(defaultInfo); + .orElse(defaultInfo); } catch (IOException exp) { LOG.debug("RSGroup information null for region of table " + tableName, exp); } for (Map.Entry> serverRegionMap : clusterLoad.entrySet()) { ServerName currentHostServer = serverRegionMap.getKey(); List regionInfoList = serverRegionMap.getValue(); - if (targetRSGInfo == null - || !targetRSGInfo.containsServer(currentHostServer.getAddress())) { + if ( + targetRSGInfo == null || !targetRSGInfo.containsServer(currentHostServer.getAddress()) + ) { regionInfoList.forEach(regionInfo -> { regionPlansForMisplacedRegions.add(new RegionPlan(regionInfo, currentHostServer, null)); }); @@ -328,7 +328,7 @@ private List filterServers(Set
    servers, List on // Return correct assignments and region movement plan for mis-placed regions together return new Pair>>, List>( - correctAssignments, regionPlansForMisplacedRegions); + correctAssignments, regionPlansForMisplacedRegions); } @Override @@ -399,8 +399,8 @@ public void regionOffline(RegionInfo regionInfo) { public synchronized void onConfigurationChange(Configuration conf) { boolean newFallbackEnabled = conf.getBoolean(FALLBACK_GROUP_ENABLE_KEY, false); if (fallbackEnabled != newFallbackEnabled) { - LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, - fallbackEnabled, newFallbackEnabled); + LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, fallbackEnabled, + newFallbackEnabled); fallbackEnabled = newFallbackEnabled; } provider.onConfigurationChange(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java index 9d73a5279886..4434c33b52cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -112,14 +112,13 @@ static RSGroupInfoManager create(MasterServices master) throws IOException { /** * Rename rsgroup * @param oldName old rsgroup name - * @param newName new rsgroup name - * @throws IOException + * @param newName new rsgroup name n */ void renameRSGroup(String oldName, String newName) throws IOException; /** * Update RSGroup configuration - * @param groupName the group name + * @param groupName the group name * @param configuration new configuration of the group name to be set * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 6ec3746c01b2..f01e482a2aaf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -110,8 +110,8 @@ * persistence store for the group information. It also makes use of zookeeper to store group * information needed for bootstrapping during offline mode. *
<h3>Concurrency</h3>
    RSGroup state is kept locally in Maps. There is a rsgroup name to cached - * RSGroupInfo Map at {@link RSGroupInfoHolder#groupName2Group}. - * These Maps are persisted to the hbase:rsgroup table (and cached in zk) on each modification. + * RSGroupInfo Map at {@link RSGroupInfoHolder#groupName2Group}. These Maps are persisted to the + * hbase:rsgroup table (and cached in zk) on each modification. *
<p/>
    * Mutations on state are synchronized but reads can continue without having to wait on an instance * monitor, mutations do wholesale replace of the Maps on update -- Copy-On-Write; the local Maps of @@ -130,10 +130,10 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { // Assigned before user tables static final TableName RSGROUP_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); - static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " + - "one server in 'default' RSGroup."; + static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = + "should keep at least " + "one server in 'default' RSGroup."; /** Define the config key of retries threshold when movements failed */ static final String FAILED_MOVE_MAX_RETRY = "hbase.rsgroup.move.max.retry"; @@ -214,9 +214,8 @@ static class RSGroupMappingScript { return; } - rsgroupMappingScript = new Shell.ShellCommandExecutor( - new String[] { script, "", "" }, null, null, - conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds + rsgroupMappingScript = new Shell.ShellCommandExecutor(new String[] { script, "", "" }, null, + null, conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds ); } @@ -231,14 +230,14 @@ String getRSGroup(String namespace, String tablename) { rsgroupMappingScript.execute(); } catch (IOException e) { // This exception may happen, like process doesn't have permission to run this script. - LOG.error("{}, placing {} back to default rsgroup", - e.getMessage(), + LOG.error("{}, placing {} back to default rsgroup", e.getMessage(), TableName.valueOf(namespace, tablename)); return RSGroupInfo.DEFAULT_GROUP; } return rsgroupMappingScript.getOutput().trim(); } } + private RSGroupMappingScript script; private RSGroupInfoManagerImpl(MasterServices masterServices) { @@ -297,8 +296,10 @@ public void start() { public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException { checkGroupName(rsGroupInfo.getName()); Map rsGroupMap = holder.groupName2Group; - if (rsGroupMap.get(rsGroupInfo.getName()) != null || - rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + if ( + rsGroupMap.get(rsGroupInfo.getName()) != null + || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP) + ) { throw new ConstraintException("Group already exists: " + rsGroupInfo.getName()); } Map newGroupMap = Maps.newHashMap(rsGroupMap); @@ -324,7 +325,7 @@ private Set
<Address>
    getOnlineServers() { } public synchronized Set
<Address> moveServers(Set<Address>
    servers, String srcGroup, - String dstGroup) throws IOException { + String dstGroup) throws IOException { RSGroupInfo src = getRSGroupInfo(srcGroup); RSGroupInfo dst = getRSGroupInfo(dstGroup); Set
    movedServers = new HashSet<>(); @@ -373,22 +374,22 @@ public synchronized void removeRSGroup(String groupName) throws IOException { RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); int serverCount = rsGroupInfo.getServers().size(); if (serverCount > 0) { - throw new ConstraintException("RSGroup " + groupName + " has " + serverCount + - " servers; you must remove these servers from the RSGroup before" + - " the RSGroup can be removed."); + throw new ConstraintException("RSGroup " + groupName + " has " + serverCount + + " servers; you must remove these servers from the RSGroup before" + + " the RSGroup can be removed."); } for (TableDescriptor td : masterServices.getTableDescriptors().getAll().values()) { if (td.getRegionServerGroup().map(groupName::equals).orElse(false)) { - throw new ConstraintException("RSGroup " + groupName + " is already referenced by " + - td.getTableName() + "; you must remove all the tables from the RSGroup before " + - "the RSGroup can be removed."); + throw new ConstraintException("RSGroup " + groupName + " is already referenced by " + + td.getTableName() + "; you must remove all the tables from the RSGroup before " + + "the RSGroup can be removed."); } } for (NamespaceDescriptor ns : masterServices.getClusterSchema().getNamespaces()) { String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); if (nsGroup != null && nsGroup.equals(groupName)) { throw new ConstraintException( - "RSGroup " + groupName + " is referenced by namespace: " + ns.getName()); + "RSGroup " + groupName + " is referenced by namespace: " + ns.getName()); } } Map rsGroupMap = holder.groupName2Group; @@ -456,7 +457,7 @@ private List retrieveGroupListFromGroupTable() throws IOException { break; } RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo - .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES)); + .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES)); rsGroupInfoList.add(ProtobufUtil.toGroupInfo(proto)); } } @@ -479,8 +480,7 @@ private List retrieveGroupListFromZookeeper() throws IOException { ProtobufUtil.expectPBMagicPrefix(data); ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); - RSGroupInfoList - .add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); + RSGroupInfoList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); } } LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size()); @@ -493,8 +493,7 @@ private List retrieveGroupListFromZookeeper() throws IOException { private void migrate(Collection groupList) { TableDescriptors tds = masterServices.getTableDescriptors(); - ProcedureExecutor procExec = - masterServices.getMasterProcedureExecutor(); + ProcedureExecutor procExec = masterServices.getMasterProcedureExecutor(); for (RSGroupInfo groupInfo : groupList) { if (groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { continue; @@ -659,9 +658,12 @@ private synchronized void flushConfig(Map newGroupMap) thro Map oldGroupMap = Maps.newHashMap(holder.groupName2Group); RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); - if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ || - !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables()) - /* compare tables in default group */) { + if ( + !oldGroupMap.equals(newGroupMap) + /* compare both tables and servers in other groups */ 
|| !oldDefaultGroup.getTables() + .equals(newDefaultGroup.getTables()) + /* compare tables in default group */ + ) { throw new IOException("Only servers in default group can be updated during offline mode"); } @@ -695,7 +697,7 @@ private void saveRSGroupMapToZK(Map newGroupMap) throws IOE LOG.debug("Saving RSGroup info to ZK"); try { String groupBasePath = - ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE); + ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE); ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC); List zkOps = new ArrayList<>(newGroupMap.size()); @@ -827,8 +829,10 @@ private void createRSGroupTable() throws IOException { } // wait for region to be online int tries = 600; - while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) && - masterServices.getMasterProcedureExecutor().isRunning() && tries > 0) { + while ( + !(masterServices.getMasterProcedureExecutor().isFinished(procId)) + && masterServices.getMasterProcedureExecutor().isRunning() && tries > 0 + ) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -841,8 +845,8 @@ private void createRSGroupTable() throws IOException { } else { Procedure result = masterServices.getMasterProcedureExecutor().getResult(procId); if (result != null && result.isFailed()) { - throw new IOException("Failed to create group table. " + - MasterProcedureUtil.unwrapRemoteIOException(result)); + throw new IOException( + "Failed to create group table. " + MasterProcedureUtil.unwrapRemoteIOException(result)); } } } @@ -861,13 +865,13 @@ private void multiMutate(List mutations) throws IOException { for (Mutation mutation : mutations) { if (mutation instanceof Put) { builder - .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, mutation)); + .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, mutation)); } else if (mutation instanceof Delete) { - builder.addMutationRequest( - ProtobufUtil.toMutation(MutationProto.MutationType.DELETE, mutation)); + builder + .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.DELETE, mutation)); } else { throw new DoNotRetryIOException( - "multiMutate doesn't support " + mutation.getClass().getName()); + "multiMutate doesn't support " + mutation.getClass().getName()); } } MutateRowsRequest request = builder.build(); @@ -890,7 +894,6 @@ public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException { return holder.tableName2Group.get(tableName); } - /** * Check if the set of servers are belong to dead servers list or online servers list. * @param servers servers to remove @@ -907,19 +910,18 @@ private void checkForDeadOrOnlineServers(Set
<Address> servers) throws IOExceptio } Set<Address>
    deadServers = new HashSet<>(); - for(ServerName server: masterServices.getServerManager().getDeadServers().copyServerNames()) { + for (ServerName server : masterServices.getServerManager().getDeadServers().copyServerNames()) { deadServers.add(server.getAddress()); } - for (Address address: servers) { + for (Address address : servers) { if (onlineServers.contains(address)) { throw new DoNotRetryIOException( - "Server " + address + " is an online server, not allowed to remove."); + "Server " + address + " is an online server, not allowed to remove."); } if (deadServers.contains(address)) { - throw new DoNotRetryIOException( - "Server " + address + " is on the dead servers list," - + " Maybe it will come back again, not allowed to remove."); + throw new DoNotRetryIOException("Server " + address + " is on the dead servers list," + + " Maybe it will come back again, not allowed to remove."); } } } @@ -928,13 +930,13 @@ private void checkOnlineServersOnly(Set
<Address> servers) throws IOException { // This uglyness is because we only have Address, not ServerName. // Online servers are keyed by ServerName. Set<Address>
    onlineServers = new HashSet<>(); - for(ServerName server: masterServices.getServerManager().getOnlineServers().keySet()) { + for (ServerName server : masterServices.getServerManager().getOnlineServers().keySet()) { onlineServers.add(server.getAddress()); } - for (Address address: servers) { + for (Address address : servers) { if (!onlineServers.contains(address)) { - throw new DoNotRetryIOException("Server " + address + - " is not an online server in 'default' RSGroup."); + throw new DoNotRetryIOException( + "Server " + address + " is not an online server in 'default' RSGroup."); } } } @@ -944,8 +946,8 @@ private void checkOnlineServersOnly(Set
    servers) throws IOException { */ private List getRegions(final Address server) { LinkedList regions = new LinkedList<>(); - for (Map.Entry el : - masterServices.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + for (Map.Entry el : masterServices.getAssignmentManager() + .getRegionStates().getRegionAssignments().entrySet()) { if (el.getValue() == null) { continue; } @@ -955,8 +957,9 @@ private List getRegions(final Address server) { } } for (RegionStateNode state : masterServices.getAssignmentManager().getRegionsInTransition()) { - if (state.getRegionLocation() != null && - state.getRegionLocation().getAddress().equals(server)) { + if ( + state.getRegionLocation() != null && state.getRegionLocation().getAddress().equals(server) + ) { addRegion(regions, state.getRegionInfo()); } } @@ -977,8 +980,8 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { /** * Move every region from servers which are currently located on these servers, but should not be * located there. - * @param movedServers the servers that are moved to new group - * @param srcGrpServers all servers in the source group, excluding the movedServers + * @param movedServers the servers that are moved to new group + * @param srcGrpServers all servers in the source group, excluding the movedServers * @param targetGroupName the target group * @param sourceGroupName the source group * @throws IOException if moving the server and tables fail @@ -986,14 +989,13 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<Address>
    srcGrpServers, String targetGroupName, String sourceGroupName) throws IOException { moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName, - rs -> getRegions(rs), info -> { + rs -> getRegions(rs), info -> { try { String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); return groupName.equals(targetGroupName); } catch (IOException e) { - LOG.warn("Failed to test group for region {} and target group {}", info, - targetGroupName); + LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName); return false; } }); @@ -1061,15 +1063,16 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
    new } retry++; } - } while (!failedRegions.isEmpty() && retry <= masterServices.getConfiguration() - .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); + } while ( + !failedRegions.isEmpty() && retry <= masterServices.getConfiguration() + .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE) + ); - //has up to max retry time or there are no more regions to move + // has up to max retry time or there are no more regions to move if (!failedRegions.isEmpty()) { // print failed moved regions, for later process conveniently - String msg = String - .format("move regions for group %s failed, failed regions: %s", sourceGroupName, - failedRegions); + String msg = String.format("move regions for group %s failed, failed regions: %s", + sourceGroupName, failedRegions); LOG.error(msg); throw new DoNotRetryIOException( msg + ", just record the last failed region's cause, more details in server log", toThrow); @@ -1077,8 +1080,8 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
    new } /** - * Wait for all the region move to complete. Keep waiting for other region movement - * completion even if some region movement fails. + * Wait for all the region move to complete. Keep waiting for other region movement completion + * even if some region movement fails. */ private void waitForRegionMovement(List>> regionMoveFutures, Set failedRegions, String sourceGroupName, int retryCount) { @@ -1087,12 +1090,14 @@ private void waitForRegionMovement(List>> region for (Pair> pair : regionMoveFutures) { try { pair.getSecond().get(); - if (masterServices.getAssignmentManager().getRegionStates(). - getRegionState(pair.getFirst()).isFailedOpen()) { + if ( + masterServices.getAssignmentManager().getRegionStates().getRegionState(pair.getFirst()) + .isFailedOpen() + ) { failedRegions.add(pair.getFirst().getRegionNameAsString()); } } catch (InterruptedException e) { - //Dont return form there lets wait for other regions to complete movement. + // Dont return form there lets wait for other regions to complete movement. failedRegions.add(pair.getFirst().getRegionNameAsString()); LOG.warn("Sleep interrupted", e); } catch (Exception e) { @@ -1104,13 +1109,14 @@ private void waitForRegionMovement(List>> region } private boolean isTableInGroup(TableName tableName, String groupName, - Set tablesInGroupCache) throws IOException { + Set tablesInGroupCache) throws IOException { if (tablesInGroupCache.contains(tableName)) { return true; } - if (RSGroupUtil.getRSGroupInfo(masterServices, this, tableName) - .map(RSGroupInfo::getName) - .orElse(RSGroupInfo.DEFAULT_GROUP).equals(groupName)) { + if ( + RSGroupUtil.getRSGroupInfo(masterServices, this, tableName).map(RSGroupInfo::getName) + .orElse(RSGroupInfo.DEFAULT_GROUP).equals(groupName) + ) { tablesInGroupCache.add(tableName); return true; } @@ -1118,11 +1124,11 @@ private boolean isTableInGroup(TableName tableName, String groupName, } private Map rsGroupGetRegionsInTransition(String groupName) - throws IOException { + throws IOException { Map rit = Maps.newTreeMap(); Set tablesInGroupCache = new HashSet<>(); - for (RegionStateNode regionNode : - masterServices.getAssignmentManager().getRegionsInTransition()) { + for (RegionStateNode regionNode : masterServices.getAssignmentManager() + .getRegionsInTransition()) { TableName tn = regionNode.getTable(); if (isTableInGroup(tn, groupName, tablesInGroupCache)) { rit.put(regionNode.getRegionInfo().getEncodedName(), regionNode.toRegionState()); @@ -1148,8 +1154,9 @@ Map>> getRSGroupAssignmentsByTable( TableName tn = region.getTable(); ServerName server = entry.getValue(); if (isTableInGroup(tn, groupName, tablesInGroupCache)) { - if (tableStateManager - .isTableState(tn, TableState.State.DISABLED, TableState.State.DISABLING)) { + if ( + tableStateManager.isTableState(tn, TableState.State.DISABLED, TableState.State.DISABLING) + ) { continue; } if (region.isSplitParent()) { @@ -1171,7 +1178,8 @@ Map>> getRSGroupAssignmentsByTable( } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { ServerManager serverManager = masterServices.getServerManager(); LoadBalancer balancer = masterServices.getLoadBalancer(); getRSGroupInfo(groupName); @@ -1188,21 +1196,20 @@ public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) Map groupRIT = rsGroupGetRegionsInTransition(groupName); if (groupRIT.size() > 0 && 
!request.isIgnoreRegionsInTransition()) { LOG.debug("Not running balancer because {} region(s) in transition: {}", groupRIT.size(), - StringUtils.abbreviate(masterServices.getAssignmentManager().getRegionStates() - .getRegionsInTransition().toString(), - 256)); + StringUtils.abbreviate(masterServices.getAssignmentManager().getRegionStates() + .getRegionsInTransition().toString(), 256)); return responseBuilder.build(); } if (serverManager.areDeadServersInProgress()) { LOG.debug("Not running balancer because processing dead regionserver(s): {}", - serverManager.getDeadServers()); + serverManager.getDeadServers()); return responseBuilder.build(); } // We balance per group instead of per table Map>> assignmentsByTable = - getRSGroupAssignmentsByTable(masterServices.getTableStateManager(), groupName); + getRSGroupAssignmentsByTable(masterServices.getTableStateManager(), groupName); List plans = balancer.balanceCluster(assignmentsByTable); boolean balancerRan = !plans.isEmpty(); @@ -1228,9 +1235,9 @@ private void moveTablesAndWait(Set tables, String targetGroup) throws continue; } TableDescriptor newTd = - TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); - procIds.add(masterServices.modifyTable(tableName, newTd, HConstants.NO_NONCE, - HConstants.NO_NONCE)); + TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); + procIds.add( + masterServices.modifyTable(tableName, newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); } for (long procId : procIds) { Procedure proc = masterServices.getMasterProcedureExecutor().getProcedure(procId); @@ -1238,7 +1245,7 @@ private void moveTablesAndWait(Set tables, String targetGroup) throws continue; } ProcedureSyncWait.waitForProcedureToCompleteIOE(masterServices.getMasterProcedureExecutor(), - proc, Long.MAX_VALUE); + proc, Long.MAX_VALUE); } LOG.info("Move tables done: moved {} tables to {}", tables.size(), targetGroup); if (LOG.isDebugEnabled()) { @@ -1273,8 +1280,8 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE RSGroupInfo srcGrp = getRSGroupOfServer(firstServer); if (srcGrp == null) { // Be careful. This exception message is tested for in TestRSGroupAdmin2... - throw new ConstraintException("Server " + firstServer - + " is either offline or it does not exist."); + throw new ConstraintException( + "Server " + firstServer + " is either offline or it does not exist."); } // Only move online servers (when moving from 'default') or servers from other @@ -1286,11 +1293,11 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE checkOnlineServersOnly(servers); } // Ensure all servers are of same rsgroup. - for (Address server: servers) { + for (Address server : servers) { String tmpGroup = getRSGroupOfServer(server).getName(); if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move server request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); + throw new ConstraintException("Move server request should only come from one source " + + "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); } } if (srcGrp.getServers().size() <= servers.size()) { @@ -1299,16 +1306,16 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE Optional optGroupName = td.getRegionServerGroup(); if (optGroupName.isPresent() && optGroupName.get().equals(srcGrp.getName())) { throw new ConstraintException( - "Cannot leave a RSGroup " + srcGrp.getName() + " that contains tables('" + - td.getTableName() + "' at least) without servers to host them."); + "Cannot leave a RSGroup " + srcGrp.getName() + " that contains tables('" + + td.getTableName() + "' at least) without servers to host them."); } } } // MovedServers may be < passed in 'servers'. - Set
<Address> movedServers = moveServers(servers, srcGrp.getName(), - targetGroupName); - moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, srcGrp.getName()); + Set<Address>
    movedServers = moveServers(servers, srcGrp.getName(), targetGroupName); + moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, + srcGrp.getName()); LOG.info("Move servers done: moved {} servers from {} to {}", movedServers.size(), srcGrp.getName(), targetGroupName); if (LOG.isDebugEnabled()) { @@ -1329,7 +1336,7 @@ public synchronized void renameRSGroup(String oldName, String newName) throws IO throw new ConstraintException(RSGroupInfo.DEFAULT_GROUP + " can't be rename"); } checkGroupName(newName); - //getRSGroupInfo validates old RSGroup existence. + // getRSGroupInfo validates old RSGroup existence. RSGroupInfo oldRSG = getRSGroupInfo(oldName); Map rsGroupMap = holder.groupName2Group; if (rsGroupMap.containsKey(newName)) { @@ -1341,24 +1348,21 @@ public synchronized void renameRSGroup(String oldName, String newName) throws IO RSGroupInfo newRSG = new RSGroupInfo(newName, oldRSG.getServers()); newGroupMap.put(newName, newRSG); flushConfig(newGroupMap); - Set updateTables = - masterServices.getTableDescriptors().getAll().values() - .stream() - .filter(t -> oldName.equals(t.getRegionServerGroup().orElse(null))) - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); + Set updateTables = masterServices.getTableDescriptors().getAll().values().stream() + .filter(t -> oldName.equals(t.getRegionServerGroup().orElse(null))) + .map(TableDescriptor::getTableName).collect(Collectors.toSet()); setRSGroup(updateTables, newName); LOG.info("Rename RSGroup done: {} => {}", oldName, newName); } @Override public synchronized void updateRSGroupConfig(String groupName, Map configuration) - throws IOException { + throws IOException { if (RSGroupInfo.DEFAULT_GROUP.equals(groupName)) { // We do not persist anything of default group, therefore, it is not supported to update // default group's configuration which lost once master down. - throw new ConstraintException("configuration of " + RSGroupInfo.DEFAULT_GROUP - + " can't be stored persistently"); + throw new ConstraintException( + "configuration of " + RSGroupInfo.DEFAULT_GROUP + " can't be stored persistently"); } RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); rsGroupInfo.getConfiguration().forEach((k, v) -> rsGroupInfo.removeConfiguration(k)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java index aec38ee49052..44538f86f841 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rsgroup; import java.util.Arrays; @@ -91,8 +90,8 @@ public int run(String[] args) throws Exception { try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + - " due to: " + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java index 08c545327a3e..eb59ff62a001 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.rsgroup; @@ -53,8 +60,10 @@ public static List listTablesInRSGroup(MasterServices master, String boolean isDefaultGroup = RSGroupInfo.DEFAULT_GROUP.equals(groupName); for (TableDescriptor td : master.getTableDescriptors().getAll().values()) { // no config means in default group - if (RSGroupUtil.getRSGroupInfo(master, master.getRSGroupInfoManager(), td.getTableName()) - .map(g -> g.getName().equals(groupName)).orElse(isDefaultGroup)) { + if ( + RSGroupUtil.getRSGroupInfo(master, master.getRSGroupInfoManager(), td.getTableName()) + .map(g -> g.getName().equals(groupName)).orElse(isDefaultGroup) + ) { tables.add(td.getTableName()); } } @@ -66,7 +75,7 @@ public static List listTablesInRSGroup(MasterServices master, String * from the {@link NamespaceDescriptor}. If still not present, return empty. 
*/ public static Optional getRSGroupInfo(MasterServices master, - RSGroupInfoManager manager, TableName tableName) throws IOException { + RSGroupInfoManager manager, TableName tableName) throws IOException { TableDescriptor td = master.getTableDescriptors().get(tableName); if (td == null) { return Optional.empty(); @@ -98,8 +107,8 @@ public static Optional getRSGroupInfo(MasterServices master, ClusterSchema clusterSchema = master.getClusterSchema(); if (clusterSchema == null) { if (TableName.isMetaTableName(tableName)) { - LOG.info("Can not get the namespace rs group config for meta table, since the" + - " meta table is not online yet, will use default group to assign meta first"); + LOG.info("Can not get the namespace rs group config for meta table, since the" + + " meta table is not online yet, will use default group to assign meta first"); } else { LOG.warn("ClusterSchema is null, can only use default rsgroup, should not happen?"); } @@ -132,7 +141,7 @@ public static RSGroupInfo fillTables(RSGroupInfo rsGroupInfo, Collection saslProps, SecretManager secretManager) - throws IOException { + Map saslProps, SecretManager secretManager) + throws IOException { serverWithProvider = provider.createServer(secretManager, saslProps); saslServer = serverWithProvider.getServer(); } @@ -62,9 +62,7 @@ public void dispose() { } public String getAttemptingUser() { - return serverWithProvider.getAttemptingUser() - .map(Object::toString) - .orElse("Unknown"); + return serverWithProvider.getAttemptingUser().map(Object::toString).orElse("Unknown"); } public byte[] wrap(byte[] buf, int off, int len) throws SaslException { @@ -84,7 +82,7 @@ public String getAuthorizationID() { } public static T getIdentifier(String id, - SecretManager secretManager) throws InvalidToken { + SecretManager secretManager) throws InvalidToken { byte[] tokenId = SaslUtil.decodeIdentifier(id); T tokenIdentifier = secretManager.createIdentifier(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java index 2bf351b63259..5f9433a3f141 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java index be968e530916..8912c34c51b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.security.access; @@ -59,7 +58,7 @@ public class AccessChecker { private static final Logger LOG = LoggerFactory.getLogger(AccessChecker.class); private static final Logger AUDITLOG = - LoggerFactory.getLogger("SecurityLogger." + AccessChecker.class.getName()); + LoggerFactory.getLogger("SecurityLogger." + AccessChecker.class.getName()); private final AuthManager authManager; /** Group service to retrieve the user group information */ @@ -71,7 +70,6 @@ public static boolean isAuthorizationSupported(Configuration conf) { /** * Constructor with existing configuration - * * @param conf Existing configuration to use */ public AccessChecker(final Configuration conf) { @@ -85,27 +83,26 @@ public AuthManager getAuthManager() { /** * Authorizes that the current user has any of the given permissions to access the table. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type. + * @param user Active user to which authorization checks should be applied + * @param request Request type. * @param tableName Table requested * @param permissions Actions being requested - * @throws IOException if obtaining the current user fails + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - public void requireAccess(User user, String request, TableName tableName, - Action... permissions) throws IOException { + public void requireAccess(User user, String request, TableName tableName, Action... permissions) + throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.accessUserTable(user, tableName, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, null, null); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, null, null); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + null, null); } } logResult(result); @@ -116,33 +113,30 @@ public void requireAccess(User user, String request, TableName tableName, /** * Authorizes that the current user has global privileges for the given action. 
- * @param user Active user to which authorization checks should be applied - * @param request Request type + * @param user Active user to which authorization checks should be applied + * @param request Request type * @param filterUser User name to be filtered from permission as requested - * @param perm The action being requested - * @throws IOException if obtaining the current user fails + * @param perm The action being requested + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if authorization is denied */ public void requirePermission(User user, String request, String filterUser, Action perm) - throws IOException { + throws IOException { requireGlobalPermission(user, request, perm, null, null, filterUser); } /** - * Checks that the user has the given global permission. The generated - * audit log message will contain context information for the operation - * being authorized, based on the given parameters. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param perm Action being requested - * @param tableName Affected table name. - * @param familyMap Affected column families. + * Checks that the user has the given global permission. The generated audit log message will + * contain context information for the operation being authorized, based on the given parameters. + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param perm Action being requested + * @param tableName Affected table name. + * @param familyMap Affected column families. * @param filterUser User name to be filtered from permission as requested */ - public void requireGlobalPermission(User user, String request, - Action perm, TableName tableName, - Map> familyMap, String filterUser) throws IOException { + public void requireGlobalPermission(User user, String request, Action perm, TableName tableName, + Map> familyMap, String filterUser) throws IOException { AuthResult result; if (authManager.authorizeUserGlobal(user, perm)) { result = AuthResult.allow(request, "Global check allowed", user, perm, tableName, familyMap); @@ -154,23 +148,21 @@ public void requireGlobalPermission(User user, String request, logResult(result); if (!result.isAllowed()) { throw new AccessDeniedException( - "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") - + "' (global, action=" + perm.toString() + ")"); + "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") + + "' (global, action=" + perm.toString() + ")"); } } /** - * Checks that the user has the given global permission. The generated - * audit log message will contain context information for the operation - * being authorized, based on the given parameters. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type + * Checks that the user has the given global permission. The generated audit log message will + * contain context information for the operation being authorized, based on the given parameters. 
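For context, a minimal usage sketch of the AccessChecker global-check entry points whose Javadoc is rewrapped above; the AccessChecker instance, User, request name, and table name are placeholders assumed for illustration and are not taken from the patch itself:

    // Illustrative sketch only; placeholder names throughout.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.access.AccessChecker;
    import org.apache.hadoop.hbase.security.access.Permission.Action;

    public final class GlobalCheckExample {
      static void checkAdmin(AccessChecker checker, User caller) throws IOException {
        // Throws AccessDeniedException unless 'caller' holds global ADMIN; the result is audit-logged.
        checker.requirePermission(caller, "exampleRequest", null, Action.ADMIN);
        // Global check evaluated in the context of a table; familyMap and filterUser left null.
        checker.requireGlobalPermission(caller, "exampleRequest", Action.CREATE,
            TableName.valueOf("default", "t1"), null, null);
      }
    }

Passing null for the family map and filter user mirrors how most call sites in this patch invoke these methods.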
+ * @param user Active user to which authorization checks should be applied + * @param request Request type * @param perm Action being requested * @param namespace The given namespace */ - public void requireGlobalPermission(User user, String request, Action perm, - String namespace) throws IOException { + public void requireGlobalPermission(User user, String request, Action perm, String namespace) + throws IOException { AuthResult authResult; if (authManager.authorizeUserGlobal(user, perm)) { authResult = AuthResult.allow(request, "Global check allowed", user, perm, null); @@ -181,27 +173,27 @@ public void requireGlobalPermission(User user, String request, Action perm, authResult.getParams().setNamespace(namespace); logResult(authResult); throw new AccessDeniedException( - "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") - + "' (global, action=" + perm.toString() + ")"); + "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") + + "' (global, action=" + perm.toString() + ")"); } } /** * Checks that the user has the given global or namespace permission. - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param namespace Name space as requested - * @param filterUser User name to be filtered from permission as requested + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param namespace Name space as requested + * @param filterUser User name to be filtered from permission as requested * @param permissions Actions being requested */ public void requireNamespacePermission(User user, String request, String namespace, - String filterUser, Action... permissions) throws IOException { + String filterUser, Action... permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserNamespace(user, namespace, permission)) { result = - AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); + AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); break; } else { // rest of the world @@ -217,23 +209,22 @@ public void requireNamespacePermission(User user, String request, String namespa /** * Checks that the user has the given global or namespace permission. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param namespace The given namespace - * @param tableName Table requested - * @param familyMap Column family map requested + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param namespace The given namespace + * @param tableName Table requested + * @param familyMap Column family map requested * @param permissions Actions being requested */ public void requireNamespacePermission(User user, String request, String namespace, - TableName tableName, Map> familyMap, - Action... permissions) throws IOException { + TableName tableName, Map> familyMap, Action... 
permissions) + throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserNamespace(user, namespace, permission)) { result = - AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); + AuthResult.allow(request, "Namespace permission granted", user, permission, namespace); result.getParams().setTableName(tableName).setFamilies(familyMap); break; } else { @@ -249,32 +240,31 @@ public void requireNamespacePermission(User user, String request, String namespa } /** - * Authorizes that the current user has any of the given permissions for the - * given table, column family and column qualifier. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type - * @param tableName Table requested - * @param family Column family requested - * @param qualifier Column qualifier requested - * @param filterUser User name to be filtered from permission as requested + * Authorizes that the current user has any of the given permissions for the given table, column + * family and column qualifier. + * @param user Active user to which authorization checks should be applied + * @param request Request type + * @param tableName Table requested + * @param family Column family requested + * @param qualifier Column qualifier requested + * @param filterUser User name to be filtered from permission as requested * @param permissions Actions being requested - * @throws IOException if obtaining the current user fails + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ public void requirePermission(User user, String request, TableName tableName, byte[] family, - byte[] qualifier, String filterUser, Action... permissions) throws IOException { + byte[] qualifier, String filterUser, Action... permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserTable(user, tableName, family, qualifier, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, family, qualifier); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + family, qualifier); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, family, qualifier); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + family, qualifier); } } result.getParams().addExtraParam("filterUser", filterUser); @@ -285,32 +275,30 @@ public void requirePermission(User user, String request, TableName tableName, by } /** - * Authorizes that the current user has any of the given permissions for the - * given table, column family and column qualifier. - * - * @param user Active user to which authorization checks should be applied - * @param request Request type + * Authorizes that the current user has any of the given permissions for the given table, column + * family and column qualifier. 
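Likewise, a hedged sketch of the column-scoped requirePermission variant reformatted in this hunk; the family "f", qualifier "q", and table name are illustrative placeholders:

    // Illustrative sketch only; "f", "q" and the table name are placeholders.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.access.AccessChecker;
    import org.apache.hadoop.hbase.security.access.Permission.Action;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ColumnCheckExample {
      static void checkRead(AccessChecker checker, User caller) throws IOException {
        // Denied (with an audit-log entry) unless 'caller' may READ the given family/qualifier.
        checker.requirePermission(caller, "get", TableName.valueOf("default", "t1"),
            Bytes.toBytes("f"), Bytes.toBytes("q"), null, Action.READ);
      }
    }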
+ * @param user Active user to which authorization checks should be applied + * @param request Request type * @param tableName Table requested * @param family Column family param * @param qualifier Column qualifier param * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - public void requireTablePermission(User user, String request, - TableName tableName,byte[] family, byte[] qualifier, - Action... permissions) throws IOException { + public void requireTablePermission(User user, String request, TableName tableName, byte[] family, + byte[] qualifier, Action... permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserTable(user, tableName, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, null, null); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); result.getParams().setFamily(family).setQualifier(qualifier); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, family, qualifier); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + family, qualifier); result.getParams().setFamily(family).setQualifier(qualifier); } } @@ -322,13 +310,13 @@ public void requireTablePermission(User user, String request, /** * Check if caller is granting or revoking superusers's or supergroups's permissions. - * @param request request name - * @param caller caller + * @param request request name + * @param caller caller * @param userToBeChecked target user or group * @throws IOException AccessDeniedException if target user is superuser */ public void performOnSuperuser(String request, User caller, String userToBeChecked) - throws IOException { + throws IOException { List userGroups = new ArrayList<>(); userGroups.add(userToBeChecked); if (!AuthUtil.isGroupPrincipal(userToBeChecked)) { @@ -338,28 +326,23 @@ public void performOnSuperuser(String request, User caller, String userToBeCheck } for (String name : userGroups) { if (Superusers.isSuperUser(name)) { - AuthResult result = AuthResult.deny( - request, - "Granting or revoking superusers's or supergroups's permissions is not allowed", - caller, - Action.ADMIN, - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + AuthResult result = AuthResult.deny(request, + "Granting or revoking superusers's or supergroups's permissions is not allowed", caller, + Action.ADMIN, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); logResult(result); throw new AccessDeniedException(result.getReason()); } } } - public void checkLockPermissions(User user, String namespace, - TableName tableName, RegionInfo[] regionInfos, String reason) - throws IOException { + public void checkLockPermissions(User user, String namespace, TableName tableName, + RegionInfo[] regionInfos, String reason) throws IOException { if (namespace != null && !namespace.isEmpty()) { requireNamespacePermission(user, reason, namespace, null, Action.ADMIN, Action.CREATE); } else if (tableName != null || (regionInfos != null && regionInfos.length > 0)) { // So, either a table or regions op. If latter, check perms ons table. - TableName tn = tableName != null? tableName: regionInfos[0].getTable(); - requireTablePermission(user, reason, tn, null, null, - Action.ADMIN, Action.CREATE); + TableName tn = tableName != null ? 
tableName : regionInfos[0].getTable(); + requireTablePermission(user, reason, tn, null, null, Action.ADMIN, Action.CREATE); } else { throw new DoNotRetryIOException("Invalid lock level when requesting permissions."); } @@ -370,13 +353,12 @@ public static void logResult(AuthResult result) { User user = result.getUser(); UserGroupInformation ugi = user != null ? user.getUGI() : null; AUDITLOG.trace( - "Access {} for user {}; reason: {}; remote address: {}; request: {}; context: {};" + - "auth method: {}", + "Access {} for user {}; reason: {}; remote address: {}; request: {}; context: {};" + + "auth method: {}", (result.isAllowed() ? "allowed" : "denied"), - (user != null ? user.getShortName() : "UNKNOWN"), - result.getReason(), RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""), - result.getRequest(), result.toContextString(), - ugi != null ? ugi.getAuthenticationMethod() : "UNKNOWN"); + (user != null ? user.getShortName() : "UNKNOWN"), result.getReason(), + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""), result.getRequest(), + result.toContextString(), ugi != null ? ugi.getAuthenticationMethod() : "UNKNOWN"); } } @@ -385,7 +367,7 @@ public static void logResult(AuthResult result) { * any privilege but for others caller must have ADMIN privilege. */ public User validateCallerWithFilterUser(User caller, TablePermission tPerm, String inputUserName) - throws IOException { + throws IOException { User filterUser = null; if (!caller.getShortName().equals(inputUserName)) { // User should have admin privilege if checking permission for other users @@ -425,7 +407,7 @@ public String getShortName() { this.shortName = new HadoopKerberosName(this.name).getShortName(); } catch (IOException ioe) { throw new IllegalArgumentException( - "Illegal principal name " + this.name + ": " + ioe.toString(), ioe); + "Illegal principal name " + this.name + ": " + ioe.toString(), ioe); } } return shortName; @@ -444,14 +426,14 @@ public String[] getGroupNames() { @Override public T runAs(PrivilegedAction action) { throw new UnsupportedOperationException( - "Method not supported, this class has limited implementation"); + "Method not supported, this class has limited implementation"); } @Override public T runAs(PrivilegedExceptionAction action) - throws IOException, InterruptedException { + throws IOException, InterruptedException { throw new UnsupportedOperationException( - "Method not supported, this class has limited implementation"); + "Method not supported, this class has limited implementation"); } @Override @@ -476,8 +458,7 @@ private void initGroupService(Configuration conf) { /** * Retrieve the groups of the given user. - * @param user User name - * @return Groups + * @param user User name n */ public static List getUserGroups(String user) { try { @@ -490,8 +471,8 @@ public static List getUserGroups(String user) { /** * Authorizes that if the current user has the given permissions. 
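A small sketch of the non-throwing hasUserPermission path described by the Javadoc just above, using the Permission builder that also appears later in this patch; the table and request names are placeholders:

    // Illustrative sketch only; placeholder table and request names.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.access.AccessChecker;
    import org.apache.hadoop.hbase.security.access.Permission;

    public final class HasPermissionExample {
      static boolean canRead(AccessChecker checker, User caller) throws IOException {
        // Table-scoped READ permission, answered as a boolean instead of an exception.
        Permission perm = Permission.newBuilder(TableName.valueOf("default", "t1"))
            .withActions(Permission.Action.READ).build();
        return checker.hasUserPermission(caller, "exampleRequest", perm);
      }
    }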
- * @param user Active user to which authorization checks should be applied - * @param request Request type + * @param user Active user to which authorization checks should be applied + * @param request Request type * @param permission Actions being requested * @return True if the user has the specific permission */ @@ -512,10 +493,10 @@ public boolean hasUserPermission(User user, String request, Permission permissio for (Action action : nsPerm.getActions()) { if (getAuthManager().authorizeUserNamespace(user, nsPerm.getNamespace(), action)) { authResult = - AuthResult.allow(request, "Namespace action allowed", user, action, null, null); + AuthResult.allow(request, "Namespace action allowed", user, action, null, null); } else { authResult = - AuthResult.deny(request, "Namespace action denied", user, action, null, null); + AuthResult.deny(request, "Namespace action denied", user, action, null, null); } AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { @@ -540,7 +521,7 @@ public boolean hasUserPermission(User user, String request, Permission permissio } private AuthResult permissionGranted(String request, User user, Action permRequest, - TableName tableName, byte[] family, byte[] qualifier) { + TableName tableName, byte[] family, byte[] qualifier) { Map> map = makeFamilyMap(family, qualifier); return permissionGranted(request, user, permRequest, tableName, map); } @@ -552,15 +533,15 @@ private AuthResult permissionGranted(String request, User user, Action permReque * Note: Ordering of the authorization checks has been carefully optimized to short-circuit the * most common requests and minimize the amount of processing required. *

    - * @param request User request - * @param user User name + * @param request User request + * @param user User name * @param permRequest the action being requested - * @param tableName Table name - * @param families the map of column families to qualifiers present in the request + * @param tableName Table name + * @param families the map of column families to qualifiers present in the request * @return an authorization result */ public AuthResult permissionGranted(String request, User user, Action permRequest, - TableName tableName, Map> families) { + TableName tableName, Map> families) { // 1. All users need read access to hbase:meta table. // this is a very common operation, so deal with it quickly. if (TableName.META_TABLE_NAME.equals(tableName)) { @@ -596,8 +577,10 @@ public AuthResult permissionGranted(String request, User user, Action permReques // for each qualifier of the family Set familySet = (Set) family.getValue(); for (byte[] qualifier : familySet) { - if (!getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, - permRequest)) { + if ( + !getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, + permRequest) + ) { return AuthResult.deny(request, "Failed qualifier check", user, permRequest, tableName, makeFamilyMap(family.getKey(), qualifier)); } @@ -605,8 +588,10 @@ public AuthResult permissionGranted(String request, User user, Action permReques } else if (family.getValue() instanceof List) { // List List cellList = (List) family.getValue(); for (Cell cell : cellList) { - if (!getAuthManager().authorizeUserTable(user, tableName, family.getKey(), - CellUtil.cloneQualifier(cell), permRequest)) { + if ( + !getAuthManager().authorizeUserTable(user, tableName, family.getKey(), + CellUtil.cloneQualifier(cell), permRequest) + ) { return AuthResult.deny(request, "Failed qualifier check", user, permRequest, tableName, makeFamilyMap(family.getKey(), CellUtil.cloneQualifier(cell))); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index 79233df751eb..47dc654bebd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.util.Map; import java.util.Objects; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -32,20 +29,19 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.yetus.audience.InterfaceAudience; /** * NOTE: for internal use only by AccessController implementation - * *

    - * TODO: There is room for further performance optimization here. - * Calling AuthManager.authorize() per KeyValue imposes a fair amount of - * overhead. A more optimized solution might look at the qualifiers where - * permissions are actually granted and explicitly limit the scan to those. + * TODO: There is room for further performance optimization here. Calling AuthManager.authorize() + * per KeyValue imposes a fair amount of overhead. A more optimized solution might look at the + * qualifiers where permissions are actually granted and explicitly limit the scan to those. *

 * </p>
 * <p>
- * We should aim to use this _only_ when access to the requested column families
- * is not granted at the column family levels. If table or column family
- * access succeeds, then there is no need to impose the overhead of this filter.
+ * We should aim to use this _only_ when access to the requested column families is not granted at
+ * the column family levels. If table or column family access succeeds, then there is no need to
+ * impose the overhead of this filter.
 * </p>
    */ @InterfaceAudience.Private @@ -75,8 +71,8 @@ public static enum Strategy { AccessControlFilter() { } - AccessControlFilter(AuthManager mgr, User ugi, TableName tableName, - Strategy strategy, Map cfVsMaxVersions) { + AccessControlFilter(AuthManager mgr, User ugi, TableName tableName, Strategy strategy, + Map cfVsMaxVersions) { authManager = mgr; table = tableName; user = ugi; @@ -98,20 +94,21 @@ public ReturnCode filterCell(final Cell cell) { if (isSystemTable) { return ReturnCode.INCLUDE; } - if (prevFam.getBytes() == null - || !(PrivateCellUtil.matchingFamily(cell, prevFam.getBytes(), prevFam.getOffset(), - prevFam.getLength()))) { + if ( + prevFam.getBytes() == null || !(PrivateCellUtil.matchingFamily(cell, prevFam.getBytes(), + prevFam.getOffset(), prevFam.getLength())) + ) { prevFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); // Similar to VisibilityLabelFilter familyMaxVersions = cfVsMaxVersions.get(prevFam); // Family is changed. Just unset curQualifier. prevQual.unset(); } - if (prevQual.getBytes() == null - || !(PrivateCellUtil.matchingQualifier(cell, prevQual.getBytes(), prevQual.getOffset(), - prevQual.getLength()))) { - prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + if ( + prevQual.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, prevQual.getBytes(), + prevQual.getOffset(), prevQual.getLength())) + ) { + prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); currentVersions = 0; } currentVersions++; @@ -128,15 +125,17 @@ public ReturnCode filterCell(final Cell cell) { return ReturnCode.INCLUDE; } } - break; + break; // Cell permissions can override table or CF permissions case CHECK_CELL_DEFAULT: { - if (authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) || - authManager.authorizeCell(user, table, cell, Permission.Action.READ)) { + if ( + authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) + || authManager.authorizeCell(user, table, cell, Permission.Action.READ) + ) { return ReturnCode.INCLUDE; } } - break; + break; default: throw new RuntimeException("Unhandled strategy " + strategy); } @@ -156,7 +155,7 @@ public void reset() throws IOException { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { // no implementation, server-side use only throw new UnsupportedOperationException( "Serialization not supported. Intended for server-side use only."); @@ -168,8 +167,8 @@ public void reset() throws IOException { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray() */ - public static AccessControlFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static AccessControlFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { // no implementation, server-side use only throw new UnsupportedOperationException( "Serialization not supported. 
Intended for server-side use only."); @@ -180,15 +179,13 @@ public boolean equals(Object obj) { if (!(obj instanceof AccessControlFilter)) { return false; } - if (this == obj){ + if (this == obj) { return true; } - AccessControlFilter f=(AccessControlFilter)obj; - return this.authManager.equals(f.authManager) && - this.table.equals(f.table) && - this.user.equals(f.user) && - this.strategy.equals(f.strategy) && - this.cfVsMaxVersions.equals(f.cfVsMaxVersions); + AccessControlFilter f = (AccessControlFilter) obj; + return this.authManager.equals(f.authManager) && this.table.equals(f.table) + && this.user.equals(f.user) && this.strategy.equals(f.strategy) + && this.cfVsMaxVersions.equals(f.cfVsMaxVersions); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 1594e1306b09..a4438460fce5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.security.access; @@ -146,49 +145,41 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasPermissionResponse; /** - * Provides basic authorization checks for data access and administrative - * operations. - * + * Provides basic authorization checks for data access and administrative operations. *

 * <p>
- * {@code AccessController} performs authorization checks for HBase operations
- * based on:
+ * {@code AccessController} performs authorization checks for HBase operations based on:
 * </p>
 * <ul>
- * <li>the identity of the user performing the operation</li>
- * <li>the scope over which the operation is performed, in increasing
- * specificity: global, table, column family, or qualifier</li>
- * <li>the type of action being performed (as mapped to
- * {@link Permission.Action} values)</li>
+ * <li>the identity of the user performing the operation</li>
+ * <li>the scope over which the operation is performed, in increasing specificity: global, table,
+ * column family, or qualifier</li>
+ * <li>the type of action being performed (as mapped to {@link Permission.Action} values)</li>
 * </ul>
 * <p>
- * If the authorization check fails, an {@link AccessDeniedException}
- * will be thrown for the operation.
+ * If the authorization check fails, an {@link AccessDeniedException} will be thrown for the
+ * operation.
 * </p>
- *
 * <p>
- * To perform authorization checks, {@code AccessController} relies on the
- * RpcServerEngine being loaded to provide
- * the user identities for remote requests.
+ * To perform authorization checks, {@code AccessController} relies on the RpcServerEngine being
+ * loaded to provide the user identities for remote requests.
 * </p>
- *
 * <p>
- * The access control lists used for authorization can be manipulated via the
- * exposed {@link AccessControlService} Interface implementation, and the associated
- * {@code grant}, {@code revoke}, and {@code user_permission} HBase shell
- * commands.
+ * The access control lists used for authorization can be manipulated via the exposed
+ * {@link AccessControlService} Interface implementation, and the associated {@code grant},
+ * {@code revoke}, and {@code user_permission} HBase shell commands.
 * </p>
    */ @CoreCoprocessor @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class AccessController implements MasterCoprocessor, RegionCoprocessor, - RegionServerCoprocessor, AccessControlService.Interface, - MasterObserver, RegionObserver, RegionServerObserver, EndpointObserver, BulkLoadObserver { + RegionServerCoprocessor, AccessControlService.Interface, MasterObserver, RegionObserver, + RegionServerObserver, EndpointObserver, BulkLoadObserver { // TODO: encapsulate observer functions into separate class/sub-class. private static final Logger LOG = LoggerFactory.getLogger(AccessController.class); private static final Logger AUDITLOG = - LoggerFactory.getLogger("SecurityLogger."+AccessController.class.getName()); + LoggerFactory.getLogger("SecurityLogger." + AccessController.class.getName()); private static final String CHECK_COVERING_PERM = "check_covering_perm"; private static final String TAG_CHECK_PASSED = "tag_check_passed"; private static final byte[] TRUE = Bytes.toBytes(true); @@ -199,21 +190,23 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** flags if we are running on a region of the _acl_ table */ private boolean aclRegion = false; - /** defined only for Endpoint implementation, so it can have way to - access region services */ + /** + * defined only for Endpoint implementation, so it can have way to access region services + */ private RegionCoprocessorEnvironment regionEnv; /** Mapping of scanner instances to the user who created them */ - private Map scannerOwners = - new MapMaker().weakKeys().makeMap(); + private Map scannerOwners = new MapMaker().weakKeys().makeMap(); private Map> tableAcls; /** Provider for mapping principal names to Users */ private UserProvider userProvider; - /** if we are active, usually false, only true if "hbase.security.authorization" - has been set to true in site configuration */ + /** + * if we are active, usually false, only true if "hbase.security.authorization" has been set to + * true in site configuration + */ private boolean authorizationEnabled; /** if we are able to support cell ACLs */ @@ -222,8 +215,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** if we should check EXEC permissions */ private boolean shouldCheckExecPermission; - /** if we should terminate access checks early as soon as table or CF grants - allow access; pre-0.98 compatible behavior */ + /** + * if we should terminate access checks early as soon as table or CF grants allow access; pre-0.98 + * compatible behavior + */ private boolean compatibleEarlyTermination; /** if we have been successfully initialized */ @@ -233,8 +228,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, private volatile boolean aclTabAvailable = false; public static boolean isCellAuthorizationSupported(Configuration conf) { - return AccessChecker.isAuthorizationSupported(conf) && - (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); + return AccessChecker.isAuthorizationSupported(conf) + && (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); } public Region getRegion() { @@ -251,8 +246,7 @@ private void initialize(RegionCoprocessorEnvironment e) throws IOException { Map> tables = PermissionStorage.loadAll(region); // For each table, write out the table's permissions to the respective // znode for that table. 
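As background for the initialize()/updateACL hunks that follow, a hedged sketch of the ACL-mirroring step they perform, i.e. serializing one table's permission set and publishing it to that table's znode; the watcher, entry, permission multimap, and configuration are assumed to come from the coprocessor environment, and the shaded-Guava import path is an assumption:

    // Illustrative sketch only; inputs assumed to come from the coprocessor environment.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.security.access.PermissionStorage;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
    import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;

    public final class AclMirrorExample {
      static void mirror(ZKPermissionWatcher watcher, byte[] entry,
          ListMultimap<String, UserPermission> perms, Configuration conf) {
        // Serialize the per-table permission set and push it to the table's znode so that
        // region servers can refresh their cached AuthManager state.
        byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf);
        watcher.writeToZookeeper(entry, serialized);
      }
    }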
- for (Map.Entry> t: - tables.entrySet()) { + for (Map.Entry> t : tables.entrySet()) { byte[] entry = t.getKey(); ListMultimap perms = t.getValue(); byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf); @@ -262,27 +256,25 @@ private void initialize(RegionCoprocessorEnvironment e) throws IOException { } /** - * Writes all table ACLs for the tables in the given Map up into ZooKeeper - * znodes. This is called to synchronize ACL changes following {@code _acl_} - * table updates. + * Writes all table ACLs for the tables in the given Map up into ZooKeeper znodes. This is called + * to synchronize ACL changes following {@code _acl_} table updates. */ - private void updateACL(RegionCoprocessorEnvironment e, - final Map> familyMap) { + private void updateACL(RegionCoprocessorEnvironment e, final Map> familyMap) { Set entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR); for (Map.Entry> f : familyMap.entrySet()) { List cells = f.getValue(); - for (Cell cell: cells) { + for (Cell cell : cells) { if (CellUtil.matchingFamily(cell, PermissionStorage.ACL_LIST_FAMILY)) { entries.add(CellUtil.cloneRow(cell)); } } } Configuration conf = regionEnv.getConfiguration(); - byte [] currentEntry = null; + byte[] currentEntry = null; // TODO: Here we are already on the ACL region. (And it is single // region) We can even just get the region from the env and do get // directly. The short circuit connection would avoid the RPC overhead - // so no socket communication, req write/read .. But we have the PB + // so no socket communication, req write/read .. But we have the PB // to and fro conversion overhead. get req is converted to PB req // and results are converted to PB results 1st and then to POJOs // again. We could have avoided such at least in ACL table context.. @@ -290,31 +282,30 @@ private void updateACL(RegionCoprocessorEnvironment e, for (byte[] entry : entries) { currentEntry = entry; ListMultimap perms = - PermissionStorage.getPermissions(conf, entry, t, null, null, null, false); + PermissionStorage.getPermissions(conf, entry, t, null, null, null, false); byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf); zkPermissionWatcher.writeToZookeeper(entry, serialized); } - } catch(IOException ex) { - LOG.error("Failed updating permissions mirror for '" + - (currentEntry == null? "null": Bytes.toString(currentEntry)) + "'", ex); + } catch (IOException ex) { + LOG.error("Failed updating permissions mirror for '" + + (currentEntry == null ? "null" : Bytes.toString(currentEntry)) + "'", ex); } } /** - * Check the current user for authorization to perform a specific action - * against the given set of row data. - * @param opType the operation type - * @param user the user - * @param e the coprocessor environment - * @param families the map of column families to qualifiers present in - * the request - * @param actions the desired actions + * Check the current user for authorization to perform a specific action against the given set of + * row data. + * @param opType the operation type + * @param user the user + * @param e the coprocessor environment + * @param families the map of column families to qualifiers present in the request + * @param actions the desired actions * @return an authorization result */ private AuthResult permissionGranted(OpType opType, User user, RegionCoprocessorEnvironment e, - Map> families, Action... actions) { + Map> families, Action... 
actions) { AuthResult result = null; - for (Action action: actions) { + for (Action action : actions) { result = accessChecker.permissionGranted(opType.toString(), user, action, e.getRegion().getRegionInfo().getTable(), families); if (!result.isAllowed()) { @@ -325,70 +316,63 @@ private AuthResult permissionGranted(OpType opType, User user, RegionCoprocessor } public void requireAccess(ObserverContext ctx, String request, TableName tableName, - Action... permissions) throws IOException { + Action... permissions) throws IOException { accessChecker.requireAccess(getActiveUser(ctx), request, tableName, permissions); } - public void requirePermission(ObserverContext ctx, String request, - Action perm) throws IOException { + public void requirePermission(ObserverContext ctx, String request, Action perm) + throws IOException { accessChecker.requirePermission(getActiveUser(ctx), request, null, perm); } - public void requireGlobalPermission(ObserverContext ctx, String request, - Action perm, TableName tableName, - Map> familyMap) throws IOException { + public void requireGlobalPermission(ObserverContext ctx, String request, Action perm, + TableName tableName, Map> familyMap) throws IOException { accessChecker.requireGlobalPermission(getActiveUser(ctx), request, perm, tableName, familyMap, null); } - public void requireGlobalPermission(ObserverContext ctx, String request, - Action perm, String namespace) throws IOException { - accessChecker.requireGlobalPermission(getActiveUser(ctx), - request, perm, namespace); + public void requireGlobalPermission(ObserverContext ctx, String request, Action perm, + String namespace) throws IOException { + accessChecker.requireGlobalPermission(getActiveUser(ctx), request, perm, namespace); } public void requireNamespacePermission(ObserverContext ctx, String request, String namespace, - Action... permissions) throws IOException { - accessChecker.requireNamespacePermission(getActiveUser(ctx), - request, namespace, null, permissions); + Action... permissions) throws IOException { + accessChecker.requireNamespacePermission(getActiveUser(ctx), request, namespace, null, + permissions); } public void requireNamespacePermission(ObserverContext ctx, String request, String namespace, - TableName tableName, Map> familyMap, - Action... permissions) throws IOException { - accessChecker.requireNamespacePermission(getActiveUser(ctx), - request, namespace, tableName, familyMap, - permissions); + TableName tableName, Map> familyMap, Action... permissions) + throws IOException { + accessChecker.requireNamespacePermission(getActiveUser(ctx), request, namespace, tableName, + familyMap, permissions); } public void requirePermission(ObserverContext ctx, String request, TableName tableName, - byte[] family, byte[] qualifier, Action... permissions) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), request, - tableName, family, qualifier, null, permissions); + byte[] family, byte[] qualifier, Action... permissions) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), request, tableName, family, qualifier, null, + permissions); } - public void requireTablePermission(ObserverContext ctx, String request, - TableName tableName,byte[] family, byte[] qualifier, - Action... 
permissions) throws IOException { - accessChecker.requireTablePermission(getActiveUser(ctx), - request, tableName, family, qualifier, permissions); + public void requireTablePermission(ObserverContext ctx, String request, TableName tableName, + byte[] family, byte[] qualifier, Action... permissions) throws IOException { + accessChecker.requireTablePermission(getActiveUser(ctx), request, tableName, family, qualifier, + permissions); } - public void checkLockPermissions(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String reason) - throws IOException { - accessChecker.checkLockPermissions(getActiveUser(ctx), - namespace, tableName, regionInfos, reason); + public void checkLockPermissions(ObserverContext ctx, String namespace, TableName tableName, + RegionInfo[] regionInfos, String reason) throws IOException { + accessChecker.checkLockPermissions(getActiveUser(ctx), namespace, tableName, regionInfos, + reason); } /** - * Returns true if the current user is allowed the given action - * over at least one of the column qualifiers in the given column families. + * Returns true if the current user is allowed the given action over at least one of + * the column qualifiers in the given column families. */ - private boolean hasFamilyQualifierPermission(User user, - Action perm, - RegionCoprocessorEnvironment env, - Map> familyMap) + private boolean hasFamilyQualifierPermission(User user, Action perm, + RegionCoprocessorEnvironment env, Map> familyMap) throws IOException { RegionInfo hri = env.getRegion().getRegionInfo(); TableName tableName = hri.getTable(); @@ -399,12 +383,12 @@ private boolean hasFamilyQualifierPermission(User user, if (familyMap != null && familyMap.size() > 0) { // at least one family must be allowed - for (Map.Entry> family : - familyMap.entrySet()) { + for (Map.Entry> family : familyMap.entrySet()) { if (family.getValue() != null && !family.getValue().isEmpty()) { for (byte[] qualifier : family.getValue()) { - if (getAuthManager().authorizeUserTable(user, tableName, - family.getKey(), qualifier, perm)) { + if ( + getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, perm) + ) { return true; } } @@ -446,12 +430,11 @@ public String toString() { /** * Determine if cell ACLs covered by the operation grant access. This is expensive. - * @return false if cell ACLs failed to grant access, true otherwise - * @throws IOException + * @return false if cell ACLs failed to grant access, true otherwise n */ private boolean checkCoveringPermission(User user, OpType request, RegionCoprocessorEnvironment e, - byte[] row, Map> familyMap, long opTs, Action... actions) - throws IOException { + byte[] row, Map> familyMap, long opTs, Action... actions) + throws IOException { if (!cellFeaturesEnabled) { return false; } @@ -465,36 +448,37 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce // consider only one such passing cell. In case of Delete we have to consider all the cell // versions under this passing version. When Delete Mutation contains columns which are a // version delete just consider only one version for those column cells. 
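The checkCoveringPermission() hunk below builds a Get over the mutated row to find any existing cell ACLs; a minimal sketch of that read, with placeholder family and qualifier names:

    // Illustrative sketch only; family "f" and qualifier "q" are placeholders.
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CoveringGetExample {
      static Get coveringGet(byte[] row, boolean considerCellTs) {
        Get get = new Get(row);
        if (considerCellTs) {
          get.readAllVersions();  // PUT/DELETE must inspect every version under the op timestamp
        } else {
          get.readVersions(1);    // other ops only need the latest covering cell
        }
        get.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
        return get;
      }
    }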
- boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE); + boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE); if (considerCellTs) { get.readAllVersions(); } else { get.readVersions(1); } boolean diffCellTsFromOpTs = false; - for (Map.Entry> entry: familyMap.entrySet()) { + for (Map.Entry> entry : familyMap.entrySet()) { byte[] col = entry.getKey(); // TODO: HBASE-7114 could possibly unify the collection type in family // maps so we would not need to do this if (entry.getValue() instanceof Set) { - Set set = (Set)entry.getValue(); + Set set = (Set) entry.getValue(); if (set == null || set.isEmpty()) { get.addFamily(col); } else { - for (byte[] qual: set) { + for (byte[] qual : set) { get.addColumn(col, qual); } } } else if (entry.getValue() instanceof List) { - List list = (List)entry.getValue(); + List list = (List) entry.getValue(); if (list == null || list.isEmpty()) { get.addFamily(col); } else { // In case of family delete, a Cell will be added into the list with Qualifier as null. for (Cell cell : list) { - if (cell.getQualifierLength() == 0 - && (cell.getTypeByte() == Type.DeleteFamily.getCode() - || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode())) { + if ( + cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode() + || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode()) + ) { get.addFamily(col); } else { get.addColumn(col, CellUtil.cloneQualifier(cell)); @@ -509,8 +493,8 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce } else if (entry.getValue() == null) { get.addFamily(col); } else { - throw new RuntimeException("Unhandled collection type " + - entry.getValue().getClass().getName()); + throw new RuntimeException( + "Unhandled collection type " + entry.getValue().getClass().getName()); } } // We want to avoid looking into the future. So, if the cells of the @@ -557,7 +541,7 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce cells.clear(); // scan with limit as 1 to hold down memory use on wide rows more = scanner.next(cells, scannerContext); - for (Cell cell: cells) { + for (Cell cell : cells) { if (LOG.isTraceEnabled()) { LOG.trace("Found cell " + cell); } @@ -574,8 +558,10 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce // null/empty qualifier is used to denote a Family delete. The TS and delete type // associated with this is applicable for all columns within the family. That is // why the below (col.getQualifierLength() == 0) check. - if ((col.getQualifierLength() == 0 && request == OpType.DELETE) - || CellUtil.matchingQualifier(cell, col)) { + if ( + (col.getQualifierLength() == 0 && request == OpType.DELETE) + || CellUtil.matchingQualifier(cell, col) + ) { byte type = col.getTypeByte(); if (considerCellTs) { curColCheckTs = col.getTimestamp(); @@ -585,7 +571,7 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce // that column. 
Check all versions when Type is DeleteColumn or DeleteFamily // One version delete types are Delete/DeleteFamilyVersion curColAllVersions = (KeyValue.Type.DeleteColumn.getCode() == type) - || (KeyValue.Type.DeleteFamily.getCode() == type); + || (KeyValue.Type.DeleteFamily.getCode() == type); break; } } @@ -595,7 +581,7 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce continue; } foundColumn = true; - for (Action action: actions) { + for (Action action : actions) { // Are there permissions for this user for the cell? if (!getAuthManager().authorizeCell(user, getTableName(e), cell, action)) { // We can stop if the cell ACL denies access @@ -621,9 +607,9 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce private static void addCellPermissions(final byte[] perms, Map> familyMap) { // Iterate over the entries in the familyMap, replacing the cells therein // with new cells including the ACL data - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { List newCells = Lists.newArrayList(); - for (Cell cell: e.getValue()) { + for (Cell cell : e.getValue()) { // Prepend the supplied perms in a new ACL tag to an update list of tags for the cell List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, perms)); @@ -683,8 +669,8 @@ public void start(CoprocessorEnvironment env) throws IOException { cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); if (!cellFeaturesEnabled) { LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS - + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY - + " accordingly."); + + " is required to persist cell ACLs. 
Consider setting " + HFile.FORMAT_VERSION_KEY + + " accordingly."); } if (env instanceof MasterCoprocessorEnvironment) { @@ -699,7 +685,7 @@ public void start(CoprocessorEnvironment env) throws IOException { RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env; if (rsEnv instanceof HasRegionServerServices) { RegionServerServices rsServices = - ((HasRegionServerServices) rsEnv).getRegionServerServices(); + ((HasRegionServerServices) rsEnv).getRegionServerServices(); zkPermissionWatcher = rsServices.getZKPermissionWatcher(); accessChecker = rsServices.getAccessChecker(); } @@ -711,7 +697,7 @@ public void start(CoprocessorEnvironment env) throws IOException { AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT); if (regionEnv instanceof HasRegionServerServices) { RegionServerServices rsServices = - ((HasRegionServerServices) regionEnv).getRegionServerServices(); + ((HasRegionServerServices) regionEnv).getRegionServerServices(); zkPermissionWatcher = rsServices.getZKPermissionWatcher(); accessChecker = rsServices.getAccessChecker(); } @@ -757,40 +743,37 @@ public Optional getRegionServerObserver() { @Override public Iterable getServices() { - return Collections.singleton( - AccessControlProtos.AccessControlService.newReflectiveService(this)); + return Collections + .singleton(AccessControlProtos.AccessControlService.newReflectiveService(this)); } /*********************************** Observer implementations ***********************************/ @Override - public void preCreateTable(ObserverContext c, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + public void preCreateTable(ObserverContext c, TableDescriptor desc, + RegionInfo[] regions) throws IOException { Set families = desc.getColumnFamilyNames(); Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (byte[] family: families) { + for (byte[] family : families) { familyMap.put(family, null); } - requireNamespacePermission(c, "createTable", - desc.getTableName().getNamespaceAsString(), desc.getTableName(), familyMap, Action.ADMIN, - Action.CREATE); + requireNamespacePermission(c, "createTable", desc.getTableName().getNamespaceAsString(), + desc.getTableName(), familyMap, Action.ADMIN, Action.CREATE); } @Override - public void postCompletedCreateTableAction( - final ObserverContext c, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException { + public void postCompletedCreateTableAction(final ObserverContext c, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { // When AC is used, it should be configured as the 1st CP. // In Master, the table operations like create, are handled by a Thread pool but the max size // for this pool is 1. So if multiple CPs create tables on startup, these creations will happen // sequentially only. // Related code in HMaster#startServiceThreads // {code} - // // We depend on there being only one instance of this executor running - // // at a time. To do concurrency, would need fencing of enable/disable of - // // tables. - // this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); + // // We depend on there being only one instance of this executor running + // // at a time. To do concurrency, would need fencing of enable/disable of + // // tables. 
+ // this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); // {code} // In future if we change this pool to have more threads, then there is a chance for thread, // creating acl table, getting delayed and by that time another table creation got over and @@ -801,18 +784,18 @@ public void postCompletedCreateTableAction( } else { if (!aclTabAvailable) { LOG.warn("Not adding owner permission for table " + desc.getTableName() + ". " - + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " - + getClass().getSimpleName() + " should be configured as the first Coprocessor"); + + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " + getClass().getSimpleName() + + " should be configured as the first Coprocessor"); } else { String owner = getActiveUser(c).getShortName(); final UserPermission userPermission = new UserPermission(owner, - Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); + Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); // switch to the real hbase master user for doing the RPC on the ACL table User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Table table = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(c.getEnvironment().getConfiguration(), userPermission, table); } @@ -825,20 +808,19 @@ public Void run() throws Exception { @Override public void preDeleteTable(ObserverContext c, TableName tableName) - throws IOException { - requirePermission(c, "deleteTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + throws IOException { + requirePermission(c, "deleteTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void postDeleteTable(ObserverContext c, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Table table = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.removeTablePermissions(conf, tableName, table); } return null; @@ -849,16 +831,15 @@ public Void run() throws Exception { @Override public void preTruncateTable(ObserverContext c, - final TableName tableName) throws IOException { - requirePermission(c, "truncateTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + final TableName tableName) throws IOException { + requirePermission(c, "truncateTable", tableName, null, null, Action.ADMIN, Action.CREATE); final Configuration conf = c.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { List acls = - PermissionStorage.getUserTablePermissions(conf, tableName, null, null, null, false); + PermissionStorage.getUserTablePermissions(conf, tableName, null, null, null, false); if (acls != null) { tableAcls.put(tableName, acls); } @@ -869,7 +850,7 @@ public Void run() throws Exception { @Override public void postTruncateTable(ObserverContext ctx, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { final Configuration conf = ctx.getEnvironment().getConfiguration(); 
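For reference, a hedged sketch of the owner-grant pattern used in the create/modify-table hooks above, i.e. persisting a full-action UserPermission for the table owner into the ACL table; the configuration, ACL Table handle, owner name, and table name are assumed inputs rather than values from the patch:

    // Illustrative sketch only; conf, aclTable, owner and table name are assumed inputs.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.Permission.Action;
    import org.apache.hadoop.hbase.security.access.PermissionStorage;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public final class OwnerGrantExample {
      static void grantOwner(Configuration conf, Table aclTable, String owner, TableName tn)
          throws IOException {
        // Grant the creating user every action on the new table and persist it in the ACL table.
        UserPermission up = new UserPermission(owner,
            Permission.newBuilder(tn).withActions(Action.values()).build());
        PermissionStorage.addUserPermission(conf, up, aclTable);
      }
    }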
User.runAsLoginUser(new PrivilegedExceptionAction() { @Override @@ -878,7 +859,7 @@ public Void run() throws Exception { if (perms != null) { for (UserPermission perm : perms) { try (Table table = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(conf, perm, table); } } @@ -891,8 +872,7 @@ public Void run() throws Exception { @Override public TableDescriptor preModifyTable(ObserverContext c, - TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) - throws IOException { + TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) throws IOException { // TODO: potentially check if this is a add/modify/delete column operation requirePermission(c, "modifyTable", tableName, null, null, Action.ADMIN, Action.CREATE); return newDesc; @@ -925,9 +905,9 @@ public void postModifyTable(ObserverContext c, Tab @Override public Void run() throws Exception { UserPermission userperm = new UserPermission(owner, - Permission.newBuilder(currentDesc.getTableName()).withActions(Action.values()).build()); + Permission.newBuilder(currentDesc.getTableName()).withActions(Action.values()).build()); try (Table table = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(conf, userperm, table); } return null; @@ -937,113 +917,103 @@ public Void run() throws Exception { @Override public void preEnableTable(ObserverContext c, TableName tableName) - throws IOException { - requirePermission(c, "enableTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + throws IOException { + requirePermission(c, "enableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preDisableTable(ObserverContext c, TableName tableName) - throws IOException { + throws IOException { if (Bytes.equals(tableName.getName(), PermissionStorage.ACL_GLOBAL_NAME)) { // We have to unconditionally disallow disable of the ACL table when we are installed, // even if not enforcing authorizations. We are still allowing grants and revocations, // checking permissions and logging audit messages, etc. If the ACL table is not // available we will fail random actions all over the place. throw new AccessDeniedException("Not allowed to disable " + PermissionStorage.ACL_TABLE_NAME - + " table with AccessController installed"); + + " table with AccessController installed"); } - requirePermission(c, "disableTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "disableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preAbortProcedure(ObserverContext ctx, - final long procId) throws IOException { + final long procId) throws IOException { requirePermission(ctx, "abortProcedure", Action.ADMIN); } @Override public void postAbortProcedure(ObserverContext ctx) - throws IOException { + throws IOException { // There is nothing to do at this time after the procedure abort request was sent. 
} @Override public void preGetProcedures(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "getProcedure", Action.ADMIN); } @Override - public void preGetLocks(ObserverContext ctx) - throws IOException { + public void preGetLocks(ObserverContext ctx) throws IOException { User user = getActiveUser(ctx); accessChecker.requirePermission(user, "getLocks", null, Action.ADMIN); } @Override public void preMove(ObserverContext c, RegionInfo region, - ServerName srcServer, ServerName destServer) throws IOException { - requirePermission(c, "move", - region.getTable(), null, null, Action.ADMIN); + ServerName srcServer, ServerName destServer) throws IOException { + requirePermission(c, "move", region.getTable(), null, null, Action.ADMIN); } @Override public void preAssign(ObserverContext c, RegionInfo regionInfo) - throws IOException { - requirePermission(c, "assign", - regionInfo.getTable(), null, null, Action.ADMIN); + throws IOException { + requirePermission(c, "assign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preUnassign(ObserverContext c, RegionInfo regionInfo) - throws IOException { - requirePermission(c, "unassign", - regionInfo.getTable(), null, null, Action.ADMIN); + throws IOException { + requirePermission(c, "unassign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preRegionOffline(ObserverContext c, - RegionInfo regionInfo) throws IOException { - requirePermission(c, "regionOffline", - regionInfo.getTable(), null, null, Action.ADMIN); + RegionInfo regionInfo) throws IOException { + requirePermission(c, "regionOffline", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException { - requirePermission(ctx, "setSplitOrMergeEnabled", - Action.ADMIN); + final boolean newValue, final MasterSwitchType switchType) throws IOException { + requirePermission(ctx, "setSplitOrMergeEnabled", Action.ADMIN); } @Override public void preBalance(ObserverContext c, BalanceRequest request) - throws IOException { + throws IOException { requirePermission(c, "balance", Action.ADMIN); } @Override - public void preBalanceSwitch(ObserverContext c, - boolean newValue) throws IOException { + public void preBalanceSwitch(ObserverContext c, boolean newValue) + throws IOException { requirePermission(c, "balanceSwitch", Action.ADMIN); } @Override - public void preShutdown(ObserverContext c) - throws IOException { + public void preShutdown(ObserverContext c) throws IOException { requirePermission(c, "shutdown", Action.ADMIN); } @Override - public void preStopMaster(ObserverContext c) - throws IOException { + public void preStopMaster(ObserverContext c) throws IOException { requirePermission(c, "stopMaster", Action.ADMIN); } @Override public void postStartMaster(ObserverContext ctx) - throws IOException { + throws IOException { try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { if (!admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) { createACLTable(admin); @@ -1052,43 +1022,37 @@ public void postStartMaster(ObserverContext ctx) } } } + /** - * Create the ACL table - * @throws IOException + * Create the ACL table n */ private static void createACLTable(Admin admin) throws IOException { /** Table descriptor for ACL table */ ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY). - setMaxVersions(1). 
- setInMemory(true). - setBlockCacheEnabled(true). - setBlocksize(8 * 1024). - setBloomFilterType(BloomType.NONE). - setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); - TableDescriptor td = - TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME). - setColumnFamily(cfd).build(); + ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY).setMaxVersions(1) + .setInMemory(true).setBlockCacheEnabled(true).setBlocksize(8 * 1024) + .setBloomFilterType(BloomType.NONE).setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME) + .setColumnFamily(cfd).build(); admin.createTable(td); } @Override public void preSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { // Move this ACL check to SnapshotManager#checkPermissions as part of AC deprecation. - requirePermission(ctx, "snapshot " + snapshot.getName(), - hTableDescriptor.getTableName(), null, null, Permission.Action.ADMIN); + requirePermission(ctx, "snapshot " + snapshot.getName(), hTableDescriptor.getTableName(), null, + null, Permission.Action.ADMIN); } @Override public void preListSnapshot(ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException { + final SnapshotDescription snapshot) throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { // list it, if user is the owner of snapshot AuthResult result = AuthResult.allow("listSnapshot " + snapshot.getName(), - "Snapshot owner check allowed", user, null, null, null); + "Snapshot owner check allowed", user, null, null, null); AccessChecker.logResult(result); } else { accessChecker.requirePermission(user, "listSnapshot " + snapshot.getName(), null, @@ -1098,12 +1062,12 @@ public void preListSnapshot(ObserverContext ctx, @Override public void preCloneSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { User user = getActiveUser(ctx); - if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) - && hTableDescriptor.getTableName().getNameAsString() - .equals(snapshot.getTableNameAsString())) { + if ( + SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) + && hTableDescriptor.getTableName().getNameAsString().equals(snapshot.getTableNameAsString()) + ) { // Snapshot owner is allowed to create a table with the same name as the snapshot he took AuthResult result = AuthResult.allow("cloneSnapshot " + snapshot.getName(), "Snapshot owner check allowed", user, null, hTableDescriptor.getTableName(), null); @@ -1116,8 +1080,7 @@ public void preCloneSnapshot(final ObserverContext @Override public void preRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) - throws IOException { + final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { accessChecker.requirePermission(user, "restoreSnapshot " + snapshot.getName(), @@ -1130,12 +1093,12 @@ public void preRestoreSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws 
IOException { + final SnapshotDescription snapshot) throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { // Snapshot owner is allowed to delete the snapshot AuthResult result = AuthResult.allow("deleteSnapshot " + snapshot.getName(), - "Snapshot owner check allowed", user, null, null, null); + "Snapshot owner check allowed", user, null, null, null); AccessChecker.logResult(result); } else { accessChecker.requirePermission(user, "deleteSnapshot " + snapshot.getName(), null, @@ -1145,27 +1108,25 @@ public void preDeleteSnapshot(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException { - requireGlobalPermission(ctx, "createNamespace", - Action.ADMIN, ns.getName()); + NamespaceDescriptor ns) throws IOException { + requireGlobalPermission(ctx, "createNamespace", Action.ADMIN, ns.getName()); } @Override - public void preDeleteNamespace(ObserverContext ctx, String namespace) - throws IOException { - requireGlobalPermission(ctx, "deleteNamespace", - Action.ADMIN, namespace); + public void preDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + requireGlobalPermission(ctx, "deleteNamespace", Action.ADMIN, namespace); } @Override public void postDeleteNamespace(ObserverContext ctx, - final String namespace) throws IOException { + final String namespace) throws IOException { final Configuration conf = ctx.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Table table = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.removeNamespacePermissions(conf, namespace, table); } return null; @@ -1177,7 +1138,7 @@ public Void run() throws Exception { @Override public void preModifyNamespace(ObserverContext ctx, - NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException { + NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException { // We require only global permission so that // a user with NS admin cannot altering namespace configurations. i.e. namespace quota requireGlobalPermission(ctx, "modifyNamespace", Action.ADMIN, newNsDesc.getName()); @@ -1191,13 +1152,13 @@ public void preGetNamespaceDescriptor(ObserverContext ctx, - List namespaces) throws IOException { + List namespaces) throws IOException { /* always allow namespace listing */ } @Override public void postListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException { + List descriptors) throws IOException { // Retains only those which passes authorization checks, as the checks weren't done as part // of preGetTableDescriptors. Iterator itr = descriptors.iterator(); @@ -1215,52 +1176,46 @@ public void postListNamespaceDescriptors(ObserverContext ctx, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { // Move this ACL check to MasterFlushTableProcedureManager#checkPermissions as part of AC // deprecation. 
- requirePermission(ctx, "flushTable", tableName, - null, null, Action.ADMIN, Action.CREATE); + requirePermission(ctx, "flushTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override - public void preSplitRegion( - final ObserverContext ctx, - final TableName tableName, - final byte[] splitRow) throws IOException { - requirePermission(ctx, "split", tableName, - null, null, Action.ADMIN); + public void preSplitRegion(final ObserverContext ctx, + final TableName tableName, final byte[] splitRow) throws IOException { + requirePermission(ctx, "split", tableName, null, null, Action.ADMIN); } @Override public void preClearDeadServers(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "clearDeadServers", Action.ADMIN); } @Override public void preDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException { + List servers, boolean offload) throws IOException { requirePermission(ctx, "decommissionRegionServers", Action.ADMIN); } @Override public void preListDecommissionedRegionServers(ObserverContext ctx) - throws IOException { - requirePermission(ctx, "listDecommissionedRegionServers", - Action.ADMIN); + throws IOException { + requirePermission(ctx, "listDecommissionedRegionServers", Action.ADMIN); } @Override public void preRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException { + ServerName server, List encodedRegionNames) throws IOException { requirePermission(ctx, "recommissionRegionServers", Action.ADMIN); } /* ---- RegionObserver implementation ---- */ @Override - public void preOpen(ObserverContext c) - throws IOException { + public void preOpen(ObserverContext c) throws IOException { RegionCoprocessorEnvironment env = c.getEnvironment(); final Region region = env.getRegion(); if (region == null) { @@ -1299,22 +1254,22 @@ public void postOpen(ObserverContext c) { @Override public void preFlush(ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException { - requirePermission(c, "flush", getTableName(c.getEnvironment()), - null, null, Action.ADMIN, Action.CREATE); + FlushLifeCycleTracker tracker) throws IOException { + requirePermission(c, "flush", getTableName(c.getEnvironment()), null, null, Action.ADMIN, + Action.CREATE); } @Override public InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { - requirePermission(c, "compact", getTableName(c.getEnvironment()), - null, null, Action.ADMIN, Action.CREATE); + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { + requirePermission(c, "compact", getTableName(c.getEnvironment()), null, null, Action.ADMIN, + Action.CREATE); return scanner; } private void internalPreRead(final ObserverContext c, - final Query query, OpType opType) throws IOException { + final Query query, OpType opType) throws IOException { Filter filter = query.getFilter(); // Don't wrap an AccessControlFilter if (filter != null && filter instanceof AccessControlFilter) { @@ -1322,17 +1277,17 @@ private void internalPreRead(final ObserverContext } User user = getActiveUser(c); RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = null; + Map> families = null; switch (opType) { - case GET: - case EXISTS: - families = ((Get)query).getFamilyMap(); - break; - case SCAN: - families = 
((Scan)query).getFamilyMap(); - break; - default: - throw new RuntimeException("Unhandled operation " + opType); + case GET: + case EXISTS: + families = ((Get) query).getFamilyMap(); + break; + case SCAN: + families = ((Scan) query).getFamilyMap(); + break; + default: + throw new RuntimeException("Unhandled operation " + opType); } AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ); Region region = getRegion(env); @@ -1357,8 +1312,7 @@ private void internalPreRead(final ObserverContext // Only wrap the filter if we are enforcing authorizations if (authorizationEnabled) { Filter ourFilter = new AccessControlFilter(getAuthManager(), user, table, - AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, - cfVsMaxVersions); + AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions); // wrap any existing filter if (filter != null) { ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, @@ -1367,10 +1321,10 @@ private void internalPreRead(final ObserverContext switch (opType) { case GET: case EXISTS: - ((Get)query).setFilter(ourFilter); + ((Get) query).setFilter(ourFilter); break; case SCAN: - ((Scan)query).setFilter(ourFilter); + ((Scan) query).setFilter(ourFilter); break; default: throw new RuntimeException("Unhandled operation " + opType); @@ -1396,10 +1350,10 @@ private void internalPreRead(final ObserverContext switch (opType) { case GET: case EXISTS: - ((Get)query).setFilter(ourFilter); + ((Get) query).setFilter(ourFilter); break; case SCAN: - ((Scan)query).setFilter(ourFilter); + ((Scan) query).setFilter(ourFilter); break; default: throw new RuntimeException("Unhandled operation " + opType); @@ -1411,28 +1365,26 @@ private void internalPreRead(final ObserverContext AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions for user '" - + (user != null ? user.getShortName() : "null") - + "' (table=" + table + ", action=READ)"); + + (user != null ? user.getShortName() : "null") + "' (table=" + table + ", action=READ)"); } } @Override - public void preGetOp(final ObserverContext c, - final Get get, final List result) throws IOException { + public void preGetOp(final ObserverContext c, final Get get, + final List result) throws IOException { internalPreRead(c, get, OpType.GET); } @Override - public boolean preExists(final ObserverContext c, - final Get get, final boolean exists) throws IOException { + public boolean preExists(final ObserverContext c, final Get get, + final boolean exists) throws IOException { internalPreRead(c, get, OpType.EXISTS); return exists; } @Override - public void prePut(final ObserverContext c, - final Put put, final WALEdit edit, final Durability durability) - throws IOException { + public void prePut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); @@ -1443,9 +1395,8 @@ public void prePut(final ObserverContext c, // change the ACL of any previous Put. This allows simple evolution of // security policy over time without requiring expensive updates. 
RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = put.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.PUT, - user, env, families, Action.WRITE); + Map> families = put.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { @@ -1467,17 +1418,16 @@ public void prePut(final ObserverContext c, } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, final Durability durability) { + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) { if (aclRegion) { updateACL(c.getEnvironment(), put.getFamilyCellMap()); } } @Override - public void preDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, final Durability durability) - throws IOException { + public void preDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { // An ACL on a delete is useless, we shouldn't allow it if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) { throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString()); @@ -1488,24 +1438,22 @@ public void preDelete(final ObserverContext c, // overwrite any of the visible versions ('visible' defined as not covered // by a tombstone already) then we have to disallow this operation. RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = delete.getFamilyCellMap(); + Map> families = delete.getFamilyCellMap(); User user = getActiveUser(c); - AuthResult authResult = permissionGranted(OpType.DELETE, - user, env, families, Action.WRITE); + AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } } @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (cellFeaturesEnabled && !compatibleEarlyTermination) { TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); User user = getActiveUser(c); @@ -1534,18 +1482,20 @@ public void preBatchMutate(ObserverContext c, continue; } AuthResult authResult = null; - if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), - m.getFamilyCellMap(), timestamp, Action.WRITE)) { - authResult = AuthResult.allow(opType.toString(), "Covering cell set", - user, Action.WRITE, table, m.getFamilyCellMap()); + if ( + checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), + m.getFamilyCellMap(), timestamp, Action.WRITE) + ) { + authResult = AuthResult.allow(opType.toString(), "Covering cell set", user, + Action.WRITE, table, m.getFamilyCellMap()); } else { - authResult = AuthResult.deny(opType.toString(), "Covering cell set", - user, Action.WRITE, table, m.getFamilyCellMap()); + authResult = AuthResult.deny(opType.toString(), "Covering cell set", user, Action.WRITE, + table, 
m.getFamilyCellMap()); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " - + authResult.toContextString()); + throw new AccessDeniedException( + "Insufficient permissions " + authResult.toContextString()); } } } @@ -1553,9 +1503,8 @@ public void preBatchMutate(ObserverContext c, } @Override - public void postDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, final Durability durability) - throws IOException { + public void postDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { if (aclRegion) { updateACL(c.getEnvironment(), delete.getFamilyCellMap()); } @@ -1563,25 +1512,22 @@ public void postDelete(final ObserverContext c, @Override public boolean preCheckAndPut(final ObserverContext c, - final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator, final Put put, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); // Require READ and WRITE permissions on the table, CF, and KV to update RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, qualifier); - AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, - user, env, families, Action.READ, Action.WRITE); + Map> families = makeFamilyMap(family, qualifier); + AuthResult authResult = + permissionGranted(OpType.CHECK_AND_PUT, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1598,9 +1544,8 @@ public boolean preCheckAndPut(final ObserverContext c, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator opp, final ByteArrayComparable comparator, final Put put, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator opp, + final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException { if (put.getAttribute(CHECK_COVERING_PERM) != null) { // We had failure with table, cf and q perm checks and now giving a chance for cell // perm check @@ -1608,13 +1553,15 @@ public boolean preCheckAndPutAfterRowLock(final ObserverContext> families = makeFamilyMap(family, qualifier); AuthResult authResult = null; User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.CHECK_AND_PUT, c.getEnvironment(), row, families, - HConstants.LATEST_TIMESTAMP, Action.READ)) { - authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), - "Covering cell set", user, Action.READ, table, families); + if ( + checkCoveringPermission(user, OpType.CHECK_AND_PUT, c.getEnvironment(), row, families, + HConstants.LATEST_TIMESTAMP, Action.READ) + ) { + authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, + Action.READ, table, 
families); } else { - authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), - "Covering cell set", user, Action.READ, table, families); + authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, + Action.READ, table, families); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { @@ -1626,29 +1573,26 @@ public boolean preCheckAndPutAfterRowLock(final ObserverContext c, - final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator, final Delete delete, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Delete delete, final boolean result) + throws IOException { // An ACL on a delete is useless, we shouldn't allow it if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) { - throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + - delete.toString()); + throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + delete.toString()); } // Require READ and WRITE permissions on the table, CF, and the KV covered // by the delete RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, qualifier); + Map> families = makeFamilyMap(family, qualifier); User user = getActiveUser(c); - AuthResult authResult = permissionGranted( - OpType.CHECK_AND_DELETE, user, env, families, Action.READ, Action.WRITE); + AuthResult authResult = + permissionGranted(OpType.CHECK_AND_DELETE, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } return result; @@ -1656,10 +1600,9 @@ public boolean preCheckAndDelete(final ObserverContext c, final byte[] row, - final byte[] family, final byte[] qualifier, final CompareOperator op, - final ByteArrayComparable comparator, final Delete delete, final boolean result) - throws IOException { + final ObserverContext c, final byte[] row, final byte[] family, + final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator, + final Delete delete, final boolean result) throws IOException { if (delete.getAttribute(CHECK_COVERING_PERM) != null) { // We had failure with table, cf and q perm checks and now giving a chance for cell // perm check @@ -1667,13 +1610,15 @@ public boolean preCheckAndDeleteAfterRowLock( Map> families = makeFamilyMap(family, qualifier); AuthResult authResult = null; User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.CHECK_AND_DELETE, c.getEnvironment(), - row, families, HConstants.LATEST_TIMESTAMP, Action.READ)) { - authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), - "Covering cell set", user, Action.READ, table, families); + if ( + checkCoveringPermission(user, OpType.CHECK_AND_DELETE, c.getEnvironment(), row, families, + HConstants.LATEST_TIMESTAMP, Action.READ) + ) { + authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), "Covering cell set", user, + Action.READ, table, families); } else { - authResult = 
AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), - "Covering cell set", user, Action.READ, table, families); + authResult = AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), "Covering cell set", user, + Action.READ, table, families); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { @@ -1685,22 +1630,20 @@ public boolean preCheckAndDeleteAfterRowLock( @Override public Result preAppend(ObserverContext c, Append append) - throws IOException { + throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, append); // Require WRITE permission to the table, CF, and the KV to be appended RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = append.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.APPEND, user, - env, families, Action.WRITE); + Map> families = append.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.APPEND, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { append.setAttribute(CHECK_COVERING_PERM, TRUE); - } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1718,24 +1661,21 @@ public Result preAppend(ObserverContext c, Append @Override public Result preIncrement(final ObserverContext c, - final Increment increment) - throws IOException { + final Increment increment) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, increment); // Require WRITE permission to the table, CF, and the KV to be replaced by // the incremented value RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = increment.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.INCREMENT, - user, env, families, Action.WRITE); + Map> families = increment.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.INCREMENT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { increment.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1753,30 +1693,32 @@ public Result preIncrement(final ObserverContext c @Override public List> postIncrementBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { // If the HFile version is insufficient to persist tags, we won't have any // work to do here if (!cellFeaturesEnabled || mutation.getACL() == null) { return cellPairs; } - return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(), + return cellPairs.stream() + .map(pair -> new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } @Override public List> postAppendBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { 
// If the HFile version is insufficient to persist tags, we won't have any // work to do here if (!cellFeaturesEnabled || mutation.getACL() == null) { return cellPairs; } - return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(), + return cellPairs.stream() + .map(pair -> new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell) { @@ -1807,13 +1749,13 @@ private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell @Override public void preScannerOpen(final ObserverContext c, final Scan scan) - throws IOException { + throws IOException { internalPreRead(c, scan, OpType.SCAN); } @Override public RegionScanner postScannerOpen(final ObserverContext c, - final Scan scan, final RegionScanner s) throws IOException { + final Scan scan, final RegionScanner s) throws IOException { User user = getActiveUser(c); if (user != null && user.getShortName() != null) { // store reference to scanner owner for later checks @@ -1824,29 +1766,28 @@ public RegionScanner postScannerOpen(final ObserverContext c, - final InternalScanner s, final List result, - final int limit, final boolean hasNext) throws IOException { + final InternalScanner s, final List result, final int limit, final boolean hasNext) + throws IOException { requireScannerOwner(s); return hasNext; } @Override public void preScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { requireScannerOwner(s); } @Override public void postScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { // clean up any associated owner mapping scannerOwners.remove(s); } /** - * Verify, when servicing an RPC, that the caller is the scanner owner. - * If so, we assume that access control is correctly enforced based on - * the checks performed in preScannerOpen() + * Verify, when servicing an RPC, that the caller is the scanner owner. If so, we assume that + * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { if (!RpcServer.isInRpcCallContext()) { @@ -1855,21 +1796,19 @@ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException String requestUserName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUserName)) { - throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); + throw new AccessDeniedException("User '" + requestUserName + "' is not the scanner owner!"); } } /** - * Verifies user has CREATE or ADMIN privileges on - * the Column Families involved in the bulkLoadHFile - * request. Specific Column Write privileges are presently - * ignored. + * Verifies user has CREATE or ADMIN privileges on the Column Families involved in the + * bulkLoadHFile request. Specific Column Write privileges are presently ignored. 
*/ @Override public void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException { + List> familyPaths) throws IOException { User user = getActiveUser(ctx); - for(Pair el : familyPaths) { + for (Pair el : familyPaths) { accessChecker.requirePermission(user, "preBulkLoadHFile", ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), el.getFirst(), null, null, Action.ADMIN, Action.CREATE); @@ -1877,67 +1816,62 @@ public void preBulkLoadHFile(ObserverContext ctx, } /** - * Authorization check for - * SecureBulkLoadProtocol.prepareBulkLoad() - * @param ctx the context - * @throws IOException + * Authorization check for SecureBulkLoadProtocol.prepareBulkLoad() + * @param ctx the context n */ @Override public void prePrepareBulkLoad(ObserverContext ctx) - throws IOException { + throws IOException { requireAccess(ctx, "prePrepareBulkLoad", - ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, - Action.CREATE); + ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, + Action.CREATE); } /** - * Authorization security check for - * SecureBulkLoadProtocol.cleanupBulkLoad() - * @param ctx the context - * @throws IOException + * Authorization security check for SecureBulkLoadProtocol.cleanupBulkLoad() + * @param ctx the context n */ @Override public void preCleanupBulkLoad(ObserverContext ctx) - throws IOException { + throws IOException { requireAccess(ctx, "preCleanupBulkLoad", - ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, - Action.CREATE); + ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, + Action.CREATE); } /* ---- EndpointObserver implementation ---- */ @Override public Message preEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request) throws IOException { + Service service, String methodName, Message request) throws IOException { // Don't intercept calls to our own AccessControlService, we check for // appropriate permissions in the service handlers if (shouldCheckExecPermission && !(service instanceof AccessControlService)) { requirePermission(ctx, - "invoke(" + service.getDescriptorForType().getName() + "." + methodName + ")", - getTableName(ctx.getEnvironment()), null, null, - Action.EXEC); + "invoke(" + service.getDescriptorForType().getName() + "." + methodName + ")", + getTableName(ctx.getEnvironment()), null, null, Action.EXEC); } return request; } @Override public void postEndpointInvocation(ObserverContext ctx, - Service service, String methodName, Message request, Message.Builder responseBuilder) - throws IOException { } + Service service, String methodName, Message request, Message.Builder responseBuilder) + throws IOException { + } /* ---- Protobuf AccessControlService implementation ---- */ /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#grant(UserPermission, boolean)} instead. + * {@link Admin#grant(UserPermission, boolean)} instead. 
* @see Admin#grant(UserPermission, boolean) * @see HBASE-21739 */ @Deprecated @Override - public void grant(RpcController controller, - AccessControlProtos.GrantRequest request, - RpcCallback done) { + public void grant(RpcController controller, AccessControlProtos.GrantRequest request, + RpcCallback done) { final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission()); AccessControlProtos.GrantResponse response = null; try { @@ -1948,8 +1882,8 @@ public void grant(RpcController controller, } User caller = RpcServer.getRequestUser().orElse(null); if (LOG.isDebugEnabled()) { - LOG.debug("Received request from {} to grant access permission {}", - caller.getName(), perm.toString()); + LOG.debug("Received request from {} to grant access permission {}", caller.getName(), + perm.toString()); } preGrantOrRevoke(caller, "grant", perm); @@ -1962,8 +1896,8 @@ public void grant(RpcController controller, AUDITLOG.trace("Granted permission " + perm.toString()); } } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.GrantResponse.getDefaultInstance(); } catch (IOException ioe) { @@ -1975,14 +1909,14 @@ public void grant(RpcController controller, /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link Admin#revoke(UserPermission)} - * instead. + * instead. * @see Admin#revoke(UserPermission) * @see HBASE-21739 */ @Deprecated @Override public void revoke(RpcController controller, AccessControlProtos.RevokeRequest request, - RpcCallback done) { + RpcCallback done) { final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission()); AccessControlProtos.RevokeResponse response = null; try { @@ -1999,14 +1933,14 @@ public void revoke(RpcController controller, AccessControlProtos.RevokeRequest r preGrantOrRevoke(caller, "revoke", perm); // regionEnv is set at #start. Hopefully not null here. regionEnv.getConnection().getAdmin() - .revoke(new UserPermission(perm.getUser(), perm.getPermission())); + .revoke(new UserPermission(perm.getUser(), perm.getPermission())); if (AUDITLOG.isTraceEnabled()) { // audit log should record all permission changes AUDITLOG.trace("Revoked permission " + perm.toString()); } } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.RevokeResponse.getDefaultInstance(); } catch (IOException ioe) { @@ -2018,15 +1952,15 @@ public void revoke(RpcController controller, AccessControlProtos.RevokeRequest r /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. 
* @see Admin#getUserPermissions(GetUserPermissionsRequest) * @see HBASE-21911 */ @Deprecated @Override public void getUserPermissions(RpcController controller, - AccessControlProtos.GetUserPermissionsRequest request, - RpcCallback done) { + AccessControlProtos.GetUserPermissionsRequest request, + RpcCallback done) { AccessControlProtos.GetUserPermissionsResponse response = null; try { // only allowed to be called on _acl_ region @@ -2037,31 +1971,31 @@ public void getUserPermissions(RpcController controller, User caller = RpcServer.getRequestUser().orElse(null); final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null; final String namespace = - request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null; + request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null; final TableName table = - request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null; + request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null; final byte[] cf = - request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null; + request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null; final byte[] cq = - request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null; + request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null; preGetUserPermissions(caller, userName, namespace, table, cf, cq); GetUserPermissionsRequest getUserPermissionsRequest = null; if (request.getType() == AccessControlProtos.Permission.Type.Table) { getUserPermissionsRequest = GetUserPermissionsRequest.newBuilder(table).withFamily(cf) - .withQualifier(cq).withUserName(userName).build(); + .withQualifier(cq).withUserName(userName).build(); } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) { getUserPermissionsRequest = - GetUserPermissionsRequest.newBuilder(namespace).withUserName(userName).build(); + GetUserPermissionsRequest.newBuilder(namespace).withUserName(userName).build(); } else { getUserPermissionsRequest = - GetUserPermissionsRequest.newBuilder().withUserName(userName).build(); + GetUserPermissionsRequest.newBuilder().withUserName(userName).build(); } List perms = - regionEnv.getConnection().getAdmin().getUserPermissions(getUserPermissionsRequest); + regionEnv.getConnection().getAdmin().getUserPermissions(getUserPermissionsRequest); response = AccessControlUtil.buildGetUserPermissionsResponse(perms); } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } } catch (IOException ioe) { // pass exception back up @@ -2072,15 +2006,15 @@ public void getUserPermissions(RpcController controller, /** * @deprecated since 2.2.0 and will be removed 4.0.0. Use {@link Admin#hasUserPermissions(List)} - * instead. + * instead. 
* @see Admin#hasUserPermissions(List) * @see HBASE-22117 */ @Deprecated @Override public void checkPermissions(RpcController controller, - AccessControlProtos.CheckPermissionsRequest request, - RpcCallback done) { + AccessControlProtos.CheckPermissionsRequest request, + RpcCallback done) { AccessControlProtos.CheckPermissionsResponse response = null; try { User user = RpcServer.getRequestUser().orElse(null); @@ -2093,16 +2027,16 @@ public void checkPermissions(RpcController controller, TablePermission tperm = (TablePermission) permission; if (!tperm.getTableName().equals(tableName)) { throw new CoprocessorException(AccessController.class, - String.format( - "This method can only execute at the table specified in " - + "TablePermission. Table of the region:%s , requested table:%s", - tableName, tperm.getTableName())); + String.format( + "This method can only execute at the table specified in " + + "TablePermission. Table of the region:%s , requested table:%s", + tableName, tperm.getTableName())); } } } for (Permission permission : permissions) { boolean hasPermission = - accessChecker.hasUserPermission(user, "checkPermissions", permission); + accessChecker.hasUserPermission(user, "checkPermissions", permission); if (!hasPermission) { throw new AccessDeniedException("Insufficient permissions " + permission.toString()); } @@ -2136,7 +2070,7 @@ private TableName getTableName(Region region) { @Override public void preClose(ObserverContext c, boolean abortRequested) - throws IOException { + throws IOException { requirePermission(c, "preClose", Action.ADMIN); } @@ -2146,20 +2080,19 @@ private void checkSystemOrSuperUser(User activeUser) throws IOException { return; } if (!Superusers.isSuperUser(activeUser)) { - throw new AccessDeniedException("User '" + (activeUser != null ? - activeUser.getShortName() : "null") + "' is not system or super user."); + throw new AccessDeniedException( + "User '" + (activeUser != null ? activeUser.getShortName() : "null") + + "' is not system or super user."); } } @Override - public void preStopRegionServer( - ObserverContext ctx) - throws IOException { + public void preStopRegionServer(ObserverContext ctx) + throws IOException { requirePermission(ctx, "preStopRegionServer", Action.ADMIN); } - private Map> makeFamilyMap(byte[] family, - byte[] qualifier) { + private Map> makeFamilyMap(byte[] family, byte[] qualifier) { if (family == null) { return null; } @@ -2171,8 +2104,8 @@ public void preStopRegionServer( @Override public void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { // We are delegating the authorization check to postGetTableDescriptors as we don't have // any concrete set of table names when a regex is present or the full list is requested. if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { @@ -2193,8 +2126,8 @@ public void preGetTableDescriptors(ObserverContext @Override public void postGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { // Skipping as checks in this case are already done by preGetTableDescriptors. 
if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { return; @@ -2206,8 +2139,8 @@ public void postGetTableDescriptors(ObserverContext ctx, - List descriptors, String regex) throws IOException { + List descriptors, String regex) throws IOException { // Retains only those which passes authorization checks. Iterator itr = descriptors.iterator(); while (itr.hasNext()) { @@ -2231,130 +2164,130 @@ public void postGetTableNames(ObserverContext ctx, @Override public void preMergeRegions(final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException { - requirePermission(ctx, "mergeRegions", regionsToMerge[0].getTable(), null, null, - Action.ADMIN); + final RegionInfo[] regionsToMerge) throws IOException { + requirePermission(ctx, "mergeRegions", regionsToMerge[0].getTable(), null, null, Action.ADMIN); } @Override public void preRollWALWriterRequest(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "preRollLogWriterRequest", Permission.Action.ADMIN); } @Override public void postRollWALWriterRequest(ObserverContext ctx) - throws IOException { } + throws IOException { + } @Override public void preSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException { + final String userName, final GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setUserQuota", Action.ADMIN); } @Override public void preSetUserQuota(final ObserverContext ctx, - final String userName, final TableName tableName, final GlobalQuotaSettings quotas) - throws IOException { + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { requirePermission(ctx, "setUserTableQuota", tableName, null, null, Action.ADMIN); } @Override public void preSetUserQuota(final ObserverContext ctx, - final String userName, final String namespace, final GlobalQuotaSettings quotas) - throws IOException { + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { requirePermission(ctx, "setUserNamespaceQuota", Action.ADMIN); } @Override public void preSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setTableQuota", tableName, null, null, Action.ADMIN); } @Override public void preSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException { + final String namespace, final GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setNamespaceQuota", Action.ADMIN); } @Override public void preSetRegionServerQuota(ObserverContext ctx, - final String regionServer, GlobalQuotaSettings quotas) throws IOException { + final String regionServer, GlobalQuotaSettings quotas) throws IOException { requirePermission(ctx, "setRegionServerQuota", Action.ADMIN); } @Override public ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { + ObserverContext ctx, ReplicationEndpoint endpoint) { return endpoint; } @Override public void preReplicateLogEntries(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "replicateLogEntries", Action.WRITE); } @Override - public void preClearCompactionQueues(ObserverContext ctx) - throws IOException { + public void 
preClearCompactionQueues(ObserverContext ctx) + throws IOException { requirePermission(ctx, "preClearCompactionQueues", Permission.Action.ADMIN); } @Override public void preAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException { + String peerId, ReplicationPeerConfig peerConfig) throws IOException { requirePermission(ctx, "addReplicationPeer", Action.ADMIN); } @Override public void preRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "removeReplicationPeer", Action.ADMIN); } @Override public void preEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "enableReplicationPeer", Action.ADMIN); } @Override public void preDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "disableReplicationPeer", Action.ADMIN); } @Override public void preGetReplicationPeerConfig(final ObserverContext ctx, - String peerId) throws IOException { + String peerId) throws IOException { requirePermission(ctx, "getReplicationPeerConfig", Action.ADMIN); } @Override public void preUpdateReplicationPeerConfig( - final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException { + final ObserverContext ctx, String peerId, + ReplicationPeerConfig peerConfig) throws IOException { requirePermission(ctx, "updateReplicationPeerConfig", Action.ADMIN); } @Override public void preTransitReplicationPeerSyncReplicationState( - final ObserverContext ctx, String peerId, - SyncReplicationState clusterState) throws IOException { + final ObserverContext ctx, String peerId, + SyncReplicationState clusterState) throws IOException { requirePermission(ctx, "transitSyncReplicationPeerState", Action.ADMIN); } @Override public void preListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException { + String regex) throws IOException { requirePermission(ctx, "listReplicationPeers", Action.ADMIN); } @Override public void preRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { // There are operations in the CREATE and ADMIN domain which may require lock, READ // or WRITE. So for any lock request, we check for these two perms irrespective of lock type. 
String reason = String.format("Description=%s", description); @@ -2363,38 +2296,38 @@ public void preRequestLock(ObserverContext ctx, St @Override public void preLockHeartbeat(ObserverContext ctx, - TableName tableName, String description) throws IOException { + TableName tableName, String description) throws IOException { checkLockPermissions(ctx, null, tableName, null, description); } @Override public void preExecuteProcedures(ObserverContext ctx) - throws IOException { + throws IOException { checkSystemOrSuperUser(getActiveUser(ctx)); } @Override public void preSwitchRpcThrottle(ObserverContext ctx, - boolean enable) throws IOException { + boolean enable) throws IOException { requirePermission(ctx, "switchRpcThrottle", Action.ADMIN); } @Override public void preIsRpcThrottleEnabled(ObserverContext ctx) - throws IOException { + throws IOException { requirePermission(ctx, "isRpcThrottleEnabled", Action.ADMIN); } @Override public void preSwitchExceedThrottleQuota(ObserverContext ctx, - boolean enable) throws IOException { + boolean enable) throws IOException { requirePermission(ctx, "switchExceedThrottleQuota", Action.ADMIN); } /** - * Returns the active user to which authorization checks should be applied. - * If we are in the context of an RPC call, the remote user is used, - * otherwise the currently logged in user is used. + * Returns the active user to which authorization checks should be applied. If we are in the + * context of an RPC call, the remote user is used, otherwise the currently logged in user is + * used. */ private User getActiveUser(ObserverContext ctx) throws IOException { // for non-rpc handling, fallback to system user @@ -2407,14 +2340,14 @@ private User getActiveUser(ObserverContext ctx) throws IOException { /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#hasUserPermissions(String, List)} instead. + * {@link Admin#hasUserPermissions(String, List)} instead. * @see Admin#hasUserPermissions(String, List) * @see HBASE-22117 */ @Deprecated @Override public void hasPermission(RpcController controller, HasPermissionRequest request, - RpcCallback done) { + RpcCallback done) { // Converts proto to a TablePermission object. 
TablePermission tPerm = AccessControlUtil.toTablePermission(request.getTablePermission()); // Check input user name @@ -2427,8 +2360,8 @@ public void hasPermission(RpcController controller, HasPermissionRequest request User caller = RpcServer.getRequestUser().orElse(null); List permissions = Lists.newArrayList(tPerm); preHasUserPermissions(caller, inputUserName, permissions); - boolean hasPermission = regionEnv.getConnection().getAdmin() - .hasUserPermissions(inputUserName, permissions).get(0); + boolean hasPermission = + regionEnv.getConnection().getAdmin().hasUserPermissions(inputUserName, permissions).get(0); response = ResponseConverter.buildHasPermissionResponse(hasPermission); } catch (IOException ioe) { ResponseConverter.setControllerException(controller, ioe); @@ -2438,18 +2371,18 @@ public void hasPermission(RpcController controller, HasPermissionRequest request @Override public void preGrant(ObserverContext ctx, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { preGrantOrRevoke(getActiveUser(ctx), "grant", userPermission); } @Override public void preRevoke(ObserverContext ctx, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { preGrantOrRevoke(getActiveUser(ctx), "revoke", userPermission); } private void preGrantOrRevoke(User caller, String request, UserPermission userPermission) - throws IOException { + throws IOException { switch (userPermission.getPermission().scope) { case GLOBAL: accessChecker.requireGlobalPermission(caller, request, Action.ADMIN, ""); @@ -2473,13 +2406,13 @@ private void preGrantOrRevoke(User caller, String request, UserPermission userPe @Override public void preGetUserPermissions(ObserverContext ctx, - String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) - throws IOException { + String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier) + throws IOException { preGetUserPermissions(getActiveUser(ctx), userName, namespace, tableName, family, qualifier); } private void preGetUserPermissions(User caller, String userName, String namespace, - TableName tableName, byte[] family, byte[] qualifier) throws IOException { + TableName tableName, byte[] family, byte[] qualifier) throws IOException { if (tableName != null) { accessChecker.requirePermission(caller, "getUserPermissions", tableName, family, qualifier, userName, Action.ADMIN); @@ -2493,12 +2426,12 @@ private void preGetUserPermissions(User caller, String userName, String namespac @Override public void preHasUserPermissions(ObserverContext ctx, - String userName, List permissions) throws IOException { + String userName, List permissions) throws IOException { preHasUserPermissions(getActiveUser(ctx), userName, permissions); } private void preHasUserPermissions(User caller, String userName, List permissions) - throws IOException { + throws IOException { String request = "hasUserPermissions"; for (Permission permission : permissions) { if (!caller.getShortName().equals(userName)) { @@ -2537,81 +2470,80 @@ private void preHasUserPermissions(User caller, String userName, List ctx, - Set
    servers, Set tables, String targetGroup) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "moveServersAndTables", - null, Permission.Action.ADMIN); + Set
    servers, Set tables, String targetGroup) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "moveServersAndTables", null, + Permission.Action.ADMIN); } @Override public void preMoveServers(final ObserverContext ctx, - Set
    servers, String targetGroup) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "moveServers", - null, Permission.Action.ADMIN); + Set
    servers, String targetGroup) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "moveServers", null, + Permission.Action.ADMIN); } @Override public void preMoveTables(ObserverContext ctx, - Set tables, String targetGroup) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "moveTables", - null, Permission.Action.ADMIN); + Set tables, String targetGroup) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "moveTables", null, + Permission.Action.ADMIN); } @Override - public void preAddRSGroup(ObserverContext ctx, - String name) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "addRSGroup", - null, Permission.Action.ADMIN); + public void preAddRSGroup(ObserverContext ctx, String name) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "addRSGroup", null, + Permission.Action.ADMIN); } @Override - public void preRemoveRSGroup(ObserverContext ctx, - String name) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "removeRSGroup", - null, Permission.Action.ADMIN); + public void preRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "removeRSGroup", null, + Permission.Action.ADMIN); } @Override - public void preBalanceRSGroup(ObserverContext ctx, - String groupName, BalanceRequest request) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "balanceRSGroup", - null, Permission.Action.ADMIN); + public void preBalanceRSGroup(ObserverContext ctx, String groupName, + BalanceRequest request) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "balanceRSGroup", null, + Permission.Action.ADMIN); } @Override - public void preRemoveServers( - ObserverContext ctx, - Set
    servers) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "removeServers", - null, Permission.Action.ADMIN); + public void preRemoveServers(ObserverContext ctx, + Set
    servers) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "removeServers", null, + Permission.Action.ADMIN); } @Override - public void preGetRSGroupInfo(ObserverContext ctx, - String groupName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfo", - null, Permission.Action.ADMIN); + public void preGetRSGroupInfo(ObserverContext ctx, String groupName) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfo", null, + Permission.Action.ADMIN); } @Override public void preGetRSGroupInfoOfTable(ObserverContext ctx, - TableName tableName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfTable", - null, Permission.Action.ADMIN); - //todo: should add check for table existence + TableName tableName) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfTable", null, + Permission.Action.ADMIN); + // todo: should add check for table existence } @Override public void preListRSGroups(ObserverContext ctx) - throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "listRSGroups", - null, Permission.Action.ADMIN); + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "listRSGroups", null, + Permission.Action.ADMIN); } @Override public void preListTablesInRSGroup(ObserverContext ctx, String groupName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "listTablesInRSGroup", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "listTablesInRSGroup", null, + Permission.Action.ADMIN); } @Override @@ -2623,22 +2555,22 @@ public void preGetConfiguredNamespacesAndTablesInRSGroup( @Override public void preGetRSGroupInfoOfServer(ObserverContext ctx, - Address server) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfServer", - null, Permission.Action.ADMIN); + Address server) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfServer", null, + Permission.Action.ADMIN); } @Override public void preRenameRSGroup(ObserverContext ctx, String oldName, - String newName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "renameRSGroup", - null, Permission.Action.ADMIN); + String newName) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "renameRSGroup", null, + Permission.Action.ADMIN); } @Override public void preUpdateRSGroupConfig(final ObserverContext ctx, final String groupName, final Map configuration) throws IOException { - accessChecker - .requirePermission(getActiveUser(ctx), "updateRSGroupConfig", null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "updateRSGroupConfig", null, + Permission.Action.ADMIN); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java index 3ced725e0ad7..7f9853d89397 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
    See the NOTICE file
     * distributed with this work for additional information
    @@ -15,7 +15,6 @@
     * See the License for the specific language governing permissions and
     * limitations under the License.
     */
    -
     package org.apache.hadoop.hbase.security.access;
     
     import java.io.IOException;
    @@ -26,7 +25,6 @@
     import java.util.Set;
     import java.util.concurrent.ConcurrentHashMap;
     import java.util.concurrent.atomic.AtomicLong;
    -
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.AuthUtil;
     import org.apache.hadoop.hbase.Cell;
    @@ -44,17 +42,15 @@
     /**
      * Performs authorization checks for a given user's assigned permissions.
      *
    - * There're following scopes: Global, Namespace, Table, Family,
    - * Qualifier, Cell.
    - * Generally speaking, higher scopes can overrides lower scopes,
    - * except for Cell permission can be granted even a user has not permission on specified table,
    - * which means the user can get/scan only those granted cells parts.
    + * There're following scopes: Global, Namespace, Table, Family,
    + * Qualifier, Cell. Generally speaking, higher scopes can overrides lower scopes,
    + * except for Cell permission can be granted even a user has not permission on specified table,
    + * which means the user can get/scan only those granted cells parts.
      *
    - * e.g, if user A has global permission R(ead), he can
    - * read table T without checking table scope permission, so authorization checks alway starts from
    - * Global scope.
    + * e.g, if user A has global permission R(ead), he can read table T without checking table scope
    + * permission, so authorization checks alway starts from Global scope.
      *
    - * For each scope, not only user but also groups he belongs to will be checked.
    + * For each scope, not only user but also groups he belongs to will be checked.
      *
    */ @InterfaceAudience.Private @@ -91,12 +87,13 @@ void clear() { } } } + PermissionCache NS_NO_PERMISSION = new PermissionCache<>(); PermissionCache TBL_NO_PERMISSION = new PermissionCache<>(); /** - * Cache for global permission excluding superuser and supergroup. - * Since every user/group can only have one global permission, no need to use PermissionCache. + * Cache for global permission excluding superuser and supergroup. Since every user/group can only + * have one global permission, no need to use PermissionCache. */ private Map globalCache = new ConcurrentHashMap<>(); /** Cache for namespace permission. */ @@ -118,7 +115,7 @@ void clear() { /** * Update acl info for table. * @param table name of table - * @param data updated acl data + * @param data updated acl data * @throws IOException exception when deserialize data */ public void refreshTableCacheFromWritable(TableName table, byte[] data) throws IOException { @@ -143,7 +140,7 @@ public void refreshTableCacheFromWritable(TableName table, byte[] data) throws I /** * Update acl info for namespace. * @param namespace namespace - * @param data updated acl data + * @param data updated acl data * @throws IOException exception when deserialize data */ public void refreshNamespaceCacheFromWritable(String namespace, byte[] data) throws IOException { @@ -183,7 +180,7 @@ private void updateGlobalCache(ListMultimap globalPerms) { /** * Updates the internal table permissions cache for specified table. - * @param table updated table name + * @param table updated table name * @param tablePerms new table permissions */ private void updateTableCache(TableName table, ListMultimap tablePerms) { @@ -198,10 +195,9 @@ private void updateTableCache(TableName table, ListMultimap /** * Updates the internal namespace permissions cache for specified namespace. * @param namespace updated namespace - * @param nsPerms new namespace permissions + * @param nsPerms new namespace permissions */ - private void updateNamespaceCache(String namespace, - ListMultimap nsPerms) { + private void updateNamespaceCache(String namespace, ListMultimap nsPerms) { PermissionCache cacheToUpdate = namespaceCache.getOrDefault(namespace, new PermissionCache<>()); clearCache(cacheToUpdate); @@ -216,7 +212,7 @@ private void clearCache(PermissionCache cacheToUpdate) { @SuppressWarnings("unchecked") private void updateCache(ListMultimap newPermissions, - PermissionCache cacheToUpdate) { + PermissionCache cacheToUpdate) { for (String name : newPermissions.keySet()) { for (Permission permission : newPermissions.get(name)) { cacheToUpdate.put(name, permission); @@ -226,7 +222,7 @@ private void updateCache(ListMultimap newPermissio /** * Check if user has given action privilige in global scope. - * @param user user name + * @param user user name * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ @@ -254,9 +250,9 @@ private boolean authorizeGlobal(GlobalPermission permissions, Permission.Action /** * Check if user has given action privilige in namespace scope. 
- * @param user user name + * @param user user name * @param namespace namespace - * @param action one of action in [Read, Write, Create, Exec, Admin] + * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ public boolean authorizeUserNamespace(User user, String namespace, Permission.Action action) { @@ -266,8 +262,8 @@ public boolean authorizeUserNamespace(User user, String namespace, Permission.Ac if (authorizeUserGlobal(user, action)) { return true; } - PermissionCache nsPermissions = namespaceCache.getOrDefault(namespace, - NS_NO_PERMISSION); + PermissionCache nsPermissions = + namespaceCache.getOrDefault(namespace, NS_NO_PERMISSION); if (authorizeNamespace(nsPermissions.get(user.getShortName()), namespace, action)) { return true; } @@ -279,8 +275,8 @@ public boolean authorizeUserNamespace(User user, String namespace, Permission.Ac return false; } - private boolean authorizeNamespace(Set permissions, - String namespace, Permission.Action action) { + private boolean authorizeNamespace(Set permissions, String namespace, + Permission.Action action) { if (permissions == null) { return false; } @@ -293,10 +289,10 @@ private boolean authorizeNamespace(Set permissions, } /** - * Checks if the user has access to the full table or at least a family/qualifier - * for the specified action. - * @param user user name - * @param table table name + * Checks if the user has access to the full table or at least a family/qualifier for the + * specified action. + * @param user user name + * @param table table name * @param action action in one of [Read, Write, Create, Exec, Admin] * @return true if the user has access to the table, false otherwise */ @@ -310,8 +306,8 @@ public boolean accessUserTable(User user, TableName table, Permission.Action act if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (hasAccessTable(tblPermissions.get(user.getShortName()), action)) { return true; } @@ -337,8 +333,8 @@ private boolean hasAccessTable(Set permissions, Permission.Acti /** * Check if user has given action privilige in table scope. - * @param user user name - * @param table table name + * @param user user name + * @param table table name * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ @@ -348,28 +344,28 @@ public boolean authorizeUserTable(User user, TableName table, Permission.Action /** * Check if user has given action privilige in table:family scope. - * @param user user name - * @param table table name + * @param user user name + * @param table table name * @param family family name * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ public boolean authorizeUserTable(User user, TableName table, byte[] family, - Permission.Action action) { + Permission.Action action) { return authorizeUserTable(user, table, family, null, action); } /** * Check if user has given action privilige in table:family:qualifier scope. 
- * @param user user name - * @param table table name - * @param family family name + * @param user user name + * @param table table name + * @param family family name * @param qualifier qualifier name - * @param action one of action in [Read, Write, Create, Exec, Admin] + * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ - public boolean authorizeUserTable(User user, TableName table, byte[] family, - byte[] qualifier, Permission.Action action) { + public boolean authorizeUserTable(User user, TableName table, byte[] family, byte[] qualifier, + Permission.Action action) { if (user == null) { return false; } @@ -379,22 +375,24 @@ public boolean authorizeUserTable(User user, TableName table, byte[] family, if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeTable(tblPermissions.get(user.getShortName()), table, family, qualifier, action)) { return true; } for (String group : user.getGroupNames()) { - if (authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), - table, family, qualifier, action)) { + if ( + authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, qualifier, + action) + ) { return true; } } return false; } - private boolean authorizeTable(Set permissions, - TableName table, byte[] family, byte[] qualifier, Permission.Action action) { + private boolean authorizeTable(Set permissions, TableName table, byte[] family, + byte[] qualifier, Permission.Action action) { if (permissions == null) { return false; } @@ -407,32 +405,33 @@ private boolean authorizeTable(Set permissions, } /** - * Check if user has given action privilige in table:family scope. - * This method is for backward compatibility. - * @param user user name - * @param table table name + * Check if user has given action privilige in table:family scope. This method is for backward + * compatibility. + * @param user user name + * @param table table name * @param family family names * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ - public boolean authorizeUserFamily(User user, TableName table, - byte[] family, Permission.Action action) { - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + public boolean authorizeUserFamily(User user, TableName table, byte[] family, + Permission.Action action) { + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeFamily(tblPermissions.get(user.getShortName()), table, family, action)) { return true; } for (String group : user.getGroupNames()) { - if (authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), - table, family, action)) { + if ( + authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, action) + ) { return true; } } return false; } - private boolean authorizeFamily(Set permissions, - TableName table, byte[] family, Permission.Action action) { + private boolean authorizeFamily(Set permissions, TableName table, byte[] family, + Permission.Action action) { if (permissions == null) { return false; } @@ -446,9 +445,9 @@ private boolean authorizeFamily(Set permissions, /** * Check if user has given action privilige in cell scope. 
- * @param user user name - * @param table table name - * @param cell cell to be checked + * @param user user name + * @param table table name + * @param cell cell to be checked * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ @@ -456,11 +455,11 @@ public boolean authorizeCell(User user, TableName table, Cell cell, Permission.A try { List perms = PermissionStorage.getCellPermissionsForUser(user, cell); if (LOG.isTraceEnabled()) { - LOG.trace("Perms for user {} in table {} in cell {}: {}", - user.getShortName(), table, cell, (perms != null ? perms : "")); + LOG.trace("Perms for user {} in table {} in cell {}: {}", user.getShortName(), table, cell, + (perms != null ? perms : "")); } if (perms != null) { - for (Permission p: perms) { + for (Permission p : perms) { if (p.implies(action)) { return true; } @@ -492,8 +491,7 @@ public void removeTable(TableName table) { } /** - * Last modification logical time - * @return time + * Last modification logical time n */ public long getMTime() { return mtime.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java index 64a8c4cfeae9..5842d5834372 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Represents the result of an authorization check for logging and error - * reporting. + * Represents the result of an authorization check for logging and error reporting. 
*/ @InterfaceAudience.Private public class AuthResult { @@ -52,7 +49,7 @@ public class AuthResult { private final Map> families; public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + Permission.Action action, TableName table, byte[] family, byte[] qualifier) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -67,8 +64,7 @@ public AuthResult(boolean allowed, String request, String reason, User user, } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + Permission.Action action, TableName table, Map> families) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -83,7 +79,7 @@ public AuthResult(boolean allowed, String request, String reason, User user, } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, String namespace) { + Permission.Action action, String namespace) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -129,7 +125,9 @@ public String getRequest() { return request; } - public Params getParams() { return this.params;} + public Params getParams() { + return this.params; + } public void setAllowed(boolean allowed) { this.allowed = allowed; @@ -140,7 +138,7 @@ public void setReason(String reason) { } private static String toFamiliesString(Map> families, - byte[] family, byte[] qual) { + byte[] family, byte[] qual) { StringBuilder sb = new StringBuilder(); if (families != null) { boolean first = true; @@ -150,11 +148,11 @@ private static String toFamiliesString(Map> fami for (Object o : entry.getValue()) { String qualifier; if (o instanceof byte[]) { - qualifier = Bytes.toString((byte[])o); + qualifier = Bytes.toString((byte[]) o); } else if (o instanceof Cell) { Cell c = (Cell) o; qualifier = Bytes.toString(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength()); + c.getQualifierLength()); } else { // Shouldn't really reach this? qualifier = o.toString(); @@ -185,27 +183,20 @@ private static String toFamiliesString(Map> fami public String toContextString() { StringBuilder sb = new StringBuilder(); String familiesString = toFamiliesString(families, family, qualifier); - sb.append("(user=") - .append(user != null ? user.getName() : "UNKNOWN") - .append(", "); + sb.append("(user=").append(user != null ? user.getName() : "UNKNOWN").append(", "); sb.append("scope=") - .append(namespace != null ? namespace : - table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()) - .append(", "); - if(namespace == null && familiesString.length() > 0) { - sb.append("family=") - .append(familiesString) - .append(", "); + .append(namespace != null ? namespace + : table == null ? "GLOBAL" + : table.getNameWithNamespaceInclAsString()) + .append(", "); + if (namespace == null && familiesString.length() > 0) { + sb.append("family=").append(familiesString).append(", "); } String paramsString = params.toString(); - if(paramsString.length() > 0) { - sb.append("params=[") - .append(paramsString) - .append("],"); + if (paramsString.length() > 0) { + sb.append("params=[").append(paramsString).append("],"); } - sb.append("action=") - .append(action != null ? action.toString() : "") - .append(")"); + sb.append("action=").append(action != null ? 
action.toString() : "").append(")"); return sb.toString(); } @@ -214,35 +205,33 @@ public String toString() { return "AuthResult" + toContextString(); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, String namespace) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + String namespace) { return new AuthResult(true, request, reason, user, action, namespace); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + TableName table, byte[] family, byte[] qualifier) { return new AuthResult(true, request, reason, user, action, table, family, qualifier); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + TableName table, Map> families) { return new AuthResult(true, request, reason, user, action, table, families); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, String namespace) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + String namespace) { return new AuthResult(false, request, reason, user, action, namespace); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + TableName table, byte[] family, byte[] qualifier) { return new AuthResult(false, request, reason, user, action, table, family, qualifier); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + TableName table, Map> families) { return new AuthResult(false, request, reason, user, action, table, families); } @@ -292,12 +281,10 @@ public Params setQualifier(byte[] qualifier) { @Override public String toString() { String familiesString = toFamiliesString(families, family, qualifier); - String[] params = new String[] { - namespace != null ? "namespace=" + namespace : null, - tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null, - familiesString.length() > 0 ? "family=" + familiesString : null, - extraParams.isEmpty() ? null : concatenateExtraParams() - }; + String[] params = new String[] { namespace != null ? "namespace=" + namespace : null, + tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null, + familiesString.length() > 0 ? "family=" + familiesString : null, + extraParams.isEmpty() ? 
null : concatenateExtraParams() }; return Joiner.on(",").skipNulls().join(params); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java index 1e83e966102f..231fd8bcaefc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -43,10 +42,10 @@ public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, MasterObserver { public static final String CP_COPROCESSOR_WHITELIST_PATHS_KEY = - "hbase.coprocessor.region.whitelist.paths"; + "hbase.coprocessor.region.whitelist.paths"; - private static final Logger LOG = LoggerFactory - .getLogger(CoprocessorWhitelistMasterObserver.class); + private static final Logger LOG = + LoggerFactory.getLogger(CoprocessorWhitelistMasterObserver.class); @Override public Optional getMasterObserver() { @@ -55,35 +54,31 @@ public Optional getMasterObserver() { @Override public TableDescriptor preModifyTable(ObserverContext ctx, - TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) - throws IOException { + TableName tableName, TableDescriptor currentDesc, TableDescriptor newDesc) throws IOException { verifyCoprocessors(ctx, newDesc); return newDesc; } @Override - public void preCreateTable(ObserverContext ctx, - TableDescriptor htd, RegionInfo[] regions) throws IOException { + public void preCreateTable(ObserverContext ctx, TableDescriptor htd, + RegionInfo[] regions) throws IOException { verifyCoprocessors(ctx, htd); } /** * Validates a single whitelist path against the coprocessor path - * @param coprocPath the path to the coprocessor including scheme - * @param wlPath can be: - * 1) a "*" to wildcard all coprocessor paths - * 2) a specific filesystem (e.g. hdfs://my-cluster/) - * 3) a wildcard path to be evaluated by - * {@link FilenameUtils#wildcardMatch(String, String)} - * path can specify scheme or not (e.g. - * "file:///usr/hbase/coprocessors" or for all - * filesystems "/usr/hbase/coprocessors") - * @return if the path was found under the wlPath + * @param coprocPath the path to the coprocessor including scheme + * @param wlPath can be: 1) a "*" to wildcard all coprocessor paths 2) a specific filesystem + * (e.g. hdfs://my-cluster/) 3) a wildcard path to be evaluated by + * {@link FilenameUtils#wildcardMatch(String, String)} path can specify scheme + * or not (e.g. 
"file:///usr/hbase/coprocessors" or for all filesystems + * "/usr/hbase/coprocessors") + * @return if the path was found under the wlPath */ private static boolean validatePath(Path coprocPath, Path wlPath) { // verify if all are allowed if (wlPath.toString().equals("*")) { - return(true); + return (true); } // verify we are on the same filesystem if wlPath has a scheme @@ -113,50 +108,48 @@ private static boolean validatePath(Path coprocPath, Path wlPath) { coprocPathHost = ""; } if (!wlPathScheme.equals(coprocPathScheme) || !wlPathHost.equals(coprocPathHost)) { - return(false); + return (false); } } // allow any on this file-system (file systems were verified to be the same above) if (wlPath.isRoot()) { - return(true); + return (true); } // allow "loose" matches stripping scheme - if (FilenameUtils.wildcardMatch( - Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), - Path.getPathWithoutSchemeAndAuthority(wlPath).toString())) { - return(true); + if ( + FilenameUtils.wildcardMatch(Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), + Path.getPathWithoutSchemeAndAuthority(wlPath).toString()) + ) { + return (true); } - return(false); + return (false); } /** - * Perform the validation checks for a coprocessor to determine if the path - * is white listed or not. - * @throws IOException if path is not included in whitelist or a failure - * occurs in processing - * @param ctx as passed in from the coprocessor - * @param htd as passed in from the coprocessor + * Perform the validation checks for a coprocessor to determine if the path is white listed or + * not. + * @throws IOException if path is not included in whitelist or a failure occurs in processing + * @param ctx as passed in from the coprocessor + * @param htd as passed in from the coprocessor */ private static void verifyCoprocessors(ObserverContext ctx, - TableDescriptor htd) throws IOException { - Collection paths = - ctx.getEnvironment().getConfiguration().getStringCollection( - CP_COPROCESSOR_WHITELIST_PATHS_KEY); + TableDescriptor htd) throws IOException { + Collection paths = ctx.getEnvironment().getConfiguration() + .getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY); for (CoprocessorDescriptor cp : htd.getCoprocessorDescriptors()) { if (cp.getJarPath().isPresent()) { if (paths.stream().noneMatch(p -> { Path wlPath = new Path(p); if (validatePath(new Path(cp.getJarPath().get()), wlPath)) { - LOG.debug(String.format("Coprocessor %s found in directory %s", - cp.getClassName(), p)); + LOG.debug(String.format("Coprocessor %s found in directory %s", cp.getClassName(), p)); return true; } return false; })) { - throw new IOException(String.format("Loading %s DENIED in %s", - cp.getClassName(), CP_COPROCESSOR_WHITELIST_PATHS_KEY)); + throw new IOException(String.format("Loading %s DENIED in %s", cp.getClassName(), + CP_COPROCESSOR_WHITELIST_PATHS_KEY)); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java index 95927c0b164a..1a5bb63cbd36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java @@ -19,7 +19,6 @@ import java.util.Collection; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -28,8 +27,8 @@ import 
org.apache.yetus.audience.InterfaceAudience; /** - * NoopAccessChecker is returned when hbase.security.authorization is not enabled. - * Always allow authorization if any user require any permission. + * NoopAccessChecker is returned when hbase.security.authorization is not enabled. Always allow + * authorization if any user require any permission. */ @InterfaceAudience.Private public final class NoopAccessChecker extends AccessChecker { @@ -49,7 +48,7 @@ public void requirePermission(User user, String request, String filterUser, Acti @Override public void requireGlobalPermission(User user, String request, Action perm, TableName tableName, - Map> familyMap, String filterUser) { + Map> familyMap, String filterUser) { } @Override @@ -58,23 +57,23 @@ public void requireGlobalPermission(User user, String request, Action perm, Stri @Override public void requireNamespacePermission(User user, String request, String namespace, - String filterUser, Action... permissions) { + String filterUser, Action... permissions) { } @Override public void requireNamespacePermission(User user, String request, String namespace, - TableName tableName, Map> familyMap, - Action... permissions) { + TableName tableName, Map> familyMap, + Action... permissions) { } @Override public void requirePermission(User user, String request, TableName tableName, byte[] family, - byte[] qualifier, String filterUser, Action... permissions) { + byte[] qualifier, String filterUser, Action... permissions) { } @Override public void requireTablePermission(User user, String request, TableName tableName, byte[] family, - byte[] qualifier, Action... permissions) { + byte[] qualifier, Action... permissions) { } @Override @@ -83,7 +82,7 @@ public void performOnSuperuser(String request, User caller, String userToBeCheck @Override public void checkLockPermissions(User user, String namespace, TableName tableName, - RegionInfo[] regionInfos, String reason) { + RegionInfo[] regionInfos, String reason) { } @Override @@ -93,7 +92,7 @@ public boolean hasUserPermission(User user, String request, Permission permissio @Override public AuthResult permissionGranted(String request, User user, Action permRequest, - TableName tableName, Map> families) { + TableName tableName, Map> families) { return AuthResult.allow(request, "All users allowed because authorization is disabled", user, permRequest, tableName, families); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java index 268bc36fc45b..7a4444291017 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.ByteArrayInputStream; @@ -78,14 +77,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; /** - * Maintains lists of permission grants to users and groups to allow for - * authorization checks by {@link AccessController}. - * + * Maintains lists of permission grants to users and groups to allow for authorization checks by + * {@link AccessController}. *

    - * Access control lists are stored in an "internal" metadata table named
    - * {@code _acl_}. Each table's permission grants are stored as a separate row,
    - * keyed by the table name. KeyValues for permissions assignments are stored
    - * in one of the formats:
    + * Access control lists are stored in an "internal" metadata table named {@code _acl_}. Each table's
    + * permission grants are stored as a separate row, keyed by the table name. KeyValues for
    + * permissions assignments are stored in one of the formats:
    + *
      *
      * Key                      Desc
      * --------                 --------
    @@ -105,7 +103,7 @@
     public final class PermissionStorage {
       /** Internal storage table for access control lists */
       public static final TableName ACL_TABLE_NAME =
    -      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl");
    +    TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl");
       public static final byte[] ACL_GLOBAL_NAME = ACL_TABLE_NAME.getName();
       /** Column family used to store ACL grants */
       public static final String ACL_LIST_FAMILY_STR = "l";
    @@ -116,8 +114,8 @@ public final class PermissionStorage {
       public static final char NAMESPACE_PREFIX = '@';
     
       /**
    -   * Delimiter to separate user, column family, and qualifier in
    -   * _acl_ table info: column keys */
    +   * Delimiter to separate user, column family, and qualifier in _acl_ table info: column keys
    +   */
       public static final char ACL_KEY_DELIMITER = ',';
     
       private static final Logger LOG = LoggerFactory.getLogger(PermissionStorage.class);
    @@ -127,13 +125,13 @@ private PermissionStorage() {
     
       /**
        * Stores a new user permission grant in the access control lists table.
    -   * @param conf the configuration
    +   * @param conf     the configuration
        * @param userPerm the details of the permission to be granted
    -   * @param t acl table instance. It is closed upon method return.
    +   * @param t        acl table instance. It is closed upon method return.
        * @throws IOException in the case of an error accessing the metadata table
        */
       public static void addUserPermission(Configuration conf, UserPermission userPerm, Table t,
    -      boolean mergeExistingPermissions) throws IOException {
    +    boolean mergeExistingPermissions) throws IOException {
         Permission permission = userPerm.getPermission();
         Permission.Action[] actions = permission.getActions();
         byte[] rowKey = userPermissionRowKey(permission);
    @@ -147,7 +145,7 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
         }
     
         Set actionSet = new TreeSet();
    -    if(mergeExistingPermissions){
    +    if (mergeExistingPermissions) {
           List perms = getUserPermissions(conf, rowKey, null, null, null, false);
           UserPermission currentPerm = null;
           for (UserPermission perm : perms) {
    @@ -157,7 +155,7 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
             }
           }
     
    -      if (currentPerm != null && currentPerm.getPermission().getActions() != null){
    +      if (currentPerm != null && currentPerm.getPermission().getActions() != null) {
             actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions()));
           }
         }
    @@ -171,17 +169,12 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
         for (Permission.Action action : actionSet) {
           value[index++] = action.code();
         }
    -    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
    -        .setRow(p.getRow())
    -        .setFamily(ACL_LIST_FAMILY)
    -        .setQualifier(key)
    -        .setTimestamp(p.getTimestamp())
    -        .setType(Type.Put)
    -        .setValue(value)
    -        .build());
    +    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow())
    +      .setFamily(ACL_LIST_FAMILY).setQualifier(key).setTimestamp(p.getTimestamp()).setType(Type.Put)
    +      .setValue(value).build());
         if (LOG.isDebugEnabled()) {
           LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
    -          + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
    +        + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
         }
         try {
           t.put(p);
    @@ -191,34 +184,33 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
       }
     
       static void addUserPermission(Configuration conf, UserPermission userPerm, Table t)
    -          throws IOException{
    +    throws IOException {
         addUserPermission(conf, userPerm, t, false);
       }
     
       /**
    -   * Removes a previously granted permission from the stored access control
    -   * lists.  The {@link TablePermission} being removed must exactly match what
    -   * is stored -- no wildcard matching is attempted.  Ie, if user "bob" has
    -   * been granted "READ" access to the "data" table, but only to column family
    -   * plus qualifier "info:colA", then trying to call this method with only
    -   * user "bob" and the table name "data" (but without specifying the
    -   * column qualifier "info:colA") will have no effect.
    -   *
    -   * @param conf the configuration
    +   * Removes a previously granted permission from the stored access control lists. The
    +   * {@link TablePermission} being removed must exactly match what is stored -- no wildcard matching
    +   * is attempted. Ie, if user "bob" has been granted "READ" access to the "data" table, but only to
    +   * column family plus qualifier "info:colA", then trying to call this method with only user "bob"
    +   * and the table name "data" (but without specifying the column qualifier "info:colA") will have
    +   * no effect.
    +   * @param conf     the configuration
        * @param userPerm the details of the permission to be revoked
    -   * @param t acl table
    +   * @param t        acl table
        * @throws IOException if there is an error accessing the metadata table
        */
       public static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
    -      throws IOException {
    -    if (null == userPerm.getPermission().getActions() ||
    -        userPerm.getPermission().getActions().length == 0) {
    +    throws IOException {
    +    if (
    +      null == userPerm.getPermission().getActions()
    +        || userPerm.getPermission().getActions().length == 0
    +    ) {
           removePermissionRecord(conf, userPerm, t);
         } else {
           // Get all the global user permissions from the acl table
    -      List permsList =
    -        getUserPermissions(conf, userPermissionRowKey(userPerm.getPermission()),
    -          null, null, null, false);
    +      List permsList = getUserPermissions(conf,
    +        userPermissionRowKey(userPerm.getPermission()), null, null, null, false);
           List remainingActions = new ArrayList<>();
           List dropActions = Arrays.asList(userPerm.getPermission().getActions());
           for (UserPermission perm : permsList) {
    @@ -230,8 +222,8 @@ public static void removeUserPermission(Configuration conf, UserPermission userP
                 }
               }
               if (!remainingActions.isEmpty()) {
    -            perm.getPermission().setActions(
    -              remainingActions.toArray(new Permission.Action[remainingActions.size()]));
    +            perm.getPermission()
    +              .setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
                 addUserPermission(conf, perm, t);
               } else {
                 removePermissionRecord(conf, userPerm, t);
    @@ -241,12 +233,12 @@ public static void removeUserPermission(Configuration conf, UserPermission userP
           }
         }
         if (LOG.isDebugEnabled()) {
    -      LOG.debug("Removed permission "+ userPerm.toString());
    +      LOG.debug("Removed permission " + userPerm.toString());
         }
       }
     
       private static void removePermissionRecord(Configuration conf, UserPermission userPerm, Table t)
    -      throws IOException {
    +    throws IOException {
         Delete d = new Delete(userPermissionRowKey(userPerm.getPermission()));
         d.addColumns(ACL_LIST_FAMILY, userPermissionKey(userPerm));
         try {
    @@ -260,12 +252,12 @@ private static void removePermissionRecord(Configuration conf, UserPermission us
        * Remove specified table from the _acl_ table.
        */
       static void removeTablePermissions(Configuration conf, TableName tableName, Table t)
    -      throws IOException{
    +    throws IOException {
         Delete d = new Delete(tableName.getName());
         d.addFamily(ACL_LIST_FAMILY);
     
         if (LOG.isDebugEnabled()) {
    -      LOG.debug("Removing permissions of removed table "+ tableName);
    +      LOG.debug("Removing permissions of removed table " + tableName);
         }
         try {
           t.delete(d);
    @@ -278,11 +270,11 @@ static void removeTablePermissions(Configuration conf, TableName tableName, Tabl
        * Remove specified namespace from the acl table.
        */
       static void removeNamespacePermissions(Configuration conf, String namespace, Table t)
    -      throws IOException{
    +    throws IOException {
         Delete d = new Delete(Bytes.toBytes(toNamespaceEntry(namespace)));
         d.addFamily(ACL_LIST_FAMILY);
         if (LOG.isDebugEnabled()) {
    -      LOG.debug("Removing permissions of removed namespace "+ namespace);
    +      LOG.debug("Removing permissions of removed namespace " + namespace);
         }
     
         try {
    @@ -293,15 +285,14 @@ static void removeNamespacePermissions(Configuration conf, String namespace, Tab
       }
     
       static private void removeTablePermissions(TableName tableName, byte[] column, Table table,
    -      boolean closeTable) throws IOException {
    +    boolean closeTable) throws IOException {
         Scan scan = new Scan();
         scan.addFamily(ACL_LIST_FAMILY);
     
         String columnName = Bytes.toString(column);
    -    scan.setFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(
    -        String.format("(%s%s%s)|(%s%s)$",
    -            ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER,
    -            ACL_KEY_DELIMITER, columnName))));
    +    scan.setFilter(new QualifierFilter(CompareOperator.EQUAL,
    +      new RegexStringComparator(String.format("(%s%s%s)|(%s%s)$", ACL_KEY_DELIMITER, columnName,
    +        ACL_KEY_DELIMITER, ACL_KEY_DELIMITER, columnName))));
     
         Set qualifierSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
         ResultScanner scanner = null;
    @@ -334,10 +325,10 @@ static private void removeTablePermissions(TableName tableName, byte[] column, T
        * Remove specified table column from the acl table.
        */
       static void removeTablePermissions(Configuration conf, TableName tableName, byte[] column,
    -      Table t) throws IOException {
    +    Table t) throws IOException {
         if (LOG.isDebugEnabled()) {
    -      LOG.debug("Removing permissions of removed column " + Bytes.toString(column) +
    -          " from table "+ tableName);
    +      LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + " from table "
    +        + tableName);
         }
         removeTablePermissions(tableName, column, t, true);
       }
    @@ -358,10 +349,7 @@ static byte[] userPermissionRowKey(Permission permission) {
       }
     
       /**
    -   * Build qualifier key from user permission:
    -   *  username
    -   *  username,family
    -   *  username,family,qualifier
    +   * Build qualifier key from user permission: username username,family username,family,qualifier
        */
       static byte[] userPermissionKey(UserPermission permission) {
         byte[] key = Bytes.toBytes(permission.getUser());
    @@ -374,9 +362,9 @@ static byte[] userPermissionKey(UserPermission permission) {
         }
     
         if (family != null && family.length > 0) {
    -      key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, family));
    +      key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, family));
           if (qualifier != null && qualifier.length > 0) {
    -        key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, qualifier));
    +        key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, qualifier));
           }
         }
     
    @@ -384,8 +372,7 @@ static byte[] userPermissionKey(UserPermission permission) {
       }
     
       /**
    -   * Returns {@code true} if the given region is part of the {@code _acl_}
    -   * metadata table.
    +   * Returns {@code true} if the given region is part of the {@code _acl_} metadata table.
        */
       static boolean isAclRegion(Region region) {
         return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
    @@ -399,17 +386,15 @@ static boolean isAclTable(TableDescriptor desc) {
       }
     
       /**
    -   * Loads all of the permission grants stored in a region of the {@code _acl_}
    -   * table.
    -   *
    +   * Loads all of the permission grants stored in a region of the {@code _acl_} table.
        * @param aclRegion the acl region
        * @return a map of the permissions for this table.
        * @throws IOException if an error occurs
        */
       static Map> loadAll(Region aclRegion)
    -      throws IOException {
    +    throws IOException {
         if (!isAclRegion(aclRegion)) {
    -      throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
    +      throw new IOException("Can only load permissions from " + ACL_TABLE_NAME);
         }
     
         Map> allPerms =
    @@ -435,7 +420,7 @@ static Map> loadAll(Region aclRegio
                 entry = CellUtil.cloneRow(kv);
               }
               Pair permissionsOfUserOnTable =
    -              parsePermissionRecord(entry, kv, null, null, false, null);
    +            parsePermissionRecord(entry, kv, null, null, false, null);
               if (permissionsOfUserOnTable != null) {
                 String username = permissionsOfUserOnTable.getFirst();
                 Permission permission = permissionsOfUserOnTable.getSecond();
    @@ -459,11 +444,11 @@ static Map> loadAll(Region aclRegio
       }
     
       /**
    -   * Load all permissions from the region server holding {@code _acl_},
    -   * primarily intended for testing purposes.
    +   * Load all permissions from the region server holding {@code _acl_}, primarily intended for
    +   * testing purposes.
        */
    -  static Map> loadAll(
    -      Configuration conf) throws IOException {
    +  static Map> loadAll(Configuration conf)
    +    throws IOException {
         Map> allPerms =
           new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
     
    @@ -480,7 +465,7 @@ static Map> loadAll(
             try {
               for (Result row : scanner) {
                 ListMultimap resultPerms =
    -                parsePermissions(row.getRow(), row, null, null, null, false);
    +              parsePermissions(row.getRow(), row, null, null, null, false);
                 allPerms.put(row.getRow(), resultPerms);
               }
             } finally {
    @@ -495,19 +480,19 @@ static Map> loadAll(
       }
     
       public static ListMultimap getTablePermissions(Configuration conf,
    -      TableName tableName) throws IOException {
    +    TableName tableName) throws IOException {
         return getPermissions(conf, tableName != null ? tableName.getName() : null, null, null, null,
           null, false);
       }
     
       public static ListMultimap getNamespacePermissions(Configuration conf,
    -      String namespace) throws IOException {
    +    String namespace) throws IOException {
         return getPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, null, null,
           false);
       }
     
       public static ListMultimap getGlobalPermissions(Configuration conf)
    -      throws IOException {
    +    throws IOException {
         return getPermissions(conf, null, null, null, null, null, false);
       }
     
    @@ -519,7 +504,7 @@ public static ListMultimap getGlobalPermissions(Configur
        * 

    */ static ListMultimap getPermissions(Configuration conf, byte[] entryName, - Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { + Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { if (entryName == null) { entryName = ACL_GLOBAL_NAME; } @@ -541,7 +526,7 @@ static ListMultimap getPermissions(Configuration conf, b perms = parsePermissions(entryName, row, cf, cq, user, hasFilterUser); } else { LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " - + Bytes.toString(entryName)); + + Bytes.toString(entryName)); } return perms; @@ -552,8 +537,8 @@ static ListMultimap getPermissions(Configuration conf, b * associated permissions. */ public static List getUserTablePermissions(Configuration conf, - TableName tableName, byte[] cf, byte[] cq, String userName, boolean hasFilterUser) - throws IOException { + TableName tableName, byte[] cf, byte[] cq, String userName, boolean hasFilterUser) + throws IOException { return getUserPermissions(conf, tableName == null ? null : tableName.getName(), cf, cq, userName, hasFilterUser); } @@ -563,7 +548,7 @@ public static List getUserTablePermissions(Configuration conf, * associated permissions. */ public static List getUserNamespacePermissions(Configuration conf, - String namespace, String user, boolean hasFilterUser) throws IOException { + String namespace, String user, boolean hasFilterUser) throws IOException { return getUserPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, user, hasFilterUser); } @@ -571,19 +556,19 @@ public static List getUserNamespacePermissions(Configuration con /** * Returns the currently granted permissions for a given table/namespace with associated * permissions based on the specified column family, column qualifier and user name. - * @param conf the configuration - * @param entryName Table name or the namespace - * @param cf Column family - * @param cq Column qualifier - * @param user User name to be filtered from permission as requested + * @param conf the configuration + * @param entryName Table name or the namespace + * @param cf Column family + * @param cq Column qualifier + * @param user User name to be filtered from permission as requested * @param hasFilterUser true if filter user is provided, otherwise false. * @return List of UserPermissions * @throws IOException on failure */ public static List getUserPermissions(Configuration conf, byte[] entryName, - byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { + byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { ListMultimap allPerms = - getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser); + getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser); List perms = new ArrayList<>(); for (Map.Entry entry : allPerms.entries()) { perms.add(entry.getValue()); @@ -596,12 +581,12 @@ public static List getUserPermissions(Configuration conf, byte[] * name. 
*/ private static ListMultimap parsePermissions(byte[] entryName, - Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) { + Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) { ListMultimap perms = ArrayListMultimap.create(); if (result != null && result.size() > 0) { for (Cell kv : result.rawCells()) { Pair permissionsOfUserOnTable = - parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user); + parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user); if (permissionsOfUserOnTable != null) { String username = permissionsOfUserOnTable.getFirst(); @@ -614,7 +599,7 @@ private static ListMultimap parsePermissions(byte[] entr } private static Pair parsePermissionRecord(byte[] entryName, Cell kv, - byte[] cf, byte[] cq, boolean filterPerms, String filterUser) { + byte[] cf, byte[] cq, boolean filterPerms, String filterUser) { // return X given a set of permissions encoded in the permissionRecord kv. byte[] family = CellUtil.cloneFamily(kv); if (!Bytes.equals(family, ACL_LIST_FAMILY)) { @@ -624,10 +609,8 @@ private static Pair parsePermissionRecord(byte[] entryName, byte[] key = CellUtil.cloneQualifier(kv); byte[] value = CellUtil.cloneValue(kv); if (LOG.isDebugEnabled()) { - LOG.debug("Read acl: entry[" + - Bytes.toStringBinary(entryName) + "], kv [" + - Bytes.toStringBinary(key) + ": " + - Bytes.toStringBinary(value)+"]"); + LOG.debug("Read acl: entry[" + Bytes.toStringBinary(entryName) + "], kv [" + + Bytes.toStringBinary(key) + ": " + Bytes.toStringBinary(value) + "]"); } // check for a column family appended to the key @@ -638,8 +621,9 @@ private static Pair parsePermissionRecord(byte[] entryName, // Group list is not required when filterUser itself a group List filterUserGroups = null; if (filterPerms) { - if (username.charAt(0) == '@' && !StringUtils.isEmpty(filterUser) - && filterUser.charAt(0) != '@') { + if ( + username.charAt(0) == '@' && !StringUtils.isEmpty(filterUser) && filterUser.charAt(0) != '@' + ) { filterUserGroups = AccessChecker.getUserGroups(filterUser); } } @@ -651,9 +635,8 @@ private static Pair parsePermissionRecord(byte[] entryName, return null; } - return new Pair<>(username, - Permission.newBuilder(Bytes.toString(fromNamespaceEntry(entryName))) - .withActionCodes(value).build()); + return new Pair<>(username, Permission + .newBuilder(Bytes.toString(fromNamespaceEntry(entryName))).withActionCodes(value).build()); } // Handle global entry @@ -670,13 +653,13 @@ private static Pair parsePermissionRecord(byte[] entryName, int idx = username.indexOf(ACL_KEY_DELIMITER); byte[] permFamily = null; byte[] permQualifier = null; - if (idx > 0 && idx < username.length()-1) { - String remainder = username.substring(idx+1); + if (idx > 0 && idx < username.length() - 1) { + String remainder = username.substring(idx + 1); username = username.substring(0, idx); idx = remainder.indexOf(ACL_KEY_DELIMITER); - if (idx > 0 && idx < remainder.length()-1) { + if (idx > 0 && idx < remainder.length() - 1) { permFamily = Bytes.toBytes(remainder.substring(0, idx)); - permQualifier = Bytes.toBytes(remainder.substring(idx+1)); + permQualifier = Bytes.toBytes(remainder.substring(idx + 1)); } else { permFamily = Bytes.toBytes(remainder); } @@ -697,7 +680,7 @@ private static Pair parsePermissionRecord(byte[] entryName, } return new Pair<>(username, Permission.newBuilder(TableName.valueOf(entryName)) - .withFamily(permFamily).withQualifier(permQualifier).withActionCodes(value).build()); + 
.withFamily(permFamily).withQualifier(permQualifier).withActionCodes(value).build()); } /* @@ -708,7 +691,7 @@ private static Pair parsePermissionRecord(byte[] entryName, * filtered if not equal. */ private static boolean validateFilterUser(String username, String filterUser, - List filterUserGroups) { + List filterUserGroups) { if (filterUserGroups == null) { // Validate user name or group names whether equal if (filterUser.equals(username)) { @@ -728,7 +711,7 @@ private static boolean validateFilterUser(String username, String filterUser, * all CQ records. */ private static boolean validateCFAndCQ(byte[] permFamily, byte[] cf, byte[] permQualifier, - byte[] cq) { + byte[] cq) { boolean include = true; if (cf != null) { if (Bytes.equals(cf, permFamily)) { @@ -749,9 +732,9 @@ private static boolean validateCFAndCQ(byte[] permFamily, byte[] cf, byte[] perm * resulting byte array. Writes a set of permission [user: table permission] */ public static byte[] writePermissionsAsBytes(ListMultimap perms, - Configuration conf) { + Configuration conf) { return ProtobufUtil - .prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray()); + .prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray()); } // This is part of the old HbaseObjectWritableFor96Migration. @@ -761,8 +744,8 @@ public static byte[] writePermissionsAsBytes(ListMultimap readWritableUserPermission(DataInput in, - Configuration conf) throws IOException, ClassNotFoundException { + private static List readWritableUserPermission(DataInput in, Configuration conf) + throws IOException, ClassNotFoundException { assert WritableUtils.readVInt(in) == LIST_CODE; int length = in.readInt(); List list = new ArrayList<>(length); @@ -779,7 +762,7 @@ private static List readWritableUserPermission(DataInput in, } public static ListMultimap readUserPermission(byte[] data, - Configuration conf) throws DeserializationException { + Configuration conf) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -811,8 +794,8 @@ public static ListMultimap readUserPermission(byte[] dat } } - public static ListMultimap readPermissions(byte[] data, - Configuration conf) throws DeserializationException { + public static ListMultimap readPermissions(byte[] data, Configuration conf) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -850,7 +833,7 @@ public static boolean isNamespaceEntry(String entryName) { } public static boolean isNamespaceEntry(byte[] entryName) { - return entryName != null && entryName.length !=0 && entryName[0] == NAMESPACE_PREFIX; + return entryName != null && entryName.length != 0 && entryName[0] == NAMESPACE_PREFIX; } public static boolean isTableEntry(byte[] entryName) { @@ -869,22 +852,22 @@ public static String fromNamespaceEntry(String namespace) { } public static byte[] toNamespaceEntry(byte[] namespace) { - byte[] ret = new byte[namespace.length+1]; + byte[] ret = new byte[namespace.length + 1]; ret[0] = NAMESPACE_PREFIX; System.arraycopy(namespace, 0, ret, 1, namespace.length); return ret; } public static byte[] fromNamespaceEntry(byte[] namespace) { - if(namespace[0] != NAMESPACE_PREFIX) { - throw new IllegalArgumentException("Argument is not a valid namespace entry: " + - Bytes.toString(namespace)); + if (namespace[0] != NAMESPACE_PREFIX) { + throw new IllegalArgumentException( + "Argument is not a valid namespace entry: " + 
Bytes.toString(namespace)); } return Arrays.copyOfRange(namespace, 1, namespace.length); } public static List getCellPermissionsForUser(User user, Cell cell) - throws IOException { + throws IOException { // Save an object allocation where we can if (cell.getTagsLength() == 0) { return null; @@ -898,15 +881,15 @@ public static List getCellPermissionsForUser(User user, Cell cell) // TODO: This can be improved. Don't build UsersAndPermissions just to unpack it again, // use the builder AccessControlProtos.UsersAndPermissions.Builder builder = - AccessControlProtos.UsersAndPermissions.newBuilder(); + AccessControlProtos.UsersAndPermissions.newBuilder(); if (tag.hasArray()) { ProtobufUtil.mergeFrom(builder, tag.getValueArray(), tag.getValueOffset(), tag.getValueLength()); } else { ProtobufUtil.mergeFrom(builder, Tag.cloneValue(tag)); } - ListMultimap kvPerms = - AccessControlUtil.toUsersAndPermissions(builder.build()); + ListMultimap kvPerms = + AccessControlUtil.toUsersAndPermissions(builder.build()); // Are there permissions for this user? List userPerms = kvPerms.get(user.getShortName()); if (userPerms != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java index 72da07cee5ea..656f59576fcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; @@ -84,8 +83,10 @@ private boolean isEmptyArchiveDirDeletable(Path dir) { return false; } else if (isArchiveNamespaceDir(dir) && namespaceExists(dir.getName())) { return false; - } else if (isArchiveTableDir(dir) - && tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName()))) { + } else if ( + isArchiveTableDir(dir) + && tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName())) + ) { return false; } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java index acb6940697a8..4eb40019b6d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
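As a reader's aid for the PermissionStorage hunks above, the following is a minimal usage sketch, not part of the patch; it assumes a live Configuration named conf and uses only methods visible in this hunk. Namespace entries are the namespace name with the NAMESPACE_PREFIX byte prepended ('@' in current HBase, an assumption here since the constant's value is not shown), which is exactly what toNamespaceEntry/fromNamespaceEntry add and strip.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.PermissionStorage;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PermissionStorageSketch {
      static void dumpGrants(Configuration conf) throws IOException {
        // All permissions granted on table ns1:t1, with no column or user filtering
        // (cf, cq and user are null, hasFilterUser is false).
        List<UserPermission> tablePerms = PermissionStorage.getUserTablePermissions(
            conf, TableName.valueOf("ns1", "t1"), null, null, null, false);
        // Same for the namespace; internally the entry is encoded as "@ns1".
        List<UserPermission> nsPerms = PermissionStorage.getUserNamespacePermissions(
            conf, "ns1", null, false);
        tablePerms.forEach(p -> System.out.println("table grant: " + p));
        nsPerms.forEach(p -> System.out.println("namespace grant: " + p));
        // Round trip of the namespace-entry encoding used for acl table row keys.
        byte[] nsEntry = PermissionStorage.toNamespaceEntry(Bytes.toBytes("ns1")); // "@ns1"
        assert PermissionStorage.isNamespaceEntry(nsEntry);
        assert Bytes.equals(PermissionStorage.fromNamespaceEntry(nsEntry), Bytes.toBytes("ns1"));
      }
    }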
*/ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -115,24 +114,26 @@ public Optional getMasterObserver() { @Override public void preMasterInitialization(ObserverContext c) - throws IOException { - if (c.getEnvironment().getConfiguration() - .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) { + throws IOException { + if ( + c.getEnvironment().getConfiguration() + .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false) + ) { MasterCoprocessorEnvironment mEnv = c.getEnvironment(); if (!(mEnv instanceof HasMasterServices)) { throw new IOException("Does not implement HMasterServices"); } masterServices = ((HasMasterServices) mEnv).getMasterServices(); hdfsAclHelper = new SnapshotScannerHDFSAclHelper(masterServices.getConfiguration(), - masterServices.getConnection()); + masterServices.getConnection()); pathHelper = hdfsAclHelper.getPathHelper(); hdfsAclHelper.setCommonDirectoryPermission(); initialized = true; userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration()); } else { LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure " - + "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE - + " is false."); + + "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE + + " is false."); } } @@ -149,15 +150,15 @@ public void postStartMaster(ObserverContext c) thr family -> Bytes.equals(family.getName(), SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY)); if (!containHdfsAclFamily) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build()); admin.modifyTable(builder.build()); } aclTableInitialized = true; } else { - throw new TableNotFoundException("Table " + PermissionStorage.ACL_TABLE_NAME - + " is not created yet. Please check if " + getClass().getName() - + " is configured after " + AccessController.class.getName()); + throw new TableNotFoundException( + "Table " + PermissionStorage.ACL_TABLE_NAME + " is not created yet. Please check if " + + getClass().getName() + " is configured after " + AccessController.class.getName()); } } } @@ -171,7 +172,7 @@ public void preStopMaster(ObserverContext c) { @Override public void postCompletedCreateTableAction(ObserverContext c, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + TableDescriptor desc, RegionInfo[] regions) throws IOException { if (needHandleTableHdfsAcl(desc, "createTable " + desc.getTableName())) { TableName tableName = desc.getTableName(); // 1. 
Create table directories to make HDFS acls can be inherited @@ -187,7 +188,7 @@ public void postCompletedCreateTableAction(ObserverContext c, - NamespaceDescriptor ns) throws IOException { + NamespaceDescriptor ns) throws IOException { if (checkInitialized("createNamespace " + ns.getName())) { // Create namespace directories to make HDFS acls can be inherited List paths = hdfsAclHelper.getNamespaceRootPaths(ns.getName()); @@ -199,7 +200,7 @@ public void postCreateNamespace(ObserverContext c, @Override public void postCompletedSnapshotAction(ObserverContext c, - SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { + SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { if (needHandleTableHdfsAcl(tableDescriptor, "snapshot " + snapshot.getName())) { // Add HDFS acls of users with table read permission to snapshot files hdfsAclHelper.snapshotAcl(snapshot); @@ -208,7 +209,7 @@ public void postCompletedSnapshotAction(ObserverContext c, - TableName tableName) throws IOException { + TableName tableName) throws IOException { if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) { // 1. create tmp table directories hdfsAclHelper.createTableDirectories(tableName); @@ -220,7 +221,7 @@ public void postCompletedTruncateTableAction(ObserverContext ctx, - TableName tableName) throws IOException { + TableName tableName) throws IOException { if (!tableName.isSystemTable() && checkInitialized("deleteTable " + tableName)) { /* * Remove table user access HDFS acl from namespace directory if the user has no permissions @@ -230,7 +231,7 @@ public void postCompletedDeleteTableAction(ObserverContext users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName); if (users.size() > 0) { // 1. Remove table archive directory default ACLs @@ -249,25 +250,29 @@ public void postCompletedDeleteTableAction(ObserverContext ctx, - TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) - throws IOException { + TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) + throws IOException { try (Table aclTable = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { - if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName) - && !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + if ( + needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName) + && !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor) + ) { // 1. Create table directories used for acl inherited hdfsAclHelper.createTableDirectories(tableName); // 2. Add table users HDFS acls Set tableUsers = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false); Set users = - hdfsAclHelper.getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); + hdfsAclHelper.getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); users.addAll(tableUsers); hdfsAclHelper.addTableAcl(tableName, users, "modify"); // 3. 
Record table user acls are synced to HDFS in acl table SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(), tableUsers, tableName); - } else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName) - && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) { + } else if ( + needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName) + && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor) + ) { // 1. Remove empty table directories List tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false); for (Path path : tableRootPaths) { @@ -275,8 +280,8 @@ public void postModifyTable(ObserverContext ctx, } // 2. Remove all table HDFS acls Set tableUsers = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false); - Set users = hdfsAclHelper - .getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); + Set users = + hdfsAclHelper.getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true); users.addAll(tableUsers); hdfsAclHelper.removeTableAcl(tableName, users); // 3. Remove namespace access HDFS acls for users who only own permission for this table @@ -291,10 +296,10 @@ public void postModifyTable(ObserverContext ctx, @Override public void postDeleteNamespace(ObserverContext ctx, - String namespace) throws IOException { + String namespace) throws IOException { if (checkInitialized("deleteNamespace " + namespace)) { try (Table aclTable = - ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { // 1. Delete namespace archive dir default ACLs Set users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable, PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace))); @@ -316,13 +321,15 @@ public void postDeleteNamespace(ObserverContext ct @Override public void postGrant(ObserverContext c, - UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { - if (!checkInitialized( - "grant " + userPermission + ", merge existing permissions " + mergeExistingPermissions)) { + UserPermission userPermission, boolean mergeExistingPermissions) throws IOException { + if ( + !checkInitialized( + "grant " + userPermission + ", merge existing permissions " + mergeExistingPermissions) + ) { return; } try (Table aclTable = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { Configuration conf = c.getEnvironment().getConfiguration(); String userName = userPermission.getUser(); switch (userPermission.getAccessScope()) { @@ -332,11 +339,11 @@ public void postGrant(ObserverContext c, if (!isHdfsAclSet(aclTable, userName)) { // 1. Get namespaces and tables which global user acls are already synced Pair, Set> skipNamespaceAndTables = - SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); + SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); Set skipNamespaces = skipNamespaceAndTables.getFirst(); Set skipTables = skipNamespaceAndTables.getSecond().stream() - .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) - .collect(Collectors.toSet()); + .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) + .collect(Collectors.toSet()); // 2. Add HDFS acl(skip namespaces and tables directories whose acl is set) hdfsAclHelper.grantAcl(userPermission, skipNamespaces, skipTables); // 3. 
Record global acl is sync to HDFS @@ -355,7 +362,7 @@ public void postGrant(ObserverContext c, if (!isHdfsAclSet(aclTable, userName, namespace)) { // 1. Get tables which namespace user acls are already synced Set skipTables = SnapshotScannerHDFSAclStorage - .getUserNamespaceAndTable(aclTable, userName).getSecond(); + .getUserNamespaceAndTable(aclTable, userName).getSecond(); // 2. Add HDFS acl(skip tables directories whose acl is set) hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), skipTables); } @@ -390,17 +397,17 @@ public void postGrant(ObserverContext c, break; default: throw new IllegalArgumentException( - "Illegal user permission scope " + userPermission.getAccessScope()); + "Illegal user permission scope " + userPermission.getAccessScope()); } } } @Override public void postRevoke(ObserverContext c, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (checkInitialized("revoke " + userPermission)) { try (Table aclTable = - c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { String userName = userPermission.getUser(); Configuration conf = c.getEnvironment().getConfiguration(); switch (userPermission.getAccessScope()) { @@ -413,7 +420,7 @@ public void postRevoke(ObserverContext c, case NAMESPACE: NamespacePermission nsPerm = (NamespacePermission) userPermission.getPermission(); UserPermission userNsPerm = - getUserNamespacePermission(conf, userName, nsPerm.getNamespace()); + getUserNamespacePermission(conf, userName, nsPerm.getNamespace()); if (userNsPerm == null || !hdfsAclHelper.containReadAction(userNsPerm)) { removeUserNamespaceHdfsAcl(aclTable, userName, nsPerm.getNamespace(), userPermission); } @@ -430,22 +437,22 @@ public void postRevoke(ObserverContext c, break; default: throw new IllegalArgumentException( - "Illegal user permission scope " + userPermission.getAccessScope()); + "Illegal user permission scope " + userPermission.getAccessScope()); } } } } private void removeUserGlobalHdfsAcl(Table aclTable, String userName, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName)) { // 1. Get namespaces and tables which global user acls are already synced Pair, Set> namespaceAndTable = - SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); + SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName); Set skipNamespaces = namespaceAndTable.getFirst(); Set skipTables = namespaceAndTable.getSecond().stream() - .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) - .collect(Collectors.toSet()); + .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())) + .collect(Collectors.toSet()); // 2. Remove user HDFS acls(skip namespaces and tables directories // whose acl must be reversed) hdfsAclHelper.revokeAcl(userPermission, skipNamespaces, skipTables); @@ -455,12 +462,12 @@ private void removeUserGlobalHdfsAcl(Table aclTable, String userName, } private void removeUserNamespaceHdfsAcl(Table aclTable, String userName, String namespace, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace)) { if (!SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName)) { // 1. 
Get tables whose namespace user acls are already synced Set skipTables = - SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName).getSecond(); + SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName).getSecond(); // 2. Remove user HDFS acls(skip tables directories whose acl must be reversed) hdfsAclHelper.revokeAcl(userPermission, new HashSet<>(), skipTables); } @@ -470,11 +477,13 @@ private void removeUserNamespaceHdfsAcl(Table aclTable, String userName, String } private void removeUserTableHdfsAcl(Table aclTable, String userName, TableName tableName, - UserPermission userPermission) throws IOException { + UserPermission userPermission) throws IOException { if (SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName)) { - if (!SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName) + if ( + !SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName) && !SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, - tableName.getNamespaceAsString())) { + tableName.getNamespaceAsString()) + ) { // 1. Remove table acls hdfsAclHelper.revokeAcl(userPermission, new HashSet<>(0), new HashSet<>(0)); } @@ -484,26 +493,26 @@ private void removeUserTableHdfsAcl(Table aclTable, String userName, TableName t } private UserPermission getUserGlobalPermission(Configuration conf, String userName) - throws IOException { + throws IOException { List permissions = PermissionStorage.getUserPermissions(conf, PermissionStorage.ACL_GLOBAL_NAME, null, null, userName, true); return permissions.size() > 0 ? permissions.get(0) : null; } private UserPermission getUserNamespacePermission(Configuration conf, String userName, - String namespace) throws IOException { + String namespace) throws IOException { List permissions = - PermissionStorage.getUserNamespacePermissions(conf, namespace, userName, true); + PermissionStorage.getUserNamespacePermissions(conf, namespace, userName, true); return permissions.size() > 0 ? permissions.get(0) : null; } private UserPermission getUserTablePermission(Configuration conf, String userName, - TableName tableName) throws IOException { + TableName tableName) throws IOException { List permissions = PermissionStorage - .getUserTablePermissions(conf, tableName, null, null, userName, true).stream() - .filter(userPermission -> hdfsAclHelper - .isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission())) - .collect(Collectors.toList()); + .getUserTablePermissions(conf, tableName, null, null, userName, true).stream() + .filter(userPermission -> hdfsAclHelper + .isNotFamilyOrQualifierPermission((TablePermission) userPermission.getPermission())) + .collect(Collectors.toList()); return permissions.size() > 0 ? 
permissions.get(0) : null; } @@ -512,12 +521,12 @@ private boolean isHdfsAclSet(Table aclTable, String userName) throws IOException } private boolean isHdfsAclSet(Table aclTable, String userName, String namespace) - throws IOException { + throws IOException { return isHdfsAclSet(aclTable, userName, namespace, null); } private boolean isHdfsAclSet(Table aclTable, String userName, TableName tableName) - throws IOException { + throws IOException { return isHdfsAclSet(aclTable, userName, null, tableName); } @@ -525,17 +534,17 @@ private boolean isHdfsAclSet(Table aclTable, String userName, TableName tableNam * Check if user global/namespace/table HDFS acls is already set */ private boolean isHdfsAclSet(Table aclTable, String userName, String namespace, - TableName tableName) throws IOException { + TableName tableName) throws IOException { boolean isSet = SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName); if (namespace != null) { isSet = isSet - || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace); + || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace); } if (tableName != null) { isSet = isSet - || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, - tableName.getNamespaceAsString()) - || SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName); + || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, + tableName.getNamespaceAsString()) + || SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName); } return isSet; } @@ -554,18 +563,18 @@ boolean checkInitialized(String operation) { private boolean needHandleTableHdfsAcl(TablePermission tablePermission) throws IOException { return needHandleTableHdfsAcl(tablePermission.getTableName(), "") - && hdfsAclHelper.isNotFamilyOrQualifierPermission(tablePermission); + && hdfsAclHelper.isNotFamilyOrQualifierPermission(tablePermission); } private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException { - return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper - .isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName)); + return !tableName.isSystemTable() && checkInitialized(operation) + && hdfsAclHelper.isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName)); } private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) { TableName tableName = tableDescriptor.getTableName(); return !tableName.isSystemTable() && checkInitialized(operation) - && hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor); + && hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor); } private User getActiveUser(ObserverContext ctx) throws IOException { @@ -583,25 +592,27 @@ private User getActiveUser(ObserverContext ctx) throws IOException { * delete 'ns1:t1', if Bob has global read permission, '@ns1' read permission or * 'ns1:other_tables' read permission, then skip remove Bob access acl in ns1Dirs, otherwise, * remove Bob access acl. 
- * @param aclTable acl table - * @param tableName the name of the table + * @param aclTable acl table + * @param tableName the name of the table * @param tablesUsers table users set * @return users whose access acl will be removed from the namespace of the table * @throws IOException if an error occurred */ private Set filterUsersToRemoveNsAccessAcl(Table aclTable, TableName tableName, - Set tablesUsers) throws IOException { + Set tablesUsers) throws IOException { Set removeUsers = new HashSet<>(); byte[] namespace = tableName.getNamespace(); for (String user : tablesUsers) { List userEntries = SnapshotScannerHDFSAclStorage.getUserEntries(aclTable, user); boolean remove = true; for (byte[] entry : userEntries) { - if (PermissionStorage.isGlobalEntry(entry) + if ( + PermissionStorage.isGlobalEntry(entry) || (PermissionStorage.isNamespaceEntry(entry) - && Bytes.equals(PermissionStorage.fromNamespaceEntry(entry), namespace)) + && Bytes.equals(PermissionStorage.fromNamespaceEntry(entry), namespace)) || (!Bytes.equals(tableName.getName(), entry) - && Bytes.equals(TableName.valueOf(entry).getNamespace(), namespace))) { + && Bytes.equals(TableName.valueOf(entry).getNamespace(), namespace)) + ) { remove = false; break; } @@ -633,12 +644,12 @@ static void addUserGlobalHdfsAcl(Table aclTable, String user) throws IOException } static void addUserNamespaceHdfsAcl(Table aclTable, String user, String namespace) - throws IOException { + throws IOException { addUserEntry(aclTable, user, Bytes.toBytes(PermissionStorage.toNamespaceEntry(namespace))); } static void addUserTableHdfsAcl(Connection connection, Set users, TableName tableName) - throws IOException { + throws IOException { try (Table aclTable = connection.getTable(PermissionStorage.ACL_TABLE_NAME)) { for (String user : users) { addUserTableHdfsAcl(aclTable, user, tableName); @@ -647,14 +658,14 @@ static void addUserTableHdfsAcl(Connection connection, Set users, TableN } static void addUserTableHdfsAcl(Connection connection, String user, TableName tableName) - throws IOException { + throws IOException { try (Table aclTable = connection.getTable(PermissionStorage.ACL_TABLE_NAME)) { addUserTableHdfsAcl(aclTable, user, tableName); } } static void addUserTableHdfsAcl(Table aclTable, String user, TableName tableName) - throws IOException { + throws IOException { addUserEntry(aclTable, user, tableName.getName()); } @@ -669,17 +680,17 @@ static void deleteUserGlobalHdfsAcl(Table aclTable, String user) throws IOExcept } static void deleteUserNamespaceHdfsAcl(Table aclTable, String user, String namespace) - throws IOException { + throws IOException { deleteUserEntry(aclTable, user, Bytes.toBytes(PermissionStorage.toNamespaceEntry(namespace))); } static void deleteUserTableHdfsAcl(Table aclTable, String user, TableName tableName) - throws IOException { + throws IOException { deleteUserEntry(aclTable, user, tableName.getName()); } static void deleteUserTableHdfsAcl(Connection connection, Set users, - TableName tableName) throws IOException { + TableName tableName) throws IOException { try (Table aclTable = connection.getTable(PermissionStorage.ACL_TABLE_NAME)) { for (String user : users) { deleteUserTableHdfsAcl(aclTable, user, tableName); @@ -688,7 +699,7 @@ static void deleteUserTableHdfsAcl(Connection connection, Set users, } private static void deleteUserEntry(Table aclTable, String user, byte[] entry) - throws IOException { + throws IOException { Delete delete = new Delete(entry); delete.addColumns(HDFS_ACL_FAMILY, Bytes.toBytes(user)); 
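The SnapshotScannerHDFSAclStorage helpers reformatted above all read and write one row per acl entry, with each synced user recorded as a qualifier under HDFS_ACL_FAMILY. The following is a hedged sketch only: the Get/Delete side mirrors code in this hunk, but the Put body and the family's byte value ("m" below) are assumptions, not taken from the patch.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class HdfsAclStorageSketch {
      // Assumed family name; the real constant is SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY.
      private static final byte[] HDFS_ACL_FAMILY = Bytes.toBytes("m");

      // Row key is the acl entry: a table name, a "@namespace" entry, or the global entry.
      static void addUserEntry(Table aclTable, String user, byte[] entry) throws IOException {
        Put put = new Put(entry);
        // Only the presence of the per-user qualifier matters; the value is just a marker.
        put.addColumn(HDFS_ACL_FAMILY, Bytes.toBytes(user), Bytes.toBytes(true));
        aclTable.put(put);
      }

      static boolean hasUserEntry(Table aclTable, String user, byte[] entry) throws IOException {
        Get get = new Get(entry);
        get.addColumn(HDFS_ACL_FAMILY, Bytes.toBytes(user));
        return aclTable.exists(get);
      }
    }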
aclTable.delete(delete); @@ -731,7 +742,7 @@ private static Set getEntryUsers(Table aclTable, byte[] entry) throws IO } static Pair, Set> getUserNamespaceAndTable(Table aclTable, - String userName) throws IOException { + String userName) throws IOException { Set namespaces = new HashSet<>(); Set tables = new HashSet<>(); List userEntries = getUserEntries(aclTable, userName); @@ -763,18 +774,18 @@ static boolean hasUserGlobalHdfsAcl(Table aclTable, String user) throws IOExcept } static boolean hasUserNamespaceHdfsAcl(Table aclTable, String user, String namespace) - throws IOException { + throws IOException { return hasUserEntry(aclTable, user, Bytes.toBytes(PermissionStorage.toNamespaceEntry(namespace))); } static boolean hasUserTableHdfsAcl(Table aclTable, String user, TableName tableName) - throws IOException { + throws IOException { return hasUserEntry(aclTable, user, tableName.getName()); } private static boolean hasUserEntry(Table aclTable, String userName, byte[] entry) - throws IOException { + throws IOException { Get get = new Get(entry); get.addColumn(HDFS_ACL_FAMILY, Bytes.toBytes(userName)); return aclTable.exists(get); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java index 53d9970e09df..41f61a6efa33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; @@ -38,7 +37,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -76,21 +74,21 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable"; public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER = - "hbase.acl.sync.to.hdfs.thread.number"; + "hbase.acl.sync.to.hdfs.thread.number"; // The tmp directory to restore snapshot, it can not be a sub directory of HBase root dir public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir"; public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT = - "/hbase/.tmpdir-to-restore-snapshot"; + "/hbase/.tmpdir-to-restore-snapshot"; // The default permission of the common directories if the feature is enabled. public static final String COMMON_DIRECTORY_PERMISSION = - "hbase.acl.sync.to.hdfs.common.directory.permission"; + "hbase.acl.sync.to.hdfs.common.directory.permission"; // The secure HBase permission is 700, 751 means all others have execute access and the mask is // set to read-execute to make the extended access ACL entries can work. 
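The SnapshotScannerHDFSAclHelper constants just above are the knobs that turn this feature on. A configuration sketch follows; the property names and default values are taken from the constants in this hunk, while the master coprocessor key is the standard HBase one and is assumed here. The ordering requirement (AccessController before SnapshotScannerHDFSAclController) mirrors the TableNotFoundException message earlier in this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class HdfsAclSyncConfigSketch {
      static Configuration enableHdfsAclSync() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.acl.sync.to.hdfs.enable", true);
        // Both coprocessors must run on the master, AccessController first, so the
        // hbase:acl table exists before the HDFS acl controller initializes.
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController,"
                + "org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController");
        // Optional tuning, shown with the defaults documented above.
        conf.set("hbase.acl.sync.to.hdfs.common.directory.permission", "751");
        conf.set("hbase.acl.sync.to.hdfs.restore.directory.permission", "753");
        conf.set("hbase.snapshot.restore.tmp.dir", "/hbase/.tmpdir-to-restore-snapshot");
        return conf;
      }
    }

In practice these properties would live in hbase-site.xml; the programmatic form is only to keep the sketch in one language.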
Be cautious to set // this value. public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751"; // The default permission of the snapshot restore directories if the feature is enabled. public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION = - "hbase.acl.sync.to.hdfs.restore.directory.permission"; + "hbase.acl.sync.to.hdfs.restore.directory.permission"; // 753 means all others have write-execute access. public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753"; @@ -101,7 +99,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { private ExecutorService pool; public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection) - throws IOException { + throws IOException { this.conf = configuration; this.pathHelper = new PathHelper(conf); this.fs = pathHelper.getFileSystem(); @@ -131,11 +129,11 @@ public void setCommonDirectoryPermission() throws IOException { for (Path path : paths) { createDirIfNotExist(path); fs.setPermission(path, new FsPermission( - conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT))); + conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT))); } // create snapshot restore directory Path restoreDir = - new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT)); + new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT)); createDirIfNotExist(restoreDir); fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION, SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT))); @@ -145,11 +143,11 @@ public void setCommonDirectoryPermission() throws IOException { * Set acl when grant user permission * @param userPermission the user and permission * @param skipNamespaces the namespace set to skip set acl because already set - * @param skipTables the table set to skip set acl because already set + * @param skipTables the table set to skip set acl because already set * @return false if an error occurred, otherwise true */ public boolean grantAcl(UserPermission userPermission, Set skipNamespaces, - Set skipTables) { + Set skipTables) { try { long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces, @@ -167,11 +165,11 @@ public boolean grantAcl(UserPermission userPermission, Set skipNamespace * Remove acl when grant or revoke user permission * @param userPermission the user and permission * @param skipNamespaces the namespace set to skip remove acl - * @param skipTables the table set to skip remove acl + * @param skipTables the table set to skip remove acl * @return false if an error occurred, otherwise true */ public boolean revokeAcl(UserPermission userPermission, Set skipNamespaces, - Set skipTables) { + Set skipTables) { try { long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces, @@ -199,7 +197,7 @@ public boolean snapshotAcl(SnapshotDescription snapshot) { if (userSet.size() > 0) { Path path = pathHelper.getSnapshotDir(snapshot.getName()); handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY, - true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get(); + true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get(); } LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(), EnvironmentEdgeManager.currentTime() - start); @@ -212,12 +210,12 @@ public boolean 
snapshotAcl(SnapshotDescription snapshot) { /** * Remove table access acl from namespace dir when delete table - * @param tableName the table + * @param tableName the table * @param removeUsers the users whose access acl will be removed * @return false if an error occurred, otherwise true */ public boolean removeNamespaceAccessAcl(TableName tableName, Set removeUsers, - String operation) { + String operation) { try { long start = EnvironmentEdgeManager.currentTime(); if (removeUsers.size() > 0) { @@ -235,7 +233,7 @@ public boolean removeNamespaceAccessAcl(TableName tableName, Set removeU /** * Remove default acl from namespace archive dir when delete namespace - * @param namespace the namespace + * @param namespace the namespace * @param removeUsers the users whose default acl will be removed * @return false if an error occurred, otherwise true */ @@ -244,7 +242,7 @@ public boolean removeNamespaceDefaultAcl(String namespace, Set removeUse long start = EnvironmentEdgeManager.currentTime(); Path archiveNsDir = pathHelper.getArchiveNsDir(namespace); HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers, - HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); + HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); operation.handleAcl(); LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace, EnvironmentEdgeManager.currentTime() - start); @@ -257,7 +255,7 @@ public boolean removeNamespaceDefaultAcl(String namespace, Set removeUse /** * Remove default acl from table archive dir when delete table - * @param tableName the table name + * @param tableName the table name * @param removeUsers the users whose default acl will be removed * @return false if an error occurred, otherwise true */ @@ -266,7 +264,7 @@ public boolean removeTableDefaultAcl(TableName tableName, Set removeUser long start = EnvironmentEdgeManager.currentTime(); Path archiveTableDir = pathHelper.getArchiveTableDir(tableName); HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers, - HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); + HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); operation.handleAcl(); LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName, EnvironmentEdgeManager.currentTime() - start); @@ -280,7 +278,7 @@ public boolean removeTableDefaultAcl(TableName tableName, Set removeUser /** * Add table user acls * @param tableName the table - * @param users the table users with READ permission + * @param users the table users with READ permission * @return false if an error occurred, otherwise true */ public boolean addTableAcl(TableName tableName, Set users, String operation) { @@ -304,7 +302,7 @@ public boolean addTableAcl(TableName tableName, Set users, String operat /** * Remove table acls when modify table * @param tableName the table - * @param users the table users with READ permission + * @param users the table users with READ permission * @return false if an error occurred, otherwise true */ public boolean removeTableAcl(TableName tableName, Set users) { @@ -324,8 +322,8 @@ public boolean removeTableAcl(TableName tableName, Set users) { } private void handleGrantOrRevokeAcl(UserPermission userPermission, - HDFSAclOperation.OperationType operationType, Set skipNamespaces, - Set skipTables) throws ExecutionException, InterruptedException, IOException { + HDFSAclOperation.OperationType operationType, Set 
skipNamespaces, + Set skipTables) throws ExecutionException, InterruptedException, IOException { Set users = Sets.newHashSet(userPermission.getUser()); switch (userPermission.getAccessScope()) { case GLOBAL: @@ -333,7 +331,7 @@ private void handleGrantOrRevokeAcl(UserPermission userPermission, break; case NAMESPACE: NamespacePermission namespacePermission = - (NamespacePermission) userPermission.getPermission(); + (NamespacePermission) userPermission.getPermission(); handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users, skipNamespaces, skipTables, operationType); break; @@ -345,18 +343,17 @@ private void handleGrantOrRevokeAcl(UserPermission userPermission, break; default: throw new IllegalArgumentException( - "Illegal user permission scope " + userPermission.getAccessScope()); + "Illegal user permission scope " + userPermission.getAccessScope()); } } private void handleGlobalAcl(Set users, Set skipNamespaces, - Set skipTables, HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException, IOException { + Set skipTables, HDFSAclOperation.OperationType operationType) + throws ExecutionException, InterruptedException, IOException { // handle global root directories HDFS acls - List hdfsAclOperations = getGlobalRootPaths().stream() - .map(path -> new HDFSAclOperation(fs, path, users, operationType, false, - HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)) - .collect(Collectors.toList()); + List hdfsAclOperations = + getGlobalRootPaths().stream().map(path -> new HDFSAclOperation(fs, path, users, operationType, + false, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).collect(Collectors.toList()); handleHDFSAclParallel(hdfsAclOperations).get(); // handle namespace HDFS acls handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables, @@ -364,15 +361,15 @@ private void handleGlobalAcl(Set users, Set skipNamespaces, } private void handleNamespaceAcl(Set namespaces, Set users, - Set skipNamespaces, Set skipTables, - HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException, IOException { + Set skipNamespaces, Set skipTables, + HDFSAclOperation.OperationType operationType) + throws ExecutionException, InterruptedException, IOException { namespaces.removeAll(skipNamespaces); namespaces.remove(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); // handle namespace root directories HDFS acls List hdfsAclOperations = new ArrayList<>(); Set skipTableNamespaces = - skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet()); + skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet()); for (String ns : namespaces) { /** * When op is REMOVE, remove the DEFAULT namespace ACL while keep the ACCESS for skipTables, @@ -381,8 +378,9 @@ private void handleNamespaceAcl(Set namespaces, Set users, */ HDFSAclOperation.OperationType op = operationType; HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS; - if (operationType == HDFSAclOperation.OperationType.REMOVE - && skipTableNamespaces.contains(ns)) { + if ( + operationType == HDFSAclOperation.OperationType.REMOVE && skipTableNamespaces.contains(ns) + ) { // remove namespace directories default HDFS acls for skip tables op = HDFSAclOperation.OperationType.REMOVE; aclType = HDFSAclOperation.AclType.DEFAULT; @@ -396,20 +394,22 @@ private void handleNamespaceAcl(Set namespaces, Set users, Set tables = new HashSet<>(); for (String namespace : namespaces) { 
tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream() - .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName) - .collect(Collectors.toSet())); + .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName) + .collect(Collectors.toSet())); } handleTableAcl(tables, users, skipNamespaces, skipTables, operationType); } private void handleTableAcl(Set tableNames, Set users, - Set skipNamespaces, Set skipTables, - HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException, IOException { + Set skipNamespaces, Set skipTables, + HDFSAclOperation.OperationType operationType) + throws ExecutionException, InterruptedException, IOException { Set filterTableNames = new HashSet<>(); for (TableName tableName : tableNames) { - if (!skipTables.contains(tableName) - && !skipNamespaces.contains(tableName.getNamespaceAsString())) { + if ( + !skipTables.contains(tableName) + && !skipNamespaces.contains(tableName.getNamespaceAsString()) + ) { filterTableNames.add(tableName); } } @@ -417,24 +417,23 @@ private void handleTableAcl(Set tableNames, Set users, // handle table HDFS acls for (TableName tableName : filterTableNames) { List hdfsAclOperations = getTableRootPaths(tableName, true).stream() - .map(path -> new HDFSAclOperation(fs, path, users, operationType, true, - HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)) - .collect(Collectors.toList()); + .map(path -> new HDFSAclOperation(fs, path, users, operationType, true, + HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)) + .collect(Collectors.toList()); CompletableFuture future = handleHDFSAclSequential(hdfsAclOperations); futures.add(future); } CompletableFuture future = - CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])); + CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])); future.get(); } private void handleNamespaceAccessAcl(String namespace, Set users, - HDFSAclOperation.OperationType operationType) - throws ExecutionException, InterruptedException { + HDFSAclOperation.OperationType operationType) throws ExecutionException, InterruptedException { // handle namespace access HDFS acls List hdfsAclOperations = - getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users, - operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList()); + getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users, + operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList()); CompletableFuture future = handleHDFSAclParallel(hdfsAclOperations); future.get(); } @@ -467,16 +466,15 @@ List getNamespaceRootPaths(String namespace) { /** * return paths that user will table permission will visit - * @param tableName the table + * @param tableName the table * @param includeSnapshotPath true if return table snapshots paths, otherwise false * @return the path list * @throws IOException if an error occurred */ List getTableRootPaths(TableName tableName, boolean includeSnapshotPath) - throws IOException { + throws IOException { List paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName), - pathHelper.getMobTableDir(tableName), - pathHelper.getArchiveTableDir(tableName)); + pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName)); if (includeSnapshotPath) { paths.addAll(getTableSnapshotPaths(tableName)); } @@ -485,9 +483,9 @@ List getTableRootPaths(TableName tableName, boolean includeSnapshotPath) 
private List getTableSnapshotPaths(TableName tableName) throws IOException { return admin.listSnapshots().stream() - .filter(snapDesc -> snapDesc.getTableName().equals(tableName)) - .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName())) - .collect(Collectors.toList()); + .filter(snapDesc -> snapDesc.getTableName().equals(tableName)) + .map(snapshotDescription -> pathHelper.getSnapshotDir(snapshotDescription.getName())) + .collect(Collectors.toList()); } /** @@ -501,15 +499,15 @@ private Set getUsersWithGlobalReadAction() throws IOException { /** * Return users with namespace read permission - * @param namespace the namespace + * @param namespace the namespace * @param includeGlobal true if include users with global read action * @return users with namespace read permission * @throws IOException if an error occurred */ Set getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal) - throws IOException { + throws IOException { Set users = - getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace)); + getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace)); if (includeGlobal) { users.addAll(getUsersWithGlobalReadAction()); } @@ -518,28 +516,28 @@ Set getUsersWithNamespaceReadAction(String namespace, boolean includeGlo /** * Return users with table read permission - * @param tableName the table + * @param tableName the table * @param includeNamespace true if include users with namespace read action - * @param includeGlobal true if include users with global read action + * @param includeGlobal true if include users with global read action * @return users with table read permission * @throws IOException if an error occurred */ Set getUsersWithTableReadAction(TableName tableName, boolean includeNamespace, - boolean includeGlobal) throws IOException { + boolean includeGlobal) throws IOException { Set users = - getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName)); + getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName)); if (includeNamespace) { users - .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal)); + .addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal)); } return users; } private Set - getUsersWithReadAction(ListMultimap permissionMultimap) { + getUsersWithReadAction(ListMultimap permissionMultimap) { return permissionMultimap.entries().stream() - .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey) - .collect(Collectors.toSet()); + .filter(entry -> checkUserPermission(entry.getValue())).map(Map.Entry::getKey) + .collect(Collectors.toSet()); } private boolean checkUserPermission(UserPermission userPermission) { @@ -565,13 +563,14 @@ public static boolean isAclSyncToHdfsEnabled(Configuration conf) { Collections.addAll(masterCoprocessorSet, masterCoprocessors); } return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false) - && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName()) - && masterCoprocessorSet.contains(AccessController.class.getName()); + && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName()) + && masterCoprocessorSet.contains(AccessController.class.getName()); } boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) { - return tableDescriptor == null ? 
false - : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE)); + return tableDescriptor == null + ? false + : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE)); } PathHelper getPathHelper() { @@ -608,14 +607,14 @@ private CompletableFuture handleHDFSAclSequential(List o private CompletableFuture handleHDFSAclParallel(List operations) { List> futures = - operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList()); + operations.stream().map(this::handleHDFSAcl).collect(Collectors.toList()); return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])); } private static AclEntry aclEntry(AclEntryScope scope, String name) { return new AclEntry.Builder().setScope(scope) - .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name) - .setPermission(READ_EXECUTE).build(); + .setType(AuthUtil.isGroupPrincipal(name) ? GROUP : USER).setName(name) + .setPermission(READ_EXECUTE).build(); } void createDirIfNotExist(Path path) throws IOException { @@ -636,11 +635,14 @@ void deleteEmptyDir(Path path) throws IOException { */ private static class HDFSAclOperation { enum OperationType { - MODIFY, REMOVE + MODIFY, + REMOVE } enum AclType { - ACCESS, DEFAULT, DEFAULT_ADN_ACCESS + ACCESS, + DEFAULT, + DEFAULT_ADN_ACCESS } private interface Operation { @@ -657,7 +659,7 @@ private interface Operation { private List defaultAclEntries; HDFSAclOperation(FileSystem fs, Path path, Set users, OperationType operationType, - boolean recursive, AclType aclType) { + boolean recursive, AclType aclType) { this.fs = fs; this.path = path; this.defaultAndAccessAclEntries = getAclEntries(AclType.DEFAULT_ADN_ACCESS, users); @@ -756,12 +758,12 @@ static final class PathHelper { PathHelper(Configuration conf) { this.conf = conf; rootDir = new Path(conf.get(HConstants.HBASE_DIR)); - tmpDataDir = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), - HConstants.BASE_NAMESPACE_DIR); + tmpDataDir = + new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.BASE_NAMESPACE_DIR); dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR); mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR); archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY), - HConstants.BASE_NAMESPACE_DIR); + HConstants.BASE_NAMESPACE_DIR); snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME); } @@ -807,12 +809,12 @@ Path getMobDataNsDir(String namespace) { Path getDataTableDir(TableName tableName) { return new Path(getDataNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getMobTableDir(TableName tableName) { return new Path(getMobDataNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getArchiveNsDir(String namespace) { @@ -821,7 +823,7 @@ Path getArchiveNsDir(String namespace) { Path getArchiveTableDir(TableName tableName) { return new Path(getArchiveNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getTmpNsDir(String namespace) { @@ -830,7 +832,7 @@ Path getTmpNsDir(String namespace) { Path getTmpTableDir(TableName tableName) { return new Path(getTmpNsDir(tableName.getNamespaceAsString()), - tableName.getQualifierAsString()); + tableName.getQualifierAsString()); } Path getSnapshotRootDir() { diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index 1102dac12a53..5d674c78cdc7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -15,9 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; @@ -26,31 +35,20 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * Handles synchronization of access control list entries and updates - * throughout all nodes in the cluster. The {@link AccessController} instance - * on the {@code _acl_} table regions, creates a znode for each table as - * {@code /hbase/acl/tablename}, with the znode data containing a serialized - * list of the permissions granted for the table. The {@code AccessController} - * instances on all other cluster hosts watch the znodes for updates, which - * trigger updates in the {@link AuthManager} permission cache. + * Handles synchronization of access control list entries and updates throughout all nodes in the + * cluster. The {@link AccessController} instance on the {@code _acl_} table regions, creates a + * znode for each table as {@code /hbase/acl/tablename}, with the znode data containing a serialized + * list of the permissions granted for the table. The {@code AccessController} instances on all + * other cluster hosts watch the znodes for updates, which trigger updates in the + * {@link AuthManager} permission cache. 
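The reflowed class comment describes the acl znode mirror. A hedged sketch of the write path it refers to: writePermissionsAsBytes and writeToZookeeper both appear in this patch, while the watcher instance, the Configuration, the permissions multimap, and the /hbase/acl parent (the default for zookeeper.znode.acl.parent) are assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.PermissionStorage;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
    import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;

    final class AclZNodeWriteSketch {
      static void publish(ZKPermissionWatcher watcher, Configuration conf,
          ListMultimap<String, UserPermission> perms) {
        byte[] entry = TableName.valueOf("ns1", "t1").getName();
        // Serialize the grants with the protobuf magic prefix, as writePermissionsAsBytes does.
        byte[] permsData = PermissionStorage.writePermissionsAsBytes(perms, conf);
        // Creates or updates /hbase/acl/ns1:t1; every other AccessController host watches
        // that znode and refreshes its AuthManager cache when the data changes.
        watcher.writeToZookeeper(entry, permsData);
      }
    }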
*/ @InterfaceAudience.Private public class ZKPermissionWatcher extends ZKListener implements Closeable { @@ -63,8 +61,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable { private final ExecutorService executor; private Future childrenChangedFuture; - public ZKPermissionWatcher(ZKWatcher watcher, - AuthManager authManager, Configuration conf) { + public ZKPermissionWatcher(ZKWatcher watcher, AuthManager authManager, Configuration conf) { super(watcher); this.authManager = authManager; String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE); @@ -83,7 +80,7 @@ public void start() throws KeeperException { @Override public Void call() throws KeeperException { List existing = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); if (existing != null) { refreshNodes(existing); } @@ -92,7 +89,7 @@ public Void call() throws KeeperException { }).get(); } catch (ExecutionException ex) { if (ex.getCause() instanceof KeeperException) { - throw (KeeperException)ex.getCause(); + throw (KeeperException) ex.getCause(); } else { throw new RuntimeException(ex.getCause()); } @@ -128,7 +125,7 @@ public void nodeCreated(String path) { public void run() { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error("Error reading data from zookeeper", ke); @@ -182,7 +179,6 @@ public void run() { } } - @Override public void nodeChildrenChanged(final String path) { waitUntilStarted(); @@ -239,8 +235,7 @@ private void refreshNodes(List nodes) { try { refreshAuthManager(entry, n.getData()); } catch (IOException ioe) { - LOG.error("Failed parsing permissions for table '" + entry + - "' from zk", ioe); + LOG.error("Failed parsing permissions for table '" + entry + "' from zk", ioe); } } } @@ -248,7 +243,7 @@ private void refreshNodes(List nodes) { private void refreshAuthManager(String entry, byte[] nodeData) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Updating permissions cache from {} with data {}", entry, - Bytes.toStringBinary(nodeData)); + Bytes.toStringBinary(nodeData)); } if (PermissionStorage.isNamespaceEntry(entry)) { authManager.refreshNamespaceCacheFromWritable(PermissionStorage.fromNamespaceEntry(entry), @@ -259,9 +254,7 @@ private void refreshAuthManager(String entry, byte[] nodeData) throws IOExceptio } /*** - * Write a table's access controls to the permissions mirror in zookeeper - * @param entry - * @param permsData + * Write a table's access controls to the permissions mirror in zookeeper nn */ public void writeToZookeeper(byte[] entry, byte[] permsData) { String entryName = Bytes.toString(entry); @@ -272,15 +265,13 @@ public void writeToZookeeper(byte[] entry, byte[] permsData) { ZKUtil.createWithParents(watcher, zkNode); ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1); } catch (KeeperException e) { - LOG.error("Failed updating permissions for entry '" + - entryName + "'", e); - watcher.abort("Failed writing node "+zkNode+" to zookeeper", e); + LOG.error("Failed updating permissions for entry '" + entryName + "'", e); + watcher.abort("Failed writing node " + zkNode + " to zookeeper", e); } } /*** - * Delete the acl notify node of table - * @param tableName + * Delete the acl notify node of table n */ public void deleteTableACLNode(final TableName tableName) { String zkNode = 
ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, ACL_NODE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java index e6dc3574726e..ce0dc7ce2558 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java @@ -19,17 +19,15 @@ import java.util.Optional; import java.util.function.Supplier; - import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Wrapper around a SaslServer which provides the last user attempting to authenticate via SASL, - * if the server/mechanism allow figuring that out. + * Wrapper around a SaslServer which provides the last user attempting to authenticate via SASL, if + * the server/mechanism allow figuring that out. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -37,8 +35,8 @@ public class AttemptingUserProvidingSaslServer { private final Supplier producer; private final SaslServer saslServer; - public AttemptingUserProvidingSaslServer( - SaslServer saslServer, Supplier producer) { + public AttemptingUserProvidingSaslServer(SaslServer saslServer, + Supplier producer) { this.saslServer = saslServer; this.producer = producer; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java index b3236d653764..03610014d5cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -30,7 +29,6 @@ import javax.security.sasl.RealmCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; import org.apache.hadoop.hbase.security.SaslUtil; @@ -44,16 +42,16 @@ @InterfaceAudience.Private public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthenticationProvider - implements SaslServerAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - DigestSaslServerAuthenticationProvider.class); + implements SaslServerAuthenticationProvider { + private static final Logger LOG = + LoggerFactory.getLogger(DigestSaslServerAuthenticationProvider.class); private AtomicReference attemptingUser = new AtomicReference<>(null); @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + 
throws IOException { if (secretManager == null) { throw new AccessDeniedException("Server is not configured to do DIGEST authentication."); } @@ -70,7 +68,7 @@ private static class SaslDigestCallbackHandler implements CallbackHandler { private final AtomicReference attemptingUser; public SaslDigestCallbackHandler(SecretManager secretManager, - AtomicReference attemptingUser) { + AtomicReference attemptingUser) { this.secretManager = secretManager; this.attemptingUser = attemptingUser; } @@ -99,13 +97,13 @@ public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbac } } if (pc != null) { - TokenIdentifier tokenIdentifier = HBaseSaslRpcServer.getIdentifier( - nc.getDefaultName(), secretManager); + TokenIdentifier tokenIdentifier = + HBaseSaslRpcServer.getIdentifier(nc.getDefaultName(), secretManager); attemptingUser.set(tokenIdentifier.getUser()); char[] password = getPassword(tokenIdentifier); if (LOG.isTraceEnabled()) { LOG.trace("SASL server DIGEST-MD5 callback: setting password for client: {}", - tokenIdentifier.getUser()); + tokenIdentifier.getUser()); } pc.setPassword(password); } @@ -123,8 +121,8 @@ public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbac if (authenticatedUserId.equals(userRequestedToExecuteAs)) { ac.setAuthorized(true); if (LOG.isTraceEnabled()) { - String username = HBaseSaslRpcServer.getIdentifier( - userRequestedToExecuteAs, secretManager).getUser().getUserName(); + String username = HBaseSaslRpcServer + .getIdentifier(userRequestedToExecuteAs, secretManager).getUser().getUserName(); LOG.trace( "SASL server DIGEST-MD5 callback: setting " + "canonicalized client ID: " + username); } @@ -143,13 +141,12 @@ public boolean supportsProtocolAuthentication() { @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation authorizedUgi; TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authzId, secretManager); authorizedUgi = tokenId.getUser(); if (authorizedUgi == null) { - throw new AccessDeniedException( - "Can't retrieve username from tokenIdentifier."); + throw new AccessDeniedException("Can't retrieve username from tokenIdentifier."); } authorizedUgi.addTokenIdentifier(tokenId); authorizedUgi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java index 8a542c69c0dc..1d9b1bd1aa91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java @@ -1,5 +1,5 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file @@ -20,14 +20,12 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.Map; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.sasl.AuthorizeCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; - import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -39,29 +37,30 @@ @InterfaceAudience.Private public class GssSaslServerAuthenticationProvider extends GssSaslAuthenticationProvider - implements SaslServerAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - GssSaslServerAuthenticationProvider.class); + implements SaslServerAuthenticationProvider { + private static final Logger LOG = + LoggerFactory.getLogger(GssSaslServerAuthenticationProvider.class); @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + throws IOException { UserGroupInformation current = UserGroupInformation.getCurrentUser(); String fullName = current.getUserName(); LOG.debug("Server's Kerberos principal name is {}", fullName); String[] names = SaslUtil.splitKerberosName(fullName); if (names.length != 3) { throw new AccessDeniedException( - "Kerberos principal does NOT contain an instance (hostname): " + fullName); + "Kerberos principal does NOT contain an instance (hostname): " + fullName); } try { return current.doAs(new PrivilegedExceptionAction() { @Override public AttemptingUserProvidingSaslServer run() throws SaslException { - return new AttemptingUserProvidingSaslServer(Sasl.createSaslServer( - getSaslAuthMethod().getSaslMechanism(), names[0], names[1], saslProps, - new SaslGssCallbackHandler()), () -> null); + return new AttemptingUserProvidingSaslServer( + Sasl.createSaslServer(getSaslAuthMethod().getSaslMechanism(), names[0], names[1], + saslProps, new SaslGssCallbackHandler()), + () -> null); } }); } catch (InterruptedException e) { @@ -107,7 +106,7 @@ public boolean supportsProtocolAuthentication() { @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation ugi = UserGroupInformation.createRemoteUser(authzId); ugi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod()); return ugi; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java index 3487cfcd586e..866b00d6e349 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; @@ -29,8 +28,8 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulates the server-side logic to 
authenticate a client over SASL. Tied one-to-one to - * a single client authentication implementation. + * Encapsulates the server-side logic to authenticate a client over SASL. Tied one-to-one to a + * single client authentication implementation. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -39,16 +38,17 @@ public interface SaslServerAuthenticationProvider extends SaslAuthenticationProv /** * Allows implementations to initialize themselves, prior to creating a server. */ - default void init(Configuration conf) throws IOException {} + default void init(Configuration conf) throws IOException { + } /** * Creates the SaslServer to accept incoming SASL authentication requests. */ AttemptingUserProvidingSaslServer createServer(SecretManager secretManager, - Map saslProps) throws IOException; + Map saslProps) throws IOException; boolean supportsProtocolAuthentication(); UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException; + SecretManager secretManager) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java index 829498dfd9fe..17480cb4659b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.ServiceLoader; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -32,17 +31,17 @@ @InterfaceAudience.Private public final class SaslServerAuthenticationProviders { - private static final Logger LOG = LoggerFactory.getLogger( - SaslClientAuthenticationProviders.class); + private static final Logger LOG = + LoggerFactory.getLogger(SaslClientAuthenticationProviders.class); public static final String EXTRA_PROVIDERS_KEY = "hbase.server.sasl.provider.extras"; private static final AtomicReference holder = - new AtomicReference<>(); + new AtomicReference<>(); private final HashMap providers; private SaslServerAuthenticationProviders(Configuration conf, - HashMap providers) { + HashMap providers) { this.providers = providers; } @@ -87,14 +86,14 @@ public static void reset() { * already exist in the map. 
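For context, a minimal configuration sketch for the extras hook referenced above; the provider class name is a hypothetical placeholder, not something defined in this patch:

    import org.apache.hadoop.conf.Configuration;

    public class ExtraSaslProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // EXTRA_PROVIDERS_KEY ("hbase.server.sasl.provider.extras") takes a comma-separated
        // list of SaslServerAuthenticationProvider implementations to load in addition to
        // the ServiceLoader-discovered ones.
        conf.set("hbase.server.sasl.provider.extras",
          "com.example.MySaslServerAuthenticationProvider");
      }
    }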
*/ static void addProviderIfNotExists(SaslServerAuthenticationProvider provider, - HashMap providers) { + HashMap providers) { final byte newProviderAuthCode = provider.getSaslAuthMethod().getCode(); - final SaslServerAuthenticationProvider alreadyRegisteredProvider = providers.get( - newProviderAuthCode); + final SaslServerAuthenticationProvider alreadyRegisteredProvider = + providers.get(newProviderAuthCode); if (alreadyRegisteredProvider != null) { throw new RuntimeException("Trying to load SaslServerAuthenticationProvider " - + provider.getClass() + ", but "+ alreadyRegisteredProvider.getClass() - + " is already registered with the same auth code"); + + provider.getClass() + ", but " + alreadyRegisteredProvider.getClass() + + " is already registered with the same auth code"); } providers.put(newProviderAuthCode, provider); } @@ -103,7 +102,7 @@ static void addProviderIfNotExists(SaslServerAuthenticationProvider provider, * Adds any providers defined in the configuration. */ static void addExtraProviders(Configuration conf, - HashMap providers) { + HashMap providers) { for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class clz; try { @@ -115,16 +114,16 @@ static void addExtraProviders(Configuration conf, if (!SaslServerAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Server authentication class {} is not an instance of " - + "SaslServerAuthenticationProvider", clz); + + "SaslServerAuthenticationProvider", clz); continue; } try { SaslServerAuthenticationProvider provider = - (SaslServerAuthenticationProvider) clz.getConstructor().newInstance(); + (SaslServerAuthenticationProvider) clz.getConstructor().newInstance(); addProviderIfNotExists(provider, providers); } catch (InstantiationException | IllegalAccessException | NoSuchMethodException - | InvocationTargetException e) { + | InvocationTargetException e) { LOG.warn("Failed to instantiate {}", clz, e); } } @@ -136,8 +135,8 @@ static void addExtraProviders(Configuration conf, */ static SaslServerAuthenticationProviders createProviders(Configuration conf) { ServiceLoader loader = - ServiceLoader.load(SaslServerAuthenticationProvider.class); - HashMap providers = new HashMap<>(); + ServiceLoader.load(SaslServerAuthenticationProvider.class); + HashMap providers = new HashMap<>(); for (SaslServerAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providers); } @@ -146,8 +145,7 @@ static SaslServerAuthenticationProviders createProviders(Configuration conf) { if (LOG.isTraceEnabled()) { String loadedProviders = providers.values().stream() - .map((provider) -> provider.getClass().getName()) - .collect(Collectors.joining(", ")); + .map((provider) -> provider.getClass().getName()).collect(Collectors.joining(", ")); if (loadedProviders.isEmpty()) { loadedProviders = "None!"; } @@ -155,14 +153,13 @@ static SaslServerAuthenticationProviders createProviders(Configuration conf) { } // Initialize the providers once, before we get into the RPC path. - providers.forEach((b,provider) -> { + providers.forEach((b, provider) -> { try { // Give them a copy, just to make sure there is no funny-business going on. 
provider.init(new Configuration(conf)); } catch (IOException e) { LOG.error("Failed to initialize {}", provider.getClass(), e); - throw new RuntimeException( - "Failed to initialize " + provider.getClass().getName(), e); + throw new RuntimeException("Failed to initialize " + provider.getClass().getName(), e); } }); @@ -181,10 +178,8 @@ public SaslServerAuthenticationProvider selectProvider(byte authByte) { * Extracts the SIMPLE authentication provider. */ public SaslServerAuthenticationProvider getSimpleProvider() { - Optional opt = providers.values() - .stream() - .filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider) - .findFirst(); + Optional opt = providers.values().stream() + .filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider).findFirst(); if (!opt.isPresent()) { throw new RuntimeException("SIMPLE authentication provider not available when it should be"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java index ed7bf4ce9e76..27154174469d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Map; - import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; @@ -27,12 +26,12 @@ @InterfaceAudience.Private public class SimpleSaslServerAuthenticationProvider extends SimpleSaslAuthenticationProvider - implements SaslServerAuthenticationProvider { + implements SaslServerAuthenticationProvider { @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + throws IOException { throw new RuntimeException("HBase SIMPLE authentication doesn't use SASL"); } @@ -43,7 +42,7 @@ public boolean supportsProtocolAuthentication() { @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation ugi = UserGroupInformation.createRemoteUser(authzId); ugi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod()); return ugi; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java index 9e124a54111c..b250e98f572b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java @@ -15,25 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.token; -import javax.crypto.SecretKey; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.time.Instant; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; +import javax.crypto.SecretKey; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Represents a secret key used for signing and verifying authentication tokens - * by {@link AuthenticationTokenSecretManager}. + * Represents a secret key used for signing and verifying authentication tokens by + * {@link AuthenticationTokenSecretManager}. */ @InterfaceAudience.Private public class AuthenticationKey implements Writable { @@ -80,21 +77,18 @@ public boolean equals(Object obj) { if (obj == null || !(obj instanceof AuthenticationKey)) { return false; } - AuthenticationKey other = (AuthenticationKey)obj; - return id == other.getKeyId() && - expirationDate == other.getExpiration() && - (secret == null ? other.getKey() == null : - other.getKey() != null && - Bytes.equals(secret.getEncoded(), other.getKey().getEncoded())); + AuthenticationKey other = (AuthenticationKey) obj; + return id == other.getKeyId() && expirationDate == other.getExpiration() + && (secret == null + ? other.getKey() == null + : other.getKey() != null && Bytes.equals(secret.getEncoded(), other.getKey().getEncoded())); } @Override public String toString() { StringBuilder buf = new StringBuilder(); - buf.append("AuthenticationKey[") - .append("id=").append(id) - .append(", expiration=").append(Instant.ofEpochMilli(this.expirationDate)) - .append(", obj=").append(super.toString()) + buf.append("AuthenticationKey[").append("id=").append(id).append(", expiration=") + .append(Instant.ofEpochMilli(this.expirationDate)).append(", obj=").append(super.toString()) .append("]"); return buf.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 641288c03836..5070697439fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -15,53 +15,46 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.token; -import javax.crypto.SecretKey; import java.io.IOException; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; +import javax.crypto.SecretKey; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Manages an internal list of secret keys used to sign new authentication - * tokens as they are generated, and to valid existing tokens used for - * authentication. - * + * Manages an internal list of secret keys used to sign new authentication tokens as they are + * generated, and to valid existing tokens used for authentication. *
<p>
    - * A single instance of {@code AuthenticationTokenSecretManager} will be - * running as the "leader" in a given HBase cluster. The leader is responsible - * for periodically generating new secret keys, which are then distributed to - * followers via ZooKeeper, and for expiring previously used secret keys that - * are no longer needed (as any tokens using them have expired). + * A single instance of {@code AuthenticationTokenSecretManager} will be running as the "leader" in + * a given HBase cluster. The leader is responsible for periodically generating new secret keys, + * which are then distributed to followers via ZooKeeper, and for expiring previously used secret + * keys that are no longer needed (as any tokens using them have expired). *
<p>
    */ @InterfaceAudience.Private -public class AuthenticationTokenSecretManager - extends SecretManager { +public class AuthenticationTokenSecretManager extends SecretManager { static final String NAME_PREFIX = "SecretManager-"; - private static final Logger LOG = LoggerFactory.getLogger( - AuthenticationTokenSecretManager.class); + private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSecretManager.class); private long lastKeyUpdate; private long keyUpdateInterval; @@ -70,7 +63,7 @@ public class AuthenticationTokenSecretManager private LeaderElector leaderElector; private ZKClusterId clusterId; - private Map allKeys = new ConcurrentHashMap<>(); + private Map allKeys = new ConcurrentHashMap<>(); private AuthenticationKey currentKey; private int idSeq; @@ -79,23 +72,25 @@ public class AuthenticationTokenSecretManager /** * Create a new secret manager instance for generating keys. - * @param conf Configuration to use - * @param zk Connection to zookeeper for handling leader elections - * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token signing - * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer valid + * @param conf Configuration to use + * @param zk Connection to zookeeper for handling leader elections + * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token + * signing + * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer + * valid */ - /* TODO: Restrict access to this constructor to make rogues instances more difficult. - * For the moment this class is instantiated from - * org.apache.hadoop.hbase.ipc.SecureServer so public access is needed. + /* + * TODO: Restrict access to this constructor to make rogues instances more difficult. For the + * moment this class is instantiated from org.apache.hadoop.hbase.ipc.SecureServer so public + * access is needed. 
*/ - public AuthenticationTokenSecretManager(Configuration conf, - ZKWatcher zk, String serverName, - long keyUpdateInterval, long tokenMaxLifetime) { + public AuthenticationTokenSecretManager(Configuration conf, ZKWatcher zk, String serverName, + long keyUpdateInterval, long tokenMaxLifetime) { this.zkWatcher = new ZKSecretWatcher(conf, zk, this); this.keyUpdateInterval = keyUpdateInterval; this.tokenMaxLifetime = tokenMaxLifetime; this.leaderElector = new LeaderElector(zk, serverName); - this.name = NAME_PREFIX+serverName; + this.name = NAME_PREFIX + serverName; this.clusterId = new ZKClusterId(zk, zk); } @@ -130,31 +125,29 @@ protected synchronized byte[] createPassword(AuthenticationTokenIdentifier ident identifier.setIssueDate(now); identifier.setExpirationDate(now + tokenMaxLifetime); identifier.setSequenceNumber(tokenSeq.getAndIncrement()); - return createPassword(identifier.getBytes(), - secretKey.getKey()); + return createPassword(identifier.getBytes(), secretKey.getKey()); } @Override - public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) - throws InvalidToken { + public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) throws InvalidToken { long now = EnvironmentEdgeManager.currentTime(); if (identifier.getExpirationDate() < now) { throw new InvalidToken("Token has expired"); } AuthenticationKey masterKey = allKeys.get(identifier.getKeyId()); - if(masterKey == null) { - if(zkWatcher.getWatcher().isAborted()) { + if (masterKey == null) { + if (zkWatcher.getWatcher().isAborted()) { LOG.error("ZKWatcher is abort"); - throw new InvalidToken("Token keys could not be sync from zookeeper" - + " because of ZKWatcher abort"); + throw new InvalidToken( + "Token keys could not be sync from zookeeper" + " because of ZKWatcher abort"); } synchronized (this) { if (!leaderElector.isAlive() || leaderElector.isStopped()) { - LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" - + leaderElector.getId() + "] is stopped or not alive"); + LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" + leaderElector.getId() + + "] is stopped or not alive"); leaderElector.start(); - LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" - + leaderElector.getId() + "] is started"); + LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" + leaderElector.getId() + + "] is started"); } } zkWatcher.refreshKeys(); @@ -164,12 +157,10 @@ public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) masterKey = allKeys.get(identifier.getKeyId()); } if (masterKey == null) { - throw new InvalidToken("Unknown master key for token (id="+ - identifier.getKeyId()+")"); + throw new InvalidToken("Unknown master key for token (id=" + identifier.getKeyId() + ")"); } // regenerate the password - return createPassword(identifier.getBytes(), - masterKey.getKey()); + return createPassword(identifier.getBytes(), masterKey.getKey()); } @Override @@ -178,8 +169,7 @@ public AuthenticationTokenIdentifier createIdentifier() { } public Token generateToken(String username) { - AuthenticationTokenIdentifier ident = - new AuthenticationTokenIdentifier(username); + AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier(username); Token token = new Token<>(ident, this); if (clusterId.hasId()) { token.setService(new Text(clusterId.getId())); @@ -259,9 +249,11 @@ synchronized void rollCurrentKey() { long now = EnvironmentEdgeManager.currentTime(); AuthenticationKey prev = currentKey; - AuthenticationKey newKey = new 
AuthenticationKey(++idSeq, - Long.MAX_VALUE, // don't allow to expire until it's replaced by a new key - generateSecret()); + AuthenticationKey newKey = new AuthenticationKey(++idSeq, Long.MAX_VALUE, // don't allow to + // expire until it's + // replaced by a new + // key + generateSecret()); allKeys.put(newKey.getKeyId(), newKey); currentKey = newKey; zkWatcher.addKeyToZK(newKey); @@ -292,8 +284,8 @@ private class LeaderElector extends Thread implements Stoppable { public LeaderElector(ZKWatcher watcher, String serverName) { setDaemon(true); setName("ZKSecretWatcher-leaderElector"); - zkLeader = new ZKLeaderManager(watcher, - ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), + zkLeader = + new ZKLeaderManager(watcher, ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), Bytes.toBytes(serverName), this); } @@ -318,7 +310,7 @@ public void stop(String reason) { zkLeader.stepDownAsLeader(); } isMaster = false; - LOG.info("Stopping leader election, because: "+reason); + LOG.info("Stopping leader election, because: " + reason); interrupt(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java index 9a58006343e6..64b889cd0668 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java @@ -35,9 +35,8 @@ import org.slf4j.LoggerFactory; /** - * Helper class to obtain a filesystem delegation token. - * Mainly used by Map-Reduce jobs that requires to read/write data to - * a remote file-system (e.g. BulkLoad, ExportSnapshot). + * Helper class to obtain a filesystem delegation token. Mainly used by Map-Reduce jobs that + * requires to read/write data to a remote file-system (e.g. BulkLoad, ExportSnapshot). */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -60,15 +59,13 @@ public FsDelegationToken(final UserProvider userProvider, final String renewer) } /** - * Acquire the delegation token for the specified filesystem. - * Before requesting a new delegation token, tries to find one already available. - * Currently supports checking existing delegation tokens for swebhdfs, webhdfs and hdfs. - * + * Acquire the delegation token for the specified filesystem. Before requesting a new delegation + * token, tries to find one already available. Currently supports checking existing delegation + * tokens for swebhdfs, webhdfs and hdfs. * @param fs the filesystem that requires the delegation token * @throws IOException on fs.getDelegationToken() failure */ - public void acquireDelegationToken(final FileSystem fs) - throws IOException { + public void acquireDelegationToken(final FileSystem fs) throws IOException { String tokenKind; String scheme = fs.getUri().getScheme(); if (SWEBHDFS_SCHEME.equalsIgnoreCase(scheme)) { @@ -87,15 +84,14 @@ public void acquireDelegationToken(final FileSystem fs) } /** - * Acquire the delegation token for the specified filesystem and token kind. - * Before requesting a new delegation token, tries to find one already available. - * + * Acquire the delegation token for the specified filesystem and token kind. Before requesting a + * new delegation token, tries to find one already available. 
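A minimal usage sketch for the helper described above, assuming a Hadoop Configuration and a (possibly secured) FileSystem are already at hand; variable names and the renewer principal are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.security.UserProvider;
    import org.apache.hadoop.hbase.security.token.FsDelegationToken;

    public class FsDelegationTokenSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // "renewer" is a placeholder principal; an already cached token is reused if present.
        FsDelegationToken fsToken =
          new FsDelegationToken(UserProvider.instantiate(conf), "renewer");
        fsToken.acquireDelegationToken(fs);
        // ... run the bulk load / snapshot export, then release:
        fsToken.releaseDelegationToken();
      }
    }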
* @param tokenKind non-null token kind to get delegation token from the {@link UserProvider} - * @param fs the filesystem that requires the delegation token + * @param fs the filesystem that requires the delegation token * @throws IOException on fs.getDelegationToken() failure */ public void acquireDelegationToken(final String tokenKind, final FileSystem fs) - throws IOException { + throws IOException { Objects.requireNonNull(tokenKind, "tokenKind:null"); if (userProvider.isHadoopSecurityEnabled()) { this.fs = fs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java index 2946344e73d8..dd4fd1c8b7b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.security.token; - import java.io.IOException; import java.util.Collections; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -46,33 +45,33 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos; /** - * Provides a service for obtaining authentication tokens via the - * {@link AuthenticationProtos} AuthenticationService coprocessor service. + * Provides a service for obtaining authentication tokens via the {@link AuthenticationProtos} + * AuthenticationService coprocessor service. */ @CoreCoprocessor @InterfaceAudience.Private -public class TokenProvider implements AuthenticationProtos.AuthenticationService.Interface, - RegionCoprocessor { +public class TokenProvider + implements AuthenticationProtos.AuthenticationService.Interface, RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(TokenProvider.class); private AuthenticationTokenSecretManager secretManager; - @Override public void start(CoprocessorEnvironment env) { // if running at region if (env instanceof RegionCoprocessorEnvironment) { - RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment)env; - /* Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region - is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE. - TODO: FIX. + RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env; + /* + * Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region + * is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE. + * TODO: FIX. 
*/ - RegionServerServices rss = ((HasRegionServerServices)regionEnv).getRegionServerServices(); + RegionServerServices rss = ((HasRegionServerServices) regionEnv).getRegionServerServices(); RpcServerInterface server = rss.getRpcServer(); - SecretManager mgr = ((RpcServer)server).getSecretManager(); + SecretManager mgr = ((RpcServer) server).getSecretManager(); if (mgr instanceof AuthenticationTokenSecretManager) { - secretManager = (AuthenticationTokenSecretManager)mgr; + secretManager = (AuthenticationTokenSecretManager) mgr; } } } @@ -90,9 +89,10 @@ private boolean isAllowedDelegationTokenOp(UserGroupInformation ugi) throws IOEx if (authMethod == AuthenticationMethod.PROXY) { authMethod = ugi.getRealUser().getAuthenticationMethod(); } - if (authMethod != AuthenticationMethod.KERBEROS - && authMethod != AuthenticationMethod.KERBEROS_SSL - && authMethod != AuthenticationMethod.CERTIFICATE) { + if ( + authMethod != AuthenticationMethod.KERBEROS && authMethod != AuthenticationMethod.KERBEROS_SSL + && authMethod != AuthenticationMethod.CERTIFICATE + ) { return false; } return true; @@ -102,34 +102,33 @@ private boolean isAllowedDelegationTokenOp(UserGroupInformation ugi) throws IOEx @Override public Iterable getServices() { - return Collections.singleton( - AuthenticationProtos.AuthenticationService.newReflectiveService(this)); + return Collections + .singleton(AuthenticationProtos.AuthenticationService.newReflectiveService(this)); } @Override public void getAuthenticationToken(RpcController controller, - AuthenticationProtos.GetAuthenticationTokenRequest request, - RpcCallback done) { + AuthenticationProtos.GetAuthenticationTokenRequest request, + RpcCallback done) { AuthenticationProtos.GetAuthenticationTokenResponse.Builder response = - AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder(); + AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder(); try { if (secretManager == null) { - throw new IOException( - "No secret manager configured for token authentication"); + throw new IOException("No secret manager configured for token authentication"); } User currentUser = RpcServer.getRequestUser() - .orElseThrow(() -> new AccessDeniedException("No authenticated user for request!")); + .orElseThrow(() -> new AccessDeniedException("No authenticated user for request!")); UserGroupInformation ugi = currentUser.getUGI(); if (!isAllowedDelegationTokenOp(ugi)) { - LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod=" + - ugi.getAuthenticationMethod()); + LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod=" + + ugi.getAuthenticationMethod()); throw new AccessDeniedException( - "Token generation only allowed for Kerberos authenticated clients"); + "Token generation only allowed for Kerberos authenticated clients"); } Token token = - secretManager.generateToken(currentUser.getName()); + secretManager.generateToken(currentUser.getName()); response.setToken(ClientTokenUtil.toToken(token)).build(); } catch (IOException ioe) { CoprocessorRpcUtils.setControllerException(controller, ioe); @@ -139,9 +138,9 @@ public void getAuthenticationToken(RpcController controller, @Override public void whoAmI(RpcController controller, AuthenticationProtos.WhoAmIRequest request, - RpcCallback done) { + RpcCallback done) { AuthenticationProtos.WhoAmIResponse.Builder response = - AuthenticationProtos.WhoAmIResponse.newBuilder(); + AuthenticationProtos.WhoAmIResponse.newBuilder(); RpcServer.getRequestUser().ifPresent(requestUser -> { 
response.setUsername(requestUser.getShortName()); AuthenticationMethod method = requestUser.getUGI().getAuthenticationMethod(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 603ec60ec618..9f8a9219bb34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,15 +43,15 @@ public class TokenUtil { // This class is referenced indirectly by User out in common; instances are created by reflection private static final Logger LOG = LoggerFactory.getLogger(TokenUtil.class); - /** - * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.AsyncConnection)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. - */ + /** + * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.AsyncConnection)}. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. + */ @Deprecated - public static CompletableFuture> obtainToken( - AsyncConnection conn) { + public static CompletableFuture> + obtainToken(AsyncConnection conn) { return ClientTokenUtil.obtainToken(conn); } @@ -62,7 +62,7 @@ public static CompletableFuture> obtainToke */ @Deprecated public static Token obtainToken(Configuration conf) - throws IOException { + throws IOException { try (Connection connection = ConnectionFactory.createConnection(conf)) { return obtainToken(connection); } @@ -70,22 +70,21 @@ public static Token obtainToken(Configuration con /** * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.Connection)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. */ @Deprecated public static Token obtainToken(Connection conn) - throws IOException { + throws IOException { return ClientTokenUtil.obtainToken(conn); } - /** * See {@link ClientTokenUtil#toToken(Token)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. 
*/ @Deprecated public static AuthenticationProtos.Token toToken(Token token) { @@ -94,54 +93,51 @@ public static AuthenticationProtos.Token toToken(Token obtainToken( - final Connection conn, User user) throws IOException, InterruptedException { + public static Token obtainToken(final Connection conn, User user) + throws IOException, InterruptedException { return ClientTokenUtil.obtainToken(conn, user); } /** * See {@link ClientTokenUtil#obtainAndCacheToken(Connection, User)}. */ - public static void obtainAndCacheToken(final Connection conn, - User user) - throws IOException, InterruptedException { + public static void obtainAndCacheToken(final Connection conn, User user) + throws IOException, InterruptedException { ClientTokenUtil.obtainAndCacheToken(conn, user); } /** - * See {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}. - * @deprecated External users should not use this method. Please post on - * the HBase dev mailing list if you need this method. Internal - * HBase code should use {@link ClientTokenUtil} instead. + * See + * {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}. + * @deprecated External users should not use this method. Please post on the HBase dev mailing + * list if you need this method. Internal HBase code should use + * {@link ClientTokenUtil} instead. */ @Deprecated public static Token toToken(AuthenticationProtos.Token proto) { return ClientTokenUtil.toToken(proto); } - private static Text getClusterId(Token token) - throws IOException { - return token.getService() != null - ? token.getService() : new Text("default"); + private static Text getClusterId(Token token) throws IOException { + return token.getService() != null ? token.getService() : new Text("default"); } /** - * Obtain an authentication token on behalf of the given user and add it to - * the credentials for the given map reduce job. + * Obtain an authentication token on behalf of the given user and add it to the credentials for + * the given map reduce job. 
* @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job instance in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job instance in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ - public static void obtainTokenForJob(final Connection conn, - User user, Job job) - throws IOException, InterruptedException { + public static void obtainTokenForJob(final Connection conn, User user, Job job) + throws IOException, InterruptedException { try { Token token = ClientTokenUtil.obtainToken(conn, user); @@ -150,8 +146,8 @@ public static void obtainTokenForJob(final Connection conn, } Text clusterId = getClusterId(token); if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName() + " on cluster " + clusterId.toString()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName() + + " on cluster " + clusterId.toString()); } job.getCredentials().addToken(clusterId, token); } catch (IOException ioe) { @@ -162,21 +158,21 @@ public static void obtainTokenForJob(final Connection conn, throw re; } catch (Exception e) { throw new UndeclaredThrowableException(e, - "Unexpected exception obtaining token for user " + user.getName()); + "Unexpected exception obtaining token for user " + user.getName()); } } /** - * Obtain an authentication token on behalf of the given user and add it to - * the credentials for the given map reduce job. + * Obtain an authentication token on behalf of the given user and add it to the credentials for + * the given map reduce job. 
* @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job configuration in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job configuration in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ public static void obtainTokenForJob(final Connection conn, final JobConf job, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { try { Token token = ClientTokenUtil.obtainToken(conn, user); @@ -185,8 +181,8 @@ public static void obtainTokenForJob(final Connection conn, final JobConf job, U } Text clusterId = getClusterId(token); if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName() + " on cluster " + clusterId.toString()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName() + + " on cluster " + clusterId.toString()); } job.getCredentials().addToken(clusterId, token); } catch (IOException ioe) { @@ -197,22 +193,21 @@ public static void obtainTokenForJob(final Connection conn, final JobConf job, U throw re; } catch (Exception e) { throw new UndeclaredThrowableException(e, - "Unexpected exception obtaining token for user "+user.getName()); + "Unexpected exception obtaining token for user " + user.getName()); } } /** - * Checks for an authentication token for the given user, obtaining a new token if necessary, - * and adds it to the credentials for the given map reduce job. - * + * Checks for an authentication token for the given user, obtaining a new token if necessary, and + * adds it to the credentials for the given map reduce job. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job configuration in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job configuration in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ public static void addTokenForJob(final Connection conn, final JobConf job, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Token token = getAuthToken(conn, user); if (token == null) { @@ -222,17 +217,16 @@ public static void addTokenForJob(final Connection conn, final JobConf job, User } /** - * Checks for an authentication token for the given user, obtaining a new token if necessary, - * and adds it to the credentials for the given map reduce job. - * + * Checks for an authentication token for the given user, obtaining a new token if necessary, and + * adds it to the credentials for the given map reduce job. 
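A minimal sketch of the call documented above, assuming a MapReduce Job and an open Connection; the job name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.token.TokenUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ObtainTokenForJobSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "example-job");
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Obtains an HBase auth token for the current user and adds it to the
          // job's credentials, keyed by the cluster id.
          TokenUtil.obtainTokenForJob(conn, User.getCurrent(), job);
        }
      }
    }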
* @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @param job The job instance in which the token should be stored - * @throws IOException If making a remote call to the authentication service fails + * @param job The job instance in which the token should be stored + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ public static void addTokenForJob(final Connection conn, User user, Job job) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Token token = getAuthToken(conn, user); if (token == null) { token = ClientTokenUtil.obtainToken(conn, user); @@ -241,17 +235,16 @@ public static void addTokenForJob(final Connection conn, User user, Job job) } /** - * Checks if an authentication tokens exists for the connected cluster, - * obtaining one if needed and adding it to the user's credentials. - * + * Checks if an authentication tokens exists for the connected cluster, obtaining one if needed + * and adding it to the user's credentials. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token - * @throws IOException If making a remote call to the authentication service fails + * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted * @return true if the token was added, false if it already existed */ public static boolean addTokenIfMissing(Connection conn, User user) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Token token = getAuthToken(conn, user); if (token == null) { token = ClientTokenUtil.obtainToken(conn, user); @@ -266,7 +259,7 @@ public static boolean addTokenIfMissing(Connection conn, User user) * @return null if the user does not have the token, otherwise the auth token for the cluster. */ private static Token getAuthToken(Connection conn, User user) - throws IOException { + throws IOException { final String clusterId = conn.getClusterId(); if (clusterId == null) { throw new IOException("Failed to get cluster ID"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java index 2398ba4031ed..b4cf115e432e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.token; import java.io.IOException; @@ -24,8 +23,8 @@ import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -45,9 +44,8 @@ public class ZKSecretWatcher extends ZKListener { private String baseKeyZNode; private String keysParentZNode; - public ZKSecretWatcher(Configuration conf, - ZKWatcher watcher, - AuthenticationTokenSecretManager secretManager) { + public ZKSecretWatcher(Configuration conf, ZKWatcher watcher, + AuthenticationTokenSecretManager secretManager) { super(watcher); this.secretManager = secretManager; String keyZNodeParent = conf.get("zookeeper.znode.tokenauth.parent", DEFAULT_ROOT_NODE); @@ -62,7 +60,7 @@ public void start() throws KeeperException { if (ZKUtil.watchAndCheckExists(watcher, keysParentZNode)) { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } } @@ -72,11 +70,11 @@ public void nodeCreated(String path) { if (path.equals(keysParentZNode)) { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); - watcher.abort("Error reading new key znode "+path, ke); + watcher.abort("Error reading new key znode " + path, ke); } } } @@ -90,7 +88,7 @@ public void nodeDeleted(String path) { secretManager.removeKey(id); LOG.info("Node deleted id={}", id); } catch (NumberFormatException nfe) { - LOG.error("Invalid znode name for key ID '"+keyId+"'", nfe); + LOG.error("Invalid znode name for key ID '" + keyId + "'", nfe); } } } @@ -101,19 +99,19 @@ public void nodeDataChanged(String path) { try { byte[] data = ZKUtil.getDataAndWatch(watcher, path); if (data == null || data.length == 0) { - LOG.debug("Ignoring empty node "+path); + LOG.debug("Ignoring empty node " + path); return; } - AuthenticationKey key = (AuthenticationKey)Writables.getWritable(data, - new AuthenticationKey()); + AuthenticationKey key = + (AuthenticationKey) Writables.getWritable(data, new AuthenticationKey()); secretManager.addKey(key); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); - watcher.abort("Error reading updated key znode "+path, ke); + watcher.abort("Error reading updated key znode " + path, ke); } catch (IOException ioe) { LOG.error(HBaseMarkers.FATAL, "Error reading key writables", ioe); - watcher.abort("Error reading key writables from znode "+path, ioe); + watcher.abort("Error reading key writables from znode " + path, ioe); } } } @@ -124,7 +122,7 @@ public void nodeChildrenChanged(String path) { // keys changed try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); @@ -144,16 +142,16 @@ private void refreshNodes(List nodes) { try 
{ byte[] data = n.getData(); if (data == null || data.length == 0) { - LOG.debug("Ignoring empty node "+path); + LOG.debug("Ignoring empty node " + path); continue; } - AuthenticationKey key = (AuthenticationKey)Writables.getWritable( - data, new AuthenticationKey()); + AuthenticationKey key = + (AuthenticationKey) Writables.getWritable(data, new AuthenticationKey()); secretManager.addKey(key); } catch (IOException ioe) { - LOG.error(HBaseMarkers.FATAL, "Failed reading new secret key for id '" + - keyId + "' from zk", ioe); - watcher.abort("Error deserializing key from znode "+path, ioe); + LOG.error(HBaseMarkers.FATAL, + "Failed reading new secret key for id '" + keyId + "' from zk", ioe); + watcher.abort("Error deserializing key from znode " + path, ioe); } } } @@ -167,12 +165,12 @@ public void removeKeyFromZK(AuthenticationKey key) { try { ZKUtil.deleteNode(watcher, keyZNode); } catch (KeeperException.NoNodeException nne) { - LOG.error("Non-existent znode "+keyZNode+" for key "+key.getKeyId(), nne); + LOG.error("Non-existent znode " + keyZNode + " for key " + key.getKeyId(), nne); } catch (KeeperException ke) { - LOG.error(HBaseMarkers.FATAL, "Failed removing znode "+keyZNode+" for key "+ - key.getKeyId(), ke); - watcher.abort("Unhandled zookeeper error removing znode "+keyZNode+ - " for key "+key.getKeyId(), ke); + LOG.error(HBaseMarkers.FATAL, + "Failed removing znode " + keyZNode + " for key " + key.getKeyId(), ke); + watcher.abort( + "Unhandled zookeeper error removing znode " + keyZNode + " for key " + key.getKeyId(), ke); } } @@ -183,13 +181,12 @@ public void addKeyToZK(AuthenticationKey key) { // TODO: is there any point in retrying beyond what ZK client does? ZKUtil.createSetData(watcher, keyZNode, keyData); } catch (KeeperException ke) { - LOG.error(HBaseMarkers.FATAL, "Unable to synchronize master key "+key.getKeyId()+ - " to znode "+keyZNode, ke); - watcher.abort("Unable to synchronize secret key "+ - key.getKeyId()+" in zookeeper", ke); + LOG.error(HBaseMarkers.FATAL, + "Unable to synchronize master key " + key.getKeyId() + " to znode " + keyZNode, ke); + watcher.abort("Unable to synchronize secret key " + key.getKeyId() + " in zookeeper", ke); } catch (IOException ioe) { // this can only happen from an error serializing the key - watcher.abort("Failed serializing key "+key.getKeyId(), ioe); + watcher.abort("Failed serializing key " + key.getKeyId(), ioe); } } @@ -204,13 +201,12 @@ public void updateKeyInZK(AuthenticationKey key) { ZKUtil.createSetData(watcher, keyZNode, keyData); } } catch (KeeperException ke) { - LOG.error(HBaseMarkers.FATAL, "Unable to update master key "+key.getKeyId()+ - " in znode "+keyZNode); - watcher.abort("Unable to synchronize secret key "+ - key.getKeyId()+" in zookeeper", ke); + LOG.error(HBaseMarkers.FATAL, + "Unable to update master key " + key.getKeyId() + " in znode " + keyZNode); + watcher.abort("Unable to synchronize secret key " + key.getKeyId() + " in zookeeper", ke); } catch (IOException ioe) { // this can only happen from an error serializing the key - watcher.abort("Failed serializing key "+key.getKeyId(), ioe); + watcher.abort("Failed serializing key " + key.getKeyId(), ioe); } } @@ -220,7 +216,7 @@ public void updateKeyInZK(AuthenticationKey key) { synchronized void refreshKeys() { try { List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error 
reading data from zookeeper", ke); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 519502e5aea8..b6163123cbf2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -39,7 +39,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.AuthUtil; @@ -78,7 +77,7 @@ @InterfaceAudience.Private public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService { private static final Logger LOG = - LoggerFactory.getLogger(DefaultVisibilityLabelServiceImpl.class); + LoggerFactory.getLogger(DefaultVisibilityLabelServiceImpl.class); // "system" label is having an ordinal value 1. private static final int SYSTEM_LABEL_ORDINAL = 1; @@ -118,15 +117,15 @@ public Configuration getConf() { @Override public void init(RegionCoprocessorEnvironment e) throws IOException { - /* So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have - * a ZK instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix. - * - * And what is going on here? This ain't even a Coprocessor? And its being passed a CP Env? + /* + * So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have a ZK + * instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix. And what is + * going on here? This ain't even a Coprocessor? And its being passed a CP Env? */ // This is a CoreCoprocessor. On creation, we should have gotten an environment that // implements HasRegionServerServices so we can get at RSS. FIX!!!! Integrate this CP as // native service. 
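    // Sketch only, not code from this patch: the same lookup written defensively, so a
    // mis-registered coprocessor fails fast with an explicit message instead of a
    // ClassCastException when the environment does not expose the region server services.
    if (!(e instanceof HasRegionServerServices)) {
      throw new IOException("DefaultVisibilityLabelServiceImpl must run as a CoreCoprocessor; "
        + e.getClass().getName() + " does not implement HasRegionServerServices");
    }
    ZKWatcher zk = ((HasRegionServerServices) e).getRegionServerServices().getZooKeeper();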
- ZKWatcher zk = ((HasRegionServerServices)e).getRegionServerServices().getZooKeeper(); + ZKWatcher zk = ((HasRegionServerServices) e).getRegionServerServices().getZooKeeper(); try { labelsCache = VisibilityLabelsCache.createAndGet(zk, this.conf); } catch (IOException ioe) { @@ -137,7 +136,7 @@ public void init(RegionCoprocessorEnvironment e) throws IOException { if (e.getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = e.getRegion(); Pair, Map>> labelsAndUserAuths = - extractLabelsAndAuths(getExistingLabelsWithAuths()); + extractLabelsAndAuths(getExistingLabelsWithAuths()); Map labels = labelsAndUserAuths.getFirst(); Map> userAuths = labelsAndUserAuths.getSecond(); // Add the "system" label if it is not added into the system yet @@ -182,20 +181,20 @@ protected List> getExistingLabelsWithAuths() throws IOException { return existingLabels; } - protected Pair, Map>> extractLabelsAndAuths( - List> labelDetails) { + protected Pair, Map>> + extractLabelsAndAuths(List> labelDetails) { Map labels = new HashMap<>(); Map> userAuths = new HashMap<>(); for (List cells : labelDetails) { for (Cell cell : cells) { if (CellUtil.matchingQualifier(cell, LABEL_QUALIFIER)) { labels.put( - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()), - PrivateCellUtil.getRowAsInt(cell)); + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()), + PrivateCellUtil.getRowAsInt(cell)); } else { // These are user cells who has authorization for this label String user = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + cell.getQualifierLength()); List auths = userAuths.get(user); if (auths == null) { auths = new ArrayList<>(); @@ -209,18 +208,13 @@ protected Pair, Map>> extractLabelsAn } protected void addSystemLabel(Region region, Map labels, - Map> userAuths) throws IOException { + Map> userAuths) throws IOException { if (!labels.containsKey(SYSTEM_LABEL)) { byte[] row = Bytes.toBytes(SYSTEM_LABEL_ORDINAL); Put p = new Put(row); - p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(LABEL_QUALIFIER) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(SYSTEM_LABEL)) - .build()); + p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + .setFamily(LABELS_TABLE_FAMILY).setQualifier(LABEL_QUALIFIER).setTimestamp(p.getTimestamp()) + .setType(Type.Put).setValue(Bytes.toBytes(SYSTEM_LABEL)).build()); region.put(p); labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL); } @@ -237,19 +231,13 @@ public OperationStatus[] addLabels(List labels) throws IOException { String labelStr = Bytes.toString(label); if (this.labelsCache.getLabelOrdinal(labelStr) > 0) { finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new LabelAlreadyExistsException("Label '" + labelStr + "' already exists")); + new LabelAlreadyExistsException("Label '" + labelStr + "' already exists")); } else { byte[] row = Bytes.toBytes(ordinalCounter.get()); Put p = new Put(row); - p.add(builder.clear() - .setRow(row) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(LABEL_QUALIFIER) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(label) - .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))) - .build()); + p.add(builder.clear().setRow(row).setFamily(LABELS_TABLE_FAMILY) + .setQualifier(LABEL_QUALIFIER).setTimestamp(p.getTimestamp()).setType(Type.Put) + 
.setValue(label).setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))).build()); if (LOG.isDebugEnabled()) { LOG.debug("Adding the label " + labelStr); } @@ -277,19 +265,13 @@ public OperationStatus[] setAuths(byte[] user, List authLabels) throws I if (labelOrdinal == 0) { // This label is not yet added. 1st this should be added to the system finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new InvalidLabelException("Label '" + authStr + "' doesn't exists")); + new InvalidLabelException("Label '" + authStr + "' doesn't exists")); } else { byte[] row = Bytes.toBytes(labelOrdinal); Put p = new Put(row); - p.add(builder.clear() - .setRow(row) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(user) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(DUMMY_VALUE) - .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))) - .build()); + p.add(builder.clear().setRow(row).setFamily(LABELS_TABLE_FAMILY).setQualifier(user) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(DUMMY_VALUE) + .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))).build()); puts.add(p); } i++; @@ -307,9 +289,8 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws List currentAuths; if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) { String group = AuthUtil.getGroupName(Bytes.toString(user)); - currentAuths = this.getGroupAuths(new String[]{group}, true); - } - else { + currentAuths = this.getGroupAuths(new String[] { group }, true); + } else { currentAuths = this.getUserAuths(user, true); } List deletes = new ArrayList<>(authLabels.size()); @@ -324,9 +305,9 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws deletes.add(d); } else { // This label is not set for the user. - finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new InvalidLabelException("Label '" + authLabelStr + "' is not set for the user " - + Bytes.toString(user))); + finalOpStatus[i] = + new OperationStatus(OperationStatusCode.FAILURE, new InvalidLabelException( + "Label '" + authLabelStr + "' is not set for the user " + Bytes.toString(user))); } i++; } @@ -339,15 +320,12 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws /** * Adds the mutations to labels region and set the results to the finalOpStatus. finalOpStatus * might have some entries in it where the OpStatus is FAILURE. We will leave those and set in - * others in the order. - * @param mutations - * @param finalOpStatus - * @return whether we need a ZK update or not. + * others in the order. nn * @return whether we need a ZK update or not. 
*/ private boolean mutateLabelsRegion(List mutations, OperationStatus[] finalOpStatus) - throws IOException { - OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations - .toArray(new Mutation[mutations.size()])); + throws IOException { + OperationStatus[] opStatus = + this.labelsRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()])); int i = 0; boolean updateZk = false; for (OperationStatus status : opStatus) { @@ -364,8 +342,7 @@ private boolean mutateLabelsRegion(List mutations, OperationStatus[] f } @Override - public List getUserAuths(byte[] user, boolean systemCall) - throws IOException { + public List getUserAuths(byte[] user, boolean systemCall) throws IOException { assert (labelsRegion != null || systemCall); if (systemCall || labelsRegion == null) { return this.labelsCache.getUserAuths(Bytes.toString(user)); @@ -375,7 +352,7 @@ public List getUserAuths(byte[] user, boolean systemCall) s.addColumn(LABELS_TABLE_FAMILY, user); } Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion, - new Authorizations(SYSTEM_LABEL)); + new Authorizations(SYSTEM_LABEL)); s.setFilter(filter); ArrayList auths = new ArrayList<>(); RegionScanner scanner = this.labelsRegion.getScanner(s); @@ -399,8 +376,7 @@ public List getUserAuths(byte[] user, boolean systemCall) } @Override - public List getGroupAuths(String[] groups, boolean systemCall) - throws IOException { + public List getGroupAuths(String[] groups, boolean systemCall) throws IOException { assert (labelsRegion != null || systemCall); if (systemCall || labelsRegion == null) { return this.labelsCache.getGroupAuths(groups); @@ -412,7 +388,7 @@ public List getGroupAuths(String[] groups, boolean systemCall) } } Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion, - new Authorizations(SYSTEM_LABEL)); + new Authorizations(SYSTEM_LABEL)); s.setFilter(filter); Set auths = new HashSet<>(); RegionScanner scanner = this.labelsRegion.getScanner(s); @@ -439,7 +415,7 @@ public List getGroupAuths(String[] groups, boolean systemCall) public List listLabels(String regex) throws IOException { assert (labelsRegion != null); Pair, Map>> labelsAndUserAuths = - extractLabelsAndAuths(getExistingLabelsWithAuths()); + extractLabelsAndAuths(getExistingLabelsWithAuths()); Map labels = labelsAndUserAuths.getFirst(); labels.remove(SYSTEM_LABEL); if (regex != null) { @@ -457,7 +433,7 @@ public List listLabels(String regex) throws IOException { @Override public List createVisibilityExpTags(String visExpression, boolean withSerializationFormat, - boolean checkAuths) throws IOException { + boolean checkAuths) throws IOException { Set auths = new HashSet<>(); if (checkAuths) { User user = VisibilityUtils.getActiveUser(); @@ -465,7 +441,7 @@ public List createVisibilityExpTags(String visExpression, boolean withSeria auths.addAll(this.labelsCache.getGroupAuthsAsOrdinals(user.getGroupNames())); } return VisibilityUtils.createVisibilityExpTags(visExpression, withSerializationFormat, - checkAuths, auths, labelsCache); + checkAuths, auths, labelsCache); } protected void updateZk(boolean labelAddition) throws IOException { @@ -474,7 +450,7 @@ protected void updateZk(boolean labelAddition) throws IOException { // so many labels and auth in the system, we will end up adding lots of data to zk. Most // possibly we will exceed zk node data limit! 
Pair, Map>> labelsAndUserAuths = - extractLabelsAndAuths(getExistingLabelsWithAuths()); + extractLabelsAndAuths(getExistingLabelsWithAuths()); Map existingLabels = labelsAndUserAuths.getFirst(); Map> userAuths = labelsAndUserAuths.getSecond(); if (labelAddition) { @@ -488,7 +464,7 @@ protected void updateZk(boolean labelAddition) throws IOException { @Override public VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) - throws IOException { + throws IOException { // If a super user issues a get/scan, he should be able to scan the cells // irrespective of the Visibility labels if (isReadFromSystemAuthUser()) { @@ -593,7 +569,7 @@ public boolean havingSystemAuth(User user) throws IOException { @Override public boolean matchVisibility(List putVisTags, Byte putTagsFormat, List deleteVisTags, - Byte deleteTagsFormat) throws IOException { + Byte deleteTagsFormat) throws IOException { // Early out if there are no tags in both of cell and delete if (putVisTags.isEmpty() && deleteVisTags.isEmpty()) { return true; @@ -602,8 +578,10 @@ public boolean matchVisibility(List putVisTags, Byte putTagsFormat, List putVisTags, Byte putTagsFormat, List putVisTags, - List deleteVisTags) throws IOException { + private static boolean matchUnSortedVisibilityTags(List putVisTags, List deleteVisTags) + throws IOException { return compareTagsOrdinals(sortTagsBasedOnOrdinal(putVisTags), - sortTagsBasedOnOrdinal(deleteVisTags)); + sortTagsBasedOnOrdinal(deleteVisTags)); } /** - * @param putVisTags Visibility tags in Put Mutation + * @param putVisTags Visibility tags in Put Mutation * @param deleteVisTags Visibility tags in Delete Mutation - * @return true when all the visibility tags in Put matches with visibility tags in Delete. - * This is used when both the set of tags are sorted based on the label ordinal. + * @return true when all the visibility tags in Put matches with visibility tags in Delete. This + * is used when both the set of tags are sorted based on the label ordinal. */ private static boolean matchOrdinalSortedVisibilityTags(List putVisTags, - List deleteVisTags) { + List deleteVisTags) { boolean matchFound = false; // If the size does not match. Definitely we are not comparing the equal tags. if ((deleteVisTags.size()) == putVisTags.size()) { @@ -662,7 +640,7 @@ private static List> sortTagsBasedOnOrdinal(List tags) throws } private static void getSortedTagOrdinals(List> fullTagsList, Tag tag) - throws IOException { + throws IOException { List tagsOrdinalInSortedOrder = new ArrayList<>(); int offset = tag.getValueOffset(); int endOffset = offset + tag.getValueLength(); @@ -679,7 +657,7 @@ private static void getSortedTagOrdinals(List> fullTagsList, Tag t * @return true when all the visibility tags in Put matches with visibility tags in Delete. 
*/ private static boolean compareTagsOrdinals(List> putVisTags, - List> deleteVisTags) { + List> deleteVisTags) { boolean matchFound = false; if (deleteVisTags.size() == putVisTags.size()) { for (List deleteTagOrdinals : deleteVisTags) { @@ -698,28 +676,27 @@ private static boolean compareTagsOrdinals(List> putVisTags, @Override public byte[] encodeVisibilityForReplication(final List tags, final Byte serializationFormat) - throws IOException { - if (tags.size() > 0 - && (serializationFormat == null || - serializationFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) { + throws IOException { + if ( + tags.size() > 0 && (serializationFormat == null + || serializationFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT) + ) { return createModifiedVisExpression(tags); } return null; } /** - * @param tags - * - all the visibility tags associated with the current Cell + * n * - all the visibility tags associated with the current Cell * @return - the modified visibility expression as byte[] */ - private byte[] createModifiedVisExpression(final List tags) - throws IOException { + private byte[] createModifiedVisExpression(final List tags) throws IOException { StringBuilder visibilityString = new StringBuilder(); for (Tag tag : tags) { if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) { if (visibilityString.length() != 0) { - visibilityString.append(VisibilityConstants.CLOSED_PARAN).append( - VisibilityConstants.OR_OPERATOR); + visibilityString.append(VisibilityConstants.CLOSED_PARAN) + .append(VisibilityConstants.OR_OPERATOR); } int offset = tag.getValueOffset(); int endOffset = offset + tag.getValueLength(); @@ -733,19 +710,19 @@ private byte[] createModifiedVisExpression(final List tags) if (expressionStart) { // Quote every label in case of unicode characters if present visibilityString.append(VisibilityConstants.OPEN_PARAN) - .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); + .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); } else { visibilityString.append(VisibilityConstants.AND_OPERATOR) - .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); + .append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label)); } } else { String label = this.labelsCache.getLabel(currLabelOrdinal); if (expressionStart) { - visibilityString.append(VisibilityConstants.OPEN_PARAN).append( - CellVisibility.quote(label)); + visibilityString.append(VisibilityConstants.OPEN_PARAN) + .append(CellVisibility.quote(label)); } else { - visibilityString.append(VisibilityConstants.AND_OPERATOR).append( - CellVisibility.quote(label)); + visibilityString.append(VisibilityConstants.AND_OPERATOR) + .append(CellVisibility.quote(label)); } } expressionStart = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java index 77bc2057cdc8..ec03913d56f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,23 +21,21 @@ import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * This is an implementation for ScanLabelGenerator. - * It will extract labels from passed in authorizations and cross check - * against the set of predefined authorization labels for given user. + * This is an implementation for ScanLabelGenerator. It will extract labels from passed in + * authorizations and cross check against the set of predefined authorization labels for given user. * The labels for which the user is not authorized will be dropped. */ @InterfaceAudience.Private public class DefinedSetFilterScanLabelGenerator implements ScanLabelGenerator { private static final Logger LOG = - LoggerFactory.getLogger(DefinedSetFilterScanLabelGenerator.class); + LoggerFactory.getLogger(DefinedSetFilterScanLabelGenerator.class); private Configuration conf; @@ -71,7 +69,7 @@ public List getLabels(User user, Authorizations authorizations) { } private List dropLabelsNotInUserAuths(List labels, List auths, - String userName) { + String userName) { List droppedLabels = new ArrayList<>(); List passedLabels = new ArrayList<>(labels.size()); for (String label : labels) { @@ -86,7 +84,7 @@ private List dropLabelsNotInUserAuths(List labels, List sb.append("Dropping invalid authorizations requested by user "); sb.append(userName); sb.append(": [ "); - for (String label: droppedLabels) { + for (String label : droppedLabels) { sb.append(label); sb.append(' '); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java index e2bc16b5f02c..3be8ac1de976 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,18 +21,16 @@ import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * This ScanLabelGenerator enforces a set of predefined authorizations for a - * given user, the set defined by the admin using the VisibilityClient admin - * interface or the set_auths shell command. Any authorizations requested with - * Scan#authorizations will be ignored. + * This ScanLabelGenerator enforces a set of predefined authorizations for a given user, the set + * defined by the admin using the VisibilityClient admin interface or the set_auths shell command. + * Any authorizations requested with Scan#authorizations will be ignored. 
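To make the contract described above concrete, here is a minimal sketch of an enforcing-style ScanLabelGenerator. This is an illustration only, not code from this patch: the class name is hypothetical, the VisibilityLabelsCache is injected just to keep the sketch small, and the real EnforcingScanLabelGenerator in HBase differs in how it obtains the cache.

    package org.apache.hadoop.hbase.security.visibility;

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.security.User;

    // Hypothetical example class; see the lead-in note above.
    public class AdminAuthsOnlyScanLabelGenerator implements ScanLabelGenerator {
      private Configuration conf;
      private final VisibilityLabelsCache labelsCache;

      public AdminAuthsOnlyScanLabelGenerator(VisibilityLabelsCache labelsCache) {
        this.labelsCache = labelsCache;
      }

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return this.conf;
      }

      @Override
      public List<String> getLabels(User user, Authorizations authorizations) {
        // The Authorizations requested on the Scan are ignored on purpose; only the labels the
        // admin granted to the user (and the user's groups) are fed back to the visibility filter.
        Set<String> auths = new HashSet<>(labelsCache.getUserAuths(user.getShortName()));
        auths.addAll(labelsCache.getGroupAuths(user.getGroupNames()));
        return new ArrayList<>(auths);
      }
    }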
*/ @InterfaceAudience.Private public class EnforcingScanLabelGenerator implements ScanLabelGenerator { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java index 11842a2bd807..603cf12b678e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ExpressionExpander { @@ -47,8 +46,10 @@ public ExpressionNode expand(ExpressionNode src) { } return nlExp; } - if (src instanceof NonLeafExpressionNode - && ((NonLeafExpressionNode) src).getOperator() == Operator.NOT) { + if ( + src instanceof NonLeafExpressionNode + && ((NonLeafExpressionNode) src).getOperator() == Operator.NOT + ) { // Negate the exp return negate((NonLeafExpressionNode) src); } @@ -111,12 +112,14 @@ private NonLeafExpressionNode expandNonLeaf(NonLeafExpressionNode newNode, Opera // (a | b) & (c & d) ... 
if (outerOp == Operator.OR) { // (a | b) | (c & d) - if (leftChildNLE.getOperator() == Operator.OR - && rightChildNLE.getOperator() == Operator.AND) { + if ( + leftChildNLE.getOperator() == Operator.OR && rightChildNLE.getOperator() == Operator.AND + ) { leftChildNLE.addChildExp(rightChildNLE); newNode = leftChildNLE; - } else if (leftChildNLE.getOperator() == Operator.AND - && rightChildNLE.getOperator() == Operator.OR) { + } else if ( + leftChildNLE.getOperator() == Operator.AND && rightChildNLE.getOperator() == Operator.OR + ) { // (a & b) | (c | d) rightChildNLE.addChildExp(leftChildNLE); newNode = rightChildNLE; @@ -126,16 +129,18 @@ private NonLeafExpressionNode expandNonLeaf(NonLeafExpressionNode newNode, Opera } else { // outer op is & // (a | b) & (c & d) => (a & c & d) | (b & c & d) - if (leftChildNLE.getOperator() == Operator.OR - && rightChildNLE.getOperator() == Operator.AND) { + if ( + leftChildNLE.getOperator() == Operator.OR && rightChildNLE.getOperator() == Operator.AND + ) { newNode = new NonLeafExpressionNode(Operator.OR); for (ExpressionNode exp : leftChildNLE.getChildExps()) { NonLeafExpressionNode rightChildNLEClone = rightChildNLE.deepClone(); rightChildNLEClone.addChildExp(exp); newNode.addChildExp(rightChildNLEClone); } - } else if (leftChildNLE.getOperator() == Operator.AND - && rightChildNLE.getOperator() == Operator.OR) { + } else if ( + leftChildNLE.getOperator() == Operator.AND && rightChildNLE.getOperator() == Operator.OR + ) { // (a & b) & (c | d) => (a & b & c) | (a & b & d) newNode = new NonLeafExpressionNode(Operator.OR); for (ExpressionNode exp : rightChildNLE.getChildExps()) { @@ -162,7 +167,7 @@ private NonLeafExpressionNode expandNonLeaf(NonLeafExpressionNode newNode, Opera } private NonLeafExpressionNode mergeChildNodes(NonLeafExpressionNode newOuterNode, - Operator outerOp, ExpressionNode lChild, NonLeafExpressionNode nlChild) { + Operator outerOp, ExpressionNode lChild, NonLeafExpressionNode nlChild) { // Merge the single right/left node into the other side if (nlChild.getOperator() == outerOp) { NonLeafExpressionNode leftChildNLEClone = nlChild.deepClone(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java index 313e8801e3e1..f56757abf1b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,12 @@ import java.util.ArrayList; import java.util.List; import java.util.Stack; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ExpressionParser { @@ -39,6 +38,7 @@ public class ExpressionParser { private static final char NOT = '!'; private static final char SPACE = ' '; private static final char DOUBLE_QUOTES = '"'; + public ExpressionNode parse(String expS) throws ParseException { expS = expS.trim(); Stack expStack = new Stack<>(); @@ -66,28 +66,28 @@ public ExpressionNode parse(String expS) throws ParseException { break; case DOUBLE_QUOTES: int labelOffset = ++index; - // We have to rewrite the expression within double quotes as incase of expressions + // We have to rewrite the expression within double quotes as incase of expressions // with escape characters we may have to avoid them as the original expression did // not have them List list = new ArrayList<>(); while (index < endPos && !endDoubleQuotesFound(exp[index])) { if (exp[index] == '\\') { index++; - if (exp[index] != '\\' && exp[index] != '"') - throw new ParseException("invalid escaping with quotes " + expS + " at column : " - + index); + if (exp[index] != '\\' && exp[index] != '"') throw new ParseException( + "invalid escaping with quotes " + expS + " at column : " + index); } list.add(exp[index]); index++; } - // The expression has come to the end. still no double quotes found - if(index == endPos) { + // The expression has come to the end. still no double quotes found + if (index == endPos) { throw new ParseException("No terminating quotes " + expS + " at column : " + index); } // This could be costly. but do we have any alternative? // If we don't do this way then we may have to handle while checking the authorizations. // Better to do it here. 
- byte[] array = org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.toArray(list); + byte[] array = + org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.toArray(list); String leafExp = Bytes.toString(array).trim(); if (leafExp.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); @@ -99,13 +99,13 @@ public ExpressionNode parse(String expS) throws ParseException { labelOffset = index; do { if (!VisibilityLabelsValidator.isValidAuthChar(exp[index])) { - throw new ParseException("Error parsing expression " - + expS + " at column : " + index); + throw new ParseException( + "Error parsing expression " + expS + " at column : " + index); } index++; } while (index < endPos && !isEndOfLabel(exp[index])); leafExp = - new String(exp, labelOffset, index - labelOffset, StandardCharsets.UTF_8).trim(); + new String(exp, labelOffset, index - labelOffset, StandardCharsets.UTF_8).trim(); if (leafExp.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } @@ -137,14 +137,14 @@ public ExpressionNode parse(String expS) throws ParseException { } private int skipSpaces(byte[] exp, int index) { - while (index < exp.length -1 && exp[index+1] == SPACE) { + while (index < exp.length - 1 && exp[index + 1] == SPACE) { index++; } return index; } private void processCloseParan(Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { if (expStack.size() < 2) { // When ) comes we expect atleast a ( node and another leaf/non leaf node // in stack. @@ -154,8 +154,9 @@ private void processCloseParan(Stack expStack, String expS, int ExpressionNode secondTop = expStack.pop(); // The second top must be a ( node and top should not be a ). Top can be // any thing else - if (top == LeafExpressionNode.OPEN_PARAN_NODE - || secondTop != LeafExpressionNode.OPEN_PARAN_NODE) { + if ( + top == LeafExpressionNode.OPEN_PARAN_NODE || secondTop != LeafExpressionNode.OPEN_PARAN_NODE + ) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } // a&(b|) is not valid. @@ -164,8 +165,10 @@ private void processCloseParan(Stack expStack, String expS, int // (a&) is not valid. if (top instanceof NonLeafExpressionNode) { NonLeafExpressionNode nlTop = (NonLeafExpressionNode) top; - if ((nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 1) - || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 2)) { + if ( + (nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 1) + || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 2) + ) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } } @@ -204,7 +207,7 @@ private void processCloseParan(Stack expStack, String expS, int } private void processOpenParan(Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { if (!expStack.isEmpty()) { ExpressionNode top = expStack.peek(); // Top can not be a Label Node. a(.. is not valid. but ((a.. is fine. @@ -217,8 +220,10 @@ private void processOpenParan(Stack expStack, String expS, int i // a&b( is not valid. // a&( is valid though. 
Also !( is valid NonLeafExpressionNode nlTop = (NonLeafExpressionNode) top; - if ((nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 0) - || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 1)) { + if ( + (nlTop.getOperator() == Operator.NOT && nlTop.getChildExps().size() != 0) + || (nlTop.getOperator() != Operator.NOT && nlTop.getChildExps().size() != 1) + ) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } } @@ -227,7 +232,7 @@ private void processOpenParan(Stack expStack, String expS, int i } private void processLabelExpNode(LeafExpressionNode node, Stack expStack, - String expS, int index) throws ParseException { + String expS, int index) throws ParseException { if (expStack.isEmpty()) { expStack.push(node); } else { @@ -254,7 +259,7 @@ private void processLabelExpNode(LeafExpressionNode node, Stack } private void processANDorOROp(Operator op, Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { if (expStack.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } @@ -274,7 +279,7 @@ private void processANDorOROp(Operator op, Stack expStack, Strin } private void processNOTOp(Stack expStack, String expS, int index) - throws ParseException { + throws ParseException { // When ! comes, the stack can be empty or top ( or top can be some exp like // a& // !!.., a!, a&b!, !a! are invalid @@ -293,21 +298,21 @@ private void processNOTOp(Stack expStack, String expS, int index private static boolean endDoubleQuotesFound(byte b) { return (b == DOUBLE_QUOTES); } + private static boolean isEndOfLabel(byte b) { - return (b == OPEN_PARAN || b == CLOSE_PARAN || b == OR || b == AND || - b == NOT || b == SPACE); + return (b == OPEN_PARAN || b == CLOSE_PARAN || b == OR || b == AND || b == NOT || b == SPACE); } private static Operator getOperator(byte op) { switch (op) { - case AND: - return Operator.AND; - case OR: - return Operator.OR; - case NOT: - return Operator.NOT; - default: - return null; + case AND: + return Operator.AND; + case OR: + return Operator.OR; + case NOT: + return Operator.NOT; + default: + return null; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java index 1c77a4d008de..038b15c39bb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,22 +21,18 @@ import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * If the passed in authorization is null, then this ScanLabelGenerator - * feeds the set of predefined authorization labels for the given user. 
That is - * the set defined by the admin using the VisibilityClient admin interface - * or the set_auths shell command. - * Otherwise the passed in authorization labels are returned with no change. - * - * Note: This SLG should not be used alone because it does not check - * the passed in authorization labels against what the user is authorized for. + * If the passed in authorization is null, then this ScanLabelGenerator feeds the set of predefined + * authorization labels for the given user. That is the set defined by the admin using the + * VisibilityClient admin interface or the set_auths shell command. Otherwise the passed in + * authorization labels are returned with no change. Note: This SLG should not be used alone because + * it does not check the passed in authorization labels against what the user is authorized for. */ @InterfaceAudience.Private public class FeedUserAuthScanLabelGenerator implements ScanLabelGenerator { @@ -62,8 +58,10 @@ public Configuration getConf() { @Override public List getLabels(User user, Authorizations authorizations) { - if (authorizations == null || authorizations.getLabels() == null - || authorizations.getLabels().isEmpty()) { + if ( + authorizations == null || authorizations.getLabels() == null + || authorizations.getLabels().isEmpty() + ) { String userName = user.getShortName(); Set auths = new HashSet<>(); auths.addAll(this.labelsCache.getUserAuths(userName)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java index b6c11b806510..594e27b9f5fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java index fbbf8f5a08e1..3f969ef64f46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,24 +18,19 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** - * This would be the interface which would be used add labels to the RPC context - * and this would be stored against the UGI. - * + * This would be the interface which would be used add labels to the RPC context and this would be + * stored against the UGI. 
*/ @InterfaceAudience.Public public interface ScanLabelGenerator extends Configurable { /** - * Helps to get a list of lables associated with an UGI - * @param user - * @param authorizations - * @return The labels + * Helps to get a list of lables associated with an UGI nn * @return The labels */ public List getLabels(User user, Authorizations authorizations); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java index 840ee32da4e0..bbd49d3e371c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,9 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * This is a simple implementation for ScanLabelGenerator. It will just extract labels passed via diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 66dd862b663e..7fa8a4ec8c5d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.HConstants.OperationStatusCode.SANITY_CHECK_FAILURE; @@ -121,12 +120,11 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) // TODO: break out Observer functions into separate class/sub-class. public class VisibilityController implements MasterCoprocessor, RegionCoprocessor, - VisibilityLabelsService.Interface, MasterObserver, RegionObserver { - + VisibilityLabelsService.Interface, MasterObserver, RegionObserver { private static final Logger LOG = LoggerFactory.getLogger(VisibilityController.class); - private static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger." - + VisibilityController.class.getName()); + private static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." + VisibilityController.class.getName()); // flags if we are running on a region of the 'labels' table private boolean labelsRegion = false; // Flag denoting whether AcessController is available or not. 
@@ -135,13 +133,14 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso private volatile boolean initialized = false; private boolean checkAuths = false; /** Mapping of scanner instances to the user who created them */ - private Map scannerOwners = - new MapMaker().weakKeys().makeMap(); + private Map scannerOwners = new MapMaker().weakKeys().makeMap(); private VisibilityLabelService visibilityLabelService; - /** if we are active, usually false, only true if "hbase.security.authorization" - has been set to true in site configuration */ + /** + * if we are active, usually false, only true if "hbase.security.authorization" has been set to + * true in site configuration + */ boolean authorizationEnabled; // Add to this list if there are any reserved tag types @@ -173,8 +172,8 @@ public void start(CoprocessorEnvironment env) throws IOException { // Do not create for master CPs if (!(env instanceof MasterCoprocessorEnvironment)) { - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); + visibilityLabelService = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(this.conf); } } @@ -196,8 +195,8 @@ public Optional getMasterObserver() { @Override public Iterable getServices() { - return Collections.singleton( - VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); + return Collections + .singleton(VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); } /********************************* Master related hooks **********************************/ @@ -224,8 +223,8 @@ public void postStartMaster(ObserverContext ctx) @Override public TableDescriptor preModifyTable(ObserverContext ctx, - TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) - throws IOException { + TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) + throws IOException { if (authorizationEnabled) { if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME); @@ -235,8 +234,8 @@ public TableDescriptor preModifyTable(ObserverContext ctx, TableName tableName) - throws IOException { + public void preDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { if (!authorizationEnabled) { return; } @@ -253,13 +252,13 @@ public void postOpen(ObserverContext e) { if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = true; synchronized (this) { - this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors() - .contains(AccessController.class.getName()); + this.accessControllerAvailable = + CoprocessorHost.getLoadedCoprocessors().contains(AccessController.class.getName()); } initVisibilityLabelService(e.getEnvironment()); } else { checkAuths = e.getEnvironment().getConfiguration() - .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false); + .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false); initVisibilityLabelService(e.getEnvironment()); } } @@ -276,12 +275,12 @@ private void initVisibilityLabelService(RegionCoprocessorEnvironment env) { @Override public void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) 
throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { return; } @@ -294,7 +293,7 @@ public void preBatchMutate(ObserverContext c, cellVisibility = m.getCellVisibility(); } catch (DeserializationException de) { miniBatchOp.setOperationStatus(i, - new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage())); + new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage())); continue; } boolean sanityFailure = false; @@ -328,11 +327,11 @@ public void preBatchMutate(ObserverContext c, // Don't check user auths for labels with Mutations when the user is super user boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); try { - visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, - authCheck); + visibilityTags = + this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, authCheck); } catch (InvalidLabelException e) { miniBatchOp.setOperationStatus(i, - new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage())); + new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage())); } if (visibilityTags != null) { labelCache.put(labelsExp, visibilityTags); @@ -369,9 +368,8 @@ public void preBatchMutate(ObserverContext c, } @Override - public void prePrepareTimeStampForDeleteVersion( - ObserverContext ctx, Mutation delete, Cell cell, - byte[] byteNow, Get get) throws IOException { + public void prePrepareTimeStampForDeleteVersion(ObserverContext ctx, + Mutation delete, Cell cell, byte[] byteNow, Get get) throws IOException { // Nothing to do if we are not filtering by visibility if (!authorizationEnabled) { return; @@ -389,14 +387,14 @@ public void prePrepareTimeStampForDeleteVersion( if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); try { - visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, - false); + visibilityTags = + this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, false); } catch (InvalidLabelException e) { throw new IOException("Invalid cell visibility specified " + labelsExp, e); } } get.setFilter(new DeleteVersionVisibilityExpressionFilter(visibilityTags, - VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT)); + VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT)); try (RegionScanner scanner = ctx.getEnvironment().getRegion().getScanner(new Scan(get))) { // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. See HBASE-26036 @@ -409,8 +407,8 @@ public void prePrepareTimeStampForDeleteVersion( return; } if (result.size() > get.getMaxVersions()) { - throw new RuntimeException("Unexpected size: " + result.size() + - ". Results more than the max versions obtained."); + throw new RuntimeException( + "Unexpected size: " + result.size() + ". Results more than the max versions obtained."); } Cell getCell = result.get(get.getMaxVersions() - 1); PrivateCellUtil.setTimestamp(cell, getCell.getTimestamp()); @@ -424,17 +422,16 @@ public void prePrepareTimeStampForDeleteVersion( } /** - * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This - * tag type is reserved and should not be explicitly set by user. - * + * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This tag type is + * reserved and should not be explicitly set by user. 
* @param cell The cell under consideration * @param pair An optional pair of type {@code } which would be reused if already - * set and new one will be created if NULL is passed + * set and new one will be created if NULL is passed * @return If the boolean is false then it indicates that the cell has a RESERVERD_VIS_TAG and - * with boolean as true, not null tag indicates that a string modified tag was found. + * with boolean as true, not null tag indicates that a string modified tag was found. */ private Pair checkForReservedVisibilityTagPresence(Cell cell, - Pair pair) throws IOException { + Pair pair) throws IOException { if (pair == null) { pair = new Pair<>(false, null); } else { @@ -484,7 +481,7 @@ private void removeReplicationVisibilityTag(List tags) throws IOException { @Override public void preScannerOpen(ObserverContext e, Scan scan) - throws IOException { + throws IOException { if (!initialized) { throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"); } @@ -509,8 +506,8 @@ public void preScannerOpen(ObserverContext e, Scan } } - Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region, - authorizations); + Filter visibilityLabelFilter = + VisibilityUtils.createVisibilityLabelFilter(region, authorizations); if (visibilityLabelFilter != null) { Filter filter = scan.getFilter(); if (filter != null) { @@ -523,8 +520,8 @@ public void preScannerOpen(ObserverContext e, Scan @Override public DeleteTracker postInstantiateDeleteTracker( - ObserverContext ctx, DeleteTracker delTracker) - throws IOException { + ObserverContext ctx, DeleteTracker delTracker) + throws IOException { // Nothing to do if we are not filtering by visibility if (!authorizationEnabled) { return delTracker; @@ -544,7 +541,7 @@ public DeleteTracker postInstantiateDeleteTracker( @Override public RegionScanner postScannerOpen(final ObserverContext c, - final Scan scan, final RegionScanner s) throws IOException { + final Scan scan, final RegionScanner s) throws IOException { User user = VisibilityUtils.getActiveUser(); if (user != null && user.getShortName() != null) { scannerOwners.put(s, user.getShortName()); @@ -554,21 +551,21 @@ public RegionScanner postScannerOpen(final ObserverContext c, - final InternalScanner s, final List result, final int limit, final boolean hasNext) - throws IOException { + final InternalScanner s, final List result, final int limit, final boolean hasNext) + throws IOException { requireScannerOwner(s); return hasNext; } @Override public void preScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { requireScannerOwner(s); } @Override public void postScannerClose(final ObserverContext c, - final InternalScanner s) throws IOException { + final InternalScanner s) throws IOException { // clean up any associated owner mapping scannerOwners.remove(s); } @@ -578,8 +575,7 @@ public void postScannerClose(final ObserverContext * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { - if (!RpcServer.isInRpcCallContext()) - return; + if (!RpcServer.isInRpcCallContext()) return; String requestUName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUName)) { @@ -588,8 +584,8 @@ private void requireScannerOwner(InternalScanner s) throws 
AccessDeniedException } @Override - public void preGetOp(ObserverContext e, Get get, - List results) throws IOException { + public void preGetOp(ObserverContext e, Get get, List results) + throws IOException { if (!initialized) { throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized"); } @@ -613,8 +609,8 @@ public void preGetOp(ObserverContext e, Get get, return; } } - Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment() - .getRegion(), authorizations); + Filter visibilityLabelFilter = + VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment().getRegion(), authorizations); if (visibilityLabelFilter != null) { Filter filter = get.getFilter(); if (filter != null) { @@ -631,24 +627,24 @@ private boolean isSystemOrSuperUser() throws IOException { @Override public List> postIncrementBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); } return resultPairs; } @Override public List> postAppendBeforeWAL( - ObserverContext ctx, Mutation mutation, - List> cellPairs) throws IOException { + ObserverContext ctx, Mutation mutation, + List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); } return resultPairs; } @@ -668,13 +664,15 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc // Don't check user auths for labels with Mutations when the user is super user boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); tags.addAll(this.visibilityLabelService.createVisibilityExpTags(cellVisibility.getExpression(), - true, authCheck)); + true, authCheck)); // Carry forward all other tags Iterator tagsItr = PrivateCellUtil.tagsIterator(newCell); while (tagsItr.hasNext()) { Tag tag = tagsItr.next(); - if (tag.getType() != TagType.VISIBILITY_TAG_TYPE - && tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) { + if ( + tag.getType() != TagType.VISIBILITY_TAG_TYPE + && tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE + ) { tags.add(tag); } } @@ -682,10 +680,12 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc return PrivateCellUtil.createCell(newCell, tags); } - /****************************** VisibilityEndpoint service related methods ******************************/ + /****************************** + * VisibilityEndpoint service related methods + ******************************/ @Override public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request, - RpcCallback done) { + RpcCallback done) { VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List visLabels = request.getVisLabelList(); if (!initialized) { @@ -716,8 +716,8 @@ public synchronized void addLabels(RpcController controller, VisibilityLabelsReq } if (status.getOperationStatusCode() != SUCCESS) { RegionActionResult.Builder 
failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.setResult(i, failureResultBuilder.build()); } i++; @@ -736,7 +736,7 @@ public synchronized void addLabels(RpcController controller, VisibilityLabelsReq } private void setExceptionResults(int size, IOException e, - VisibilityLabelsResponse.Builder response) { + VisibilityLabelsResponse.Builder response) { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); failureResultBuilder.setException(buildException(e)); RegionActionResult failureResult = failureResultBuilder.build(); @@ -747,7 +747,7 @@ private void setExceptionResults(int size, IOException e, @Override public synchronized void setAuths(RpcController controller, SetAuthsRequest request, - RpcCallback done) { + RpcCallback done) { VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List auths = request.getAuthList(); if (!initialized) { @@ -773,8 +773,8 @@ public synchronized void setAuths(RpcController controller, SetAuthsRequest requ response.addResult(successResult); } else { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.addResult(failureResultBuilder.build()); } } @@ -791,7 +791,7 @@ public synchronized void setAuths(RpcController controller, SetAuthsRequest requ } private void logResult(boolean isAllowed, String request, String reason, byte[] user, - List labelAuths, String regex) { + List labelAuths, String regex) { if (AUDITLOG.isTraceEnabled()) { // This is more duplicated code! List labelAuthsStr = new ArrayList<>(); @@ -810,18 +810,18 @@ private void logResult(boolean isAllowed, String request, String reason, byte[] LOG.warn("Failed to get active system user."); LOG.debug("Details on failure to get active system user.", e); } - AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " + - (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + - reason + "; remote address: " + - RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + - request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + - labelAuthsStr + "; regex: " + regex); + AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " + + (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + + reason + "; remote address: " + + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + + request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + + labelAuthsStr + "; regex: " + regex); } } @Override public synchronized void getAuths(RpcController controller, GetAuthsRequest request, - RpcCallback done) { + RpcCallback done) { GetAuthsResponse.Builder response = GetAuthsResponse.newBuilder(); if (!initialized) { controller.setFailed("VisibilityController not yet initialized"); @@ -833,15 +833,14 @@ public synchronized void getAuths(RpcController controller, GetAuthsRequest requ // AccessController CP methods. 
if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); - throw new AccessDeniedException("User '" - + (requestingUser != null ? requestingUser.getShortName() : "null") + throw new AccessDeniedException( + "User '" + (requestingUser != null ? requestingUser.getShortName() : "null") + "' is not authorized to perform this action."); } if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) { String group = AuthUtil.getGroupName(Bytes.toString(user)); - labels = this.visibilityLabelService.getGroupAuths(new String[]{group}, false); - } - else { + labels = this.visibilityLabelService.getGroupAuths(new String[] { group }, false); + } else { labels = this.visibilityLabelService.getUserAuths(user, false); } logResult(true, "getAuths", "Get authorizations for user allowed", user, null, null); @@ -863,12 +862,12 @@ public synchronized void getAuths(RpcController controller, GetAuthsRequest requ @Override public synchronized void clearAuths(RpcController controller, SetAuthsRequest request, - RpcCallback done) { + RpcCallback done) { VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List auths = request.getAuthList(); if (!initialized) { - setExceptionResults(auths.size(), new CoprocessorException( - "VisibilityController not yet initialized"), response); + setExceptionResults(auths.size(), + new CoprocessorException("VisibilityController not yet initialized"), response); } else { byte[] requestUser = request.getUser().toByteArray(); List labelAuths = new ArrayList<>(auths.size()); @@ -877,7 +876,7 @@ public synchronized void clearAuths(RpcController controller, SetAuthsRequest re if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User user = VisibilityUtils.getActiveUser(); throw new AccessDeniedException("User '" + (user != null ? 
user.getShortName() : "null") - + " is not authorized to perform this action."); + + " is not authorized to perform this action."); } if (authorizationEnabled) { checkCallingUserAuth(); // When AC is not in place the calling user should have @@ -888,7 +887,7 @@ public synchronized void clearAuths(RpcController controller, SetAuthsRequest re } OperationStatus[] opStatus = - this.visibilityLabelService.clearAuths(requestUser, labelAuths); + this.visibilityLabelService.clearAuths(requestUser, labelAuths); logResult(true, "clearAuths", "Removing authorization for labels allowed", requestUser, labelAuths, null); RegionActionResult successResult = RegionActionResult.newBuilder().build(); @@ -897,8 +896,8 @@ public synchronized void clearAuths(RpcController controller, SetAuthsRequest re response.addResult(successResult); } else { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.addResult(failureResultBuilder.build()); } } @@ -916,7 +915,7 @@ public synchronized void clearAuths(RpcController controller, SetAuthsRequest re @Override public synchronized void listLabels(RpcController controller, ListLabelsRequest request, - RpcCallback done) { + RpcCallback done) { ListLabelsResponse.Builder response = ListLabelsResponse.newBuilder(); if (!initialized) { controller.setFailed("VisibilityController not yet initialized"); @@ -928,8 +927,8 @@ public synchronized void listLabels(RpcController controller, ListLabelsRequest // AccessController CP methods. if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); - throw new AccessDeniedException("User '" - + (requestingUser != null ? requestingUser.getShortName() : "null") + throw new AccessDeniedException( + "User '" + (requestingUser != null ? 
requestingUser.getShortName() : "null") + "' is not authorized to perform this action."); } labels = this.visibilityLabelService.listLabels(regex); @@ -959,8 +958,8 @@ private void checkCallingUserAuth() throws IOException { throw new IOException("Unable to retrieve calling user"); } if (!(this.visibilityLabelService.havingSystemAuth(user))) { - throw new AccessDeniedException("User '" + user.getShortName() - + "' is not authorized to perform this action."); + throw new AccessDeniedException( + "User '" + user.getShortName() + "' is not authorized to perform this action."); } } } @@ -970,7 +969,7 @@ private static class DeleteVersionVisibilityExpressionFilter extends FilterBase private Byte deleteCellVisTagsFormat; public DeleteVersionVisibilityExpressionFilter(List deleteCellVisTags, - Byte deleteCellVisTagsFormat) { + Byte deleteCellVisTagsFormat) { this.deleteCellVisTags = deleteCellVisTags; this.deleteCellVisTagsFormat = deleteCellVisTagsFormat; } @@ -989,10 +988,9 @@ public ReturnCode filterCell(final Cell cell) throws IOException { // Early out if there are no tags in the cell return ReturnCode.INCLUDE; } - boolean matchFound = VisibilityLabelServiceManager - .getInstance().getVisibilityLabelService() - .matchVisibility(putVisTags, putCellVisTagsFormat, deleteCellVisTags, - deleteCellVisTagsFormat); + boolean matchFound = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService().matchVisibility( + putVisTags, putCellVisTagsFormat, deleteCellVisTags, deleteCellVisTagsFormat); return matchFound ? ReturnCode.INCLUDE : ReturnCode.SKIP; } @@ -1001,12 +999,12 @@ public boolean equals(Object obj) { if (!(obj instanceof DeleteVersionVisibilityExpressionFilter)) { return false; } - if (this == obj){ + if (this == obj) { return true; } - DeleteVersionVisibilityExpressionFilter f = (DeleteVersionVisibilityExpressionFilter)obj; - return this.deleteCellVisTags.equals(f.deleteCellVisTags) && - this.deleteCellVisTagsFormat.equals(f.deleteCellVisTagsFormat); + DeleteVersionVisibilityExpressionFilter f = (DeleteVersionVisibilityExpressionFilter) obj; + return this.deleteCellVisTags.equals(f.deleteCellVisTags) + && this.deleteCellVisTagsFormat.equals(f.deleteCellVisTagsFormat); } @Override @@ -1016,15 +1014,13 @@ public int hashCode() { } /** - * @param t - * @return NameValuePair of the exception name to stringified version os exception. + * n * @return NameValuePair of the exception name to stringified version os exception. */ // Copied from ResponseConverter and made private. Only used in here. private static NameBytesPair buildException(final Throwable t) { NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); parameterBuilder.setName(t.getClass().getName()); - parameterBuilder.setValue( - ByteString.copyFromUtf8(StringUtils.stringifyException(t))); + parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t))); return parameterBuilder.build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java index 64058b715058..8f67afd3395b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,8 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.yetus.audience.InterfaceAudience; /** * During the read (ie. get/Scan) the VisibilityController calls this interface for each of the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java index 4c3f1414b864..e5b06eb23c40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,13 +20,12 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.yetus.audience.InterfaceAudience; /** * This Filter checks the visibility expression with each KV against visibility labels associated @@ -43,7 +42,7 @@ class VisibilityLabelFilter extends FilterBase { private int curQualMetVersions; public VisibilityLabelFilter(VisibilityExpEvaluator expEvaluator, - Map cfVsMaxVersions) { + Map cfVsMaxVersions) { this.expEvaluator = expEvaluator; this.cfVsMaxVersions = cfVsMaxVersions; this.curFamily = new SimpleMutableByteRange(); @@ -58,9 +57,10 @@ public boolean filterRowKey(Cell cell) throws IOException { @Override public ReturnCode filterCell(final Cell cell) throws IOException { - if (curFamily.getBytes() == null - || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), curFamily.getOffset(), - curFamily.getLength()))) { + if ( + curFamily.getBytes() == null || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), + curFamily.getOffset(), curFamily.getLength())) + ) { curFamily.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); // For this family, all the columns can have max of curFamilyMaxVersions versions. No need to // consider the older versions for visibility label check. @@ -69,10 +69,12 @@ public ReturnCode filterCell(final Cell cell) throws IOException { // Family is changed. Just unset curQualifier. 
curQualifier.unset(); } - if (curQualifier.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, - curQualifier.getBytes(), curQualifier.getOffset(), curQualifier.getLength()))) { + if ( + curQualifier.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, + curQualifier.getBytes(), curQualifier.getOffset(), curQualifier.getLength())) + ) { curQualifier.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + cell.getQualifierLength()); curQualMetVersions = 0; } curQualMetVersions++; @@ -96,12 +98,12 @@ public boolean equals(Object obj) { if (!(obj instanceof VisibilityLabelFilter)) { return false; } - if(this == obj){ + if (this == obj) { return true; } - VisibilityLabelFilter f = (VisibilityLabelFilter)obj; - return this.expEvaluator.equals(f.expEvaluator) && - this.cfVsMaxVersions.equals(f.cfVsMaxVersions); + VisibilityLabelFilter f = (VisibilityLabelFilter) obj; + return this.expEvaluator.equals(f.expEvaluator) + && this.cfVsMaxVersions.equals(f.cfVsMaxVersions); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java index b1e4d8909c69..9e58cff23bc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java index 55ba344670fd..a55ab2aae22f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,13 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.OperationStatus; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * The interface which deals with visibility labels and user auths admin service as well as the cell @@ -37,101 +36,82 @@ public interface VisibilityLabelService extends Configurable { /** * System calls this after opening of regions. Gives a chance for the VisibilityLabelService to so - * any initialization logic. - * @param e - * the region coprocessor env + * any initialization logic. n * the region coprocessor env */ void init(RegionCoprocessorEnvironment e) throws IOException; /** - * Adds the set of labels into the system. - * @param labels - * Labels to add to the system. + * Adds the set of labels into the system. 
n * Labels to add to the system. * @return OperationStatus for each of the label addition */ OperationStatus[] addLabels(List labels) throws IOException; /** - * Sets given labels globally authorized for the user. - * @param user - * The authorizing user - * @param authLabels - * Labels which are getting authorized for the user + * Sets given labels globally authorized for the user. n * The authorizing user n * Labels which + * are getting authorized for the user * @return OperationStatus for each of the label auth addition */ OperationStatus[] setAuths(byte[] user, List authLabels) throws IOException; /** - * Removes given labels from user's globally authorized list of labels. - * @param user - * The user whose authorization to be removed - * @param authLabels - * Labels which are getting removed from authorization set + * Removes given labels from user's globally authorized list of labels. n * The user whose + * authorization to be removed n * Labels which are getting removed from authorization set * @return OperationStatus for each of the label auth removal */ OperationStatus[] clearAuths(byte[] user, List authLabels) throws IOException; /** - * Retrieve the visibility labels for the user. - * @param user - * Name of the user whose authorization to be retrieved - * @param systemCall - * Whether a system or user originated call. + * Retrieve the visibility labels for the user. n * Name of the user whose authorization to be + * retrieved n * Whether a system or user originated call. * @return Visibility labels authorized for the given user. */ List getUserAuths(byte[] user, boolean systemCall) throws IOException; /** - * Retrieve the visibility labels for the groups. - * @param groups - * Name of the groups whose authorization to be retrieved - * @param systemCall - * Whether a system or user originated call. + * Retrieve the visibility labels for the groups. n * Name of the groups whose authorization to be + * retrieved n * Whether a system or user originated call. * @return Visibility labels authorized for the given group. */ List getGroupAuths(String[] groups, boolean systemCall) throws IOException; /** * Retrieve the list of visibility labels defined in the system. - * @param regex The regular expression to filter which labels are returned. + * @param regex The regular expression to filter which labels are returned. * @return List of visibility labels */ List listLabels(String regex) throws IOException; /** - * Creates tags corresponding to given visibility expression. - *
    - * Note: This will be concurrently called from multiple threads and implementation should - * take care of thread safety. - * @param visExpression The Expression for which corresponding Tags to be created. - * @param withSerializationFormat specifies whether a tag, denoting the serialization version - * of the tags, to be added in the list. When this is true make sure to add the - * serialization format Tag also. The format tag value should be byte type. - * @param checkAuths denotes whether to check individual labels in visExpression against user's - * global auth label. + * Creates tags corresponding to given visibility expression.
    + * Note: This will be concurrently called from multiple threads and implementation should take + * care of thread safety. + * @param visExpression The Expression for which corresponding Tags to be created. + * @param withSerializationFormat specifies whether a tag, denoting the serialization version of + * the tags, to be added in the list. When this is true make sure + * to add the serialization format Tag also. The format tag value + * should be byte type. + * @param checkAuths denotes whether to check individual labels in visExpression + * against user's global auth label. * @return The list of tags corresponds to the visibility expression. These tags will be stored * along with the Cells. */ List createVisibilityExpTags(String visExpression, boolean withSerializationFormat, - boolean checkAuths) throws IOException; + boolean checkAuths) throws IOException; /** * Creates VisibilityExpEvaluator corresponding to given Authorizations.
    * Note: This will be concurrently called from multiple threads and implementation should take - * care of thread safety. - * @param authorizations - * Authorizations for the read request + * care of thread safety. n * Authorizations for the read request * @return The VisibilityExpEvaluator corresponding to the given set of authorization labels. */ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) - throws IOException; + throws IOException; /** * System checks for user auth during admin operations. (ie. Label add, set/clear auth). The * operation is allowed only for users having system auth. Also during read, if the requesting - * user has system auth, he can view all the data irrespective of its labels. - * @param user - * User for whom system auth check to be done. + * user has system auth, he can view all the data irrespective of its labels. n * User for whom + * system auth check to be done. * @return true if the given user is having system/super auth */ boolean havingSystemAuth(User user) throws IOException; @@ -141,41 +121,28 @@ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) * in Delete mutation and the cell in consideration. Also system passes the serialization format * of visibility tags in Put and Delete.
    * Note: This will be concurrently called from multiple threads and implementation should take - * care of thread safety. - * @param putVisTags - * The visibility tags present in the Put mutation - * @param putVisTagFormat - * The serialization format for the Put visibility tags. A null value for - * this format means the tags are written with unsorted label ordinals - * @param deleteVisTags - * - The visibility tags in the delete mutation (the specified Cell Visibility) - * @param deleteVisTagFormat - * The serialization format for the Delete visibility tags. A null value for - * this format means the tags are written with unsorted label ordinals + * care of thread safety. n * The visibility tags present in the Put mutation n * The + * serialization format for the Put visibility tags. A null value for this format + * means the tags are written with unsorted label ordinals n * - The visibility tags in the delete + * mutation (the specified Cell Visibility) n * The serialization format for the Delete visibility + * tags. A null value for this format means the tags are written with unsorted label + * ordinals * @return true if matching tags are found * @see VisibilityConstants#SORTED_ORDINAL_SERIALIZATION_FORMAT */ boolean matchVisibility(List putVisTags, Byte putVisTagFormat, List deleteVisTags, - Byte deleteVisTagFormat) throws IOException; + Byte deleteVisTagFormat) throws IOException; /** - * Provides a way to modify the visibility tags of type {@link TagType} - * .VISIBILITY_TAG_TYPE, that are part of the cell created from the WALEdits - * that are prepared for replication while calling - * {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} - * .replicate(). - * {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} - * calls this API to provide an opportunity to modify the visibility tags - * before replicating. - * - * @param visTags - * the visibility tags associated with the cell - * @param serializationFormat - * the serialization format associated with the tag - * @return the modified visibility expression in the form of byte[] - * @throws IOException + * Provides a way to modify the visibility tags of type {@link TagType} .VISIBILITY_TAG_TYPE, that + * are part of the cell created from the WALEdits that are prepared for replication while calling + * {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} .replicate(). + * {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} calls this + * API to provide an opportunity to modify the visibility tags before replicating. 
n * the + * visibility tags associated with the cell n * the serialization format associated with the tag + * @return the modified visibility expression in the form of byte[] n */ - byte[] encodeVisibilityForReplication(final List visTags, - final Byte serializationFormat) throws IOException; + byte[] encodeVisibilityForReplication(final List visTags, final Byte serializationFormat) + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java index 74531b92ce78..ec009116a6bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.ReflectionUtils; /** * Manages singleton instance of {@link VisibilityLabelService} @@ -34,7 +33,7 @@ public class VisibilityLabelServiceManager { private static final Logger LOG = LoggerFactory.getLogger(VisibilityLabelServiceManager.class); public static final String VISIBILITY_LABEL_SERVICE_CLASS = - "hbase.regionserver.visibility.label.service.class"; + "hbase.regionserver.visibility.label.service.class"; private static final VisibilityLabelServiceManager INSTANCE = new VisibilityLabelServiceManager(); private volatile VisibilityLabelService visibilityLabelService = null; @@ -49,14 +48,14 @@ public static VisibilityLabelServiceManager getInstance() { } /** - * @param conf - * @return singleton instance of {@link VisibilityLabelService}. The FQCN of the implementation - * class can be specified using "hbase.regionserver.visibility.label.service.class". + * n * @return singleton instance of {@link VisibilityLabelService}. The FQCN of the + * implementation class can be specified using + * "hbase.regionserver.visibility.label.service.class". * @throws IOException When VLS implementation, as specified in conf, can not be loaded. 
*/ public VisibilityLabelService getVisibilityLabelService(Configuration conf) throws IOException { String vlsClassName = conf.get(VISIBILITY_LABEL_SERVICE_CLASS, - DefaultVisibilityLabelServiceImpl.class.getCanonicalName()).trim(); + DefaultVisibilityLabelServiceImpl.class.getCanonicalName()).trim(); if (this.visibilityLabelService != null) { checkForClusterLevelSingleConf(vlsClassName); return this.visibilityLabelService; @@ -68,8 +67,8 @@ public VisibilityLabelService getVisibilityLabelService(Configuration conf) thro } this.vlsClazzName = vlsClassName; try { - this.visibilityLabelService = (VisibilityLabelService) ReflectionUtils.newInstance( - Class.forName(vlsClassName), conf); + this.visibilityLabelService = + (VisibilityLabelService) ReflectionUtils.newInstance(Class.forName(vlsClassName), conf); } catch (ClassNotFoundException e) { throw new IOException(e); } @@ -81,8 +80,8 @@ private void checkForClusterLevelSingleConf(String vlsClassName) { assert this.vlsClazzName != null; if (!this.vlsClazzName.equals(vlsClassName)) { LOG.warn("Trying to use table specific value for config " - + "'hbase.regionserver.visibility.label.service.class' which is not supported." - + " Will use the cluster level VisibilityLabelService class " + this.vlsClazzName); + + "'hbase.regionserver.visibility.label.service.class' which is not supported." + + " Will use the cluster level VisibilityLabelService class " + this.vlsClazzName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java index 6eed5b66c761..5e004f8c6b0f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java @@ -75,14 +75,11 @@ private VisibilityLabelsCache(ZKWatcher watcher, Configuration conf) throws IOEx } /** - * Creates the singleton instance, if not yet present, and returns the same. - * @param watcher - * @param conf - * @return Singleton instance of VisibilityLabelsCache - * @throws IOException + * Creates the singleton instance, if not yet present, and returns the same. nn * @return + * Singleton instance of VisibilityLabelsCache n */ public synchronized static VisibilityLabelsCache createAndGet(ZKWatcher watcher, - Configuration conf) throws IOException { + Configuration conf) throws IOException { // VisibilityLabelService#init() for different regions (in same RS) passes same instance of // watcher as all get the instance from RS. // watcher != instance.zkVisibilityWatcher.getWatcher() - This check is needed only in UTs with @@ -96,10 +93,8 @@ public synchronized static VisibilityLabelsCache createAndGet(ZKWatcher watcher, } /** - * @return Singleton instance of VisibilityLabelsCache - * @throws IllegalStateException - * when this is called before calling - * {@link #createAndGet(ZKWatcher, Configuration)} + * @return Singleton instance of VisibilityLabelsCache n * when this is called before calling + * {@link #createAndGet(ZKWatcher, Configuration)} */ public static VisibilityLabelsCache get() { // By the time this method is called, the singleton instance of VisibilityLabelsCache should @@ -239,7 +234,6 @@ public List getGroupAuths(String[] groups) { /** * Returns the list of ordinals of labels associated with the user - * * @param user Not null value. 
* @return the list of ordinals */ @@ -254,10 +248,8 @@ public Set getUserAuthsAsOrdinals(String user) { } /** - * Returns the list of ordinals of labels associated with the groups - * - * @param groups - * @return the list of ordinals + * Returns the list of ordinals of labels associated with the groups n * @return the list of + * ordinals */ public Set getGroupAuthsAsOrdinals(String[] groups) { this.lock.readLock().lock(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java index b25b7e21c011..026a99796c9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,17 +38,17 @@ import org.slf4j.LoggerFactory; /** - * Similar to MvccSensitiveTracker but tracks the visibility expression also before - * deciding if a Cell can be considered deleted + * Similar to MvccSensitiveTracker but tracks the visibility expression also before deciding if a + * Cell can be considered deleted */ @InterfaceAudience.Private public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTracker { private static final Logger LOG = - LoggerFactory.getLogger(VisibilityNewVersionBehaivorTracker.class); + LoggerFactory.getLogger(VisibilityNewVersionBehaivorTracker.class); public VisibilityNewVersionBehaivorTracker(NavigableSet columns, - CellComparator cellComparator, int minVersion, int maxVersion, int resultMaxVersions, - long oldestUnexpiredTS) { + CellComparator cellComparator, int minVersion, int maxVersion, int resultMaxVersions, + long oldestUnexpiredTS) { super(columns, cellComparator, minVersion, maxVersion, resultMaxVersions, oldestUnexpiredTS); } @@ -122,37 +121,35 @@ public void add(Cell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (KeyValue.Type.codeToType(type)) { - // By the order of seen. We put null cq at first. - case DeleteFamily: // Delete all versions of all columns of the specified family - delFamMap.put(cell.getSequenceId(), - new VisibilityDeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId(), - new TagInfo(cell))); - break; - case DeleteFamilyVersion: // Delete all columns of the specified family and specified version - delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - - // These two kinds of markers are mix with Puts. - case DeleteColumn: // Delete all versions of the specified column - delColMap.put(cell.getSequenceId(), - new VisibilityDeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId(), - new TagInfo(cell))); - break; - case Delete: // Delete the specified version of the specified column. - delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - default: - throw new AssertionError("Unknown delete marker type for " + cell); + // By the order of seen. We put null cq at first. 
+ case DeleteFamily: // Delete all versions of all columns of the specified family + delFamMap.put(cell.getSequenceId(), new VisibilityDeleteVersionsNode(cell.getTimestamp(), + cell.getSequenceId(), new TagInfo(cell))); + break; + case DeleteFamilyVersion: // Delete all columns of the specified family and specified version + delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + + // These two kinds of markers are mix with Puts. + case DeleteColumn: // Delete all versions of the specified column + delColMap.put(cell.getSequenceId(), new VisibilityDeleteVersionsNode(cell.getTimestamp(), + cell.getSequenceId(), new TagInfo(cell))); + break; + case Delete: // Delete the specified version of the specified column. + delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + default: + throw new AssertionError("Unknown delete marker type for " + cell); } } private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags); - return putVisTags.isEmpty() == delInfo.tags.isEmpty() && ( - (putVisTags.isEmpty() && delInfo.tags.isEmpty()) || VisibilityLabelServiceManager - .getInstance().getVisibilityLabelService() - .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); + return putVisTags.isEmpty() == delInfo.tags.isEmpty() + && ((putVisTags.isEmpty() && delInfo.tags.isEmpty()) + || VisibilityLabelServiceManager.getInstance().getVisibilityLabelService() + .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); } @Override @@ -161,7 +158,7 @@ public DeleteResult isDeleted(Cell cell) { long duplicateMvcc = prepare(cell); for (Map.Entry e : delColMap.tailMap(cell.getSequenceId()) - .entrySet()) { + .entrySet()) { VisibilityDeleteVersionsNode node = (VisibilityDeleteVersionsNode) e.getValue(); long deleteMvcc = Long.MAX_VALUE; SortedMap deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp()); @@ -174,8 +171,8 @@ public DeleteResult isDeleted(Cell cell) { } } } - SortedMap> subMap = node.mvccCountingMap - .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); + SortedMap> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), + true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; @@ -202,6 +199,6 @@ public DeleteResult isDeleted(Cell cell) { @Override protected void resetInternal() { delFamMap.put(Long.MAX_VALUE, - new VisibilityDeleteVersionsNode(Long.MIN_VALUE, Long.MAX_VALUE, new TagInfo())); + new VisibilityDeleteVersionsNode(Long.MIN_VALUE, Long.MAX_VALUE, new TagInfo())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java index e39d6016463d..c5a3acaac110 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; @@ -31,10 +29,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A RegionServerObserver impl that provides the custom - * VisibilityReplicationEndpoint. This class should be configured as the - * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be - * replicated as string. The value for the configuration should be + * A RegionServerObserver impl that provides the custom VisibilityReplicationEndpoint. This class + * should be configured as the 'hbase.coprocessor.regionserver.classes' for the visibility tags to + * be replicated as string. The value for the configuration should be * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'. */ @InterfaceAudience.Private @@ -45,21 +42,22 @@ public class VisibilityReplication implements RegionServerCoprocessor, RegionSer @Override public void start(CoprocessorEnvironment env) throws IOException { this.conf = env.getConfiguration(); - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); + visibilityLabelService = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(this.conf); } @Override public void stop(CoprocessorEnvironment env) throws IOException { } - @Override public Optional getRegionServerObserver() { + @Override + public Optional getRegionServerObserver() { return Optional.of(this); } @Override public ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { + ObserverContext ctx, ReplicationEndpoint endpoint) { return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index cd495ce442a3..5cffb51500a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -47,7 +46,7 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint { private final VisibilityLabelService visibilityLabelsService; public VisibilityReplicationEndpoint(ReplicationEndpoint endpoint, - VisibilityLabelService visibilityLabelsService) { + VisibilityLabelService visibilityLabelsService) { this.delegator = endpoint; this.visibilityLabelsService = visibilityLabelsService; } @@ -58,7 +57,7 @@ public void init(Context context) throws IOException { } @Override - public void peerConfigUpdated(ReplicationPeerConfig rpc){ + public void peerConfigUpdated(ReplicationPeerConfig rpc) { delegator.peerConfigUpdated(rpc); } @@ -80,22 +79,22 @@ public boolean replicate(ReplicateContext replicateContext) { if (cell.getTagsLength() > 0) { visTags.clear(); nonVisTags.clear(); - Byte serializationFormat = VisibilityUtils.extractAndPartitionTags(cell, visTags, - nonVisTags); + Byte serializationFormat = + VisibilityUtils.extractAndPartitionTags(cell, visTags, nonVisTags); if (!visTags.isEmpty()) { try { byte[] modifiedVisExpression = visibilityLabelsService - .encodeVisibilityForReplication(visTags, serializationFormat); + .encodeVisibilityForReplication(visTags, serializationFormat); if (modifiedVisExpression != null) { nonVisTags - .add(new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression)); + .add(new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression)); } } catch (Exception ioe) { LOG.error( - "Exception while reading the visibility labels from the cell. The replication " - + "would happen as per the existing format and not as " + - "string type for the cell " - + cell + ".", ioe); + "Exception while reading the visibility labels from the cell. The replication " + + "would happen as per the existing format and not as " + + "string type for the cell " + cell + ".", + ioe); // just return the old entries as it is without applying the string type change newEdit.add(cell); continue; @@ -140,7 +139,9 @@ public boolean isRunning() { } @Override - public boolean isStarting() {return this.delegator.isStarting();} + public boolean isStarting() { + return this.delegator.isStarting(); + } @Override public void start() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 6b9ac7449a4b..59623ece1359 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,24 +21,23 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Triple; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Similar to ScanDeletTracker but tracks the visibility expression also before - * deciding if a Cell can be considered deleted + * Similar to ScanDeletTracker but tracks the visibility expression also before deciding if a Cell + * can be considered deleted */ @InterfaceAudience.Private public class VisibilityScanDeleteTracker extends ScanDeleteTracker { @@ -50,10 +48,10 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { * This tag is used for the DELETE cell which has no visibility label. */ private static final List EMPTY_TAG = Collections.EMPTY_LIST; - // Its better to track the visibility tags in delete based on each type. Create individual - // data structures for tracking each of them. This would ensure that there is no tracking based + // Its better to track the visibility tags in delete based on each type. Create individual + // data structures for tracking each of them. This would ensure that there is no tracking based // on time and also would handle all cases where deletefamily or deletecolumns is specified with - // Latest_timestamp. In such cases the ts in the delete marker and the masking + // Latest_timestamp. In such cases the ts in the delete marker and the masking // put will not be same. 
So going with individual data structures for different delete // type would solve this problem and also ensure that the combination of different type // of deletes with diff ts would also work fine @@ -73,7 +71,7 @@ public VisibilityScanDeleteTracker(CellComparator comparator) { @Override public void add(Cell delCell) { - //Cannot call super.add because need to find if the delete needs to be considered + // Cannot call super.add because need to find if the delete needs to be considered long timestamp = delCell.getTimestamp(); byte type = delCell.getTypeByte(); if (type == KeyValue.Type.DeleteFamily.getCode()) { @@ -124,23 +122,27 @@ private boolean extractDeleteCellVisTags(Cell delCell, Type type) { } deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamily.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily + .add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } else { - visibilityTagsDeleteFamily.add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily + .add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); } break; case DeleteFamilyVersion: - if(visibilityTagsDeleteFamilyVersion == null) { + if (visibilityTagsDeleteFamilyVersion == null) { visibilityTagsDeleteFamilyVersion = new ArrayList<>(); } delTags = new ArrayList<>(); deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamilyVersion.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion + .add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } else { - visibilityTagsDeleteFamilyVersion.add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion + .add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); } break; case DeleteColumn: @@ -189,8 +191,8 @@ public DeleteResult isDeleted(Cell cell) { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - triple.getFirst(), triple.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + triple.getFirst(), triple.getSecond()); if (matchFound) { // A return type of FAMILY_DELETED will cause skip for all remaining cells from // this @@ -225,8 +227,8 @@ public DeleteResult isDeleted(Cell cell) { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - triple.getFirst(), triple.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + triple.getFirst(), triple.getSecond()); if (matchFound) { return DeleteResult.FAMILY_VERSION_DELETED; } @@ -254,10 +256,10 @@ public DeleteResult isDeleted(Cell cell) { for (Pair, Byte> tags : visibilityTagsDeleteColumns) { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = - VisibilityUtils.extractVisibilityTags(cell, putVisTags); + 
VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - tags.getFirst(), tags.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + tags.getFirst(), tags.getSecond()); if (matchFound) { return DeleteResult.VERSION_DELETED; } @@ -283,10 +285,10 @@ public DeleteResult isDeleted(Cell cell) { for (Pair, Byte> tags : visiblityTagsDeleteColumnVersion) { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = - VisibilityUtils.extractVisibilityTags(cell, putVisTags); + VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, - tags.getFirst(), tags.getSecond()); + .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, + tags.getFirst(), tags.getSecond()); if (matchFound) { return DeleteResult.VERSION_DELETED; } @@ -312,12 +314,12 @@ public DeleteResult isDeleted(Cell cell) { visiblityTagsDeleteColumnVersion = null; } else { throw new IllegalStateException("isDeleted failed: deleteBuffer=" - + Bytes.toStringBinary(deleteCell.getQualifierArray(), - deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) - + ", qualifier=" - + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) - + ", timestamp=" + timestamp + ", comparison result: " + ret); + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), + deleteCell.getQualifierLength()) + + ", qualifier=" + + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + + ", timestamp=" + timestamp + ", comparison result: " + ret); } } } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index de0c28746459..ef84e10afdde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -75,10 +75,10 @@ public class VisibilityUtils { private static final Logger LOG = LoggerFactory.getLogger(VisibilityUtils.class); public static final String VISIBILITY_LABEL_GENERATOR_CLASS = - "hbase.regionserver.scan.visibility.label.generator.class"; + "hbase.regionserver.scan.visibility.label.generator.class"; public static final String SYSTEM_LABEL = "system"; - public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = new ArrayBackedTag( - TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, + public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = + new ArrayBackedTag(TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL); private static final String COMMA = ","; @@ -86,9 +86,8 @@ public class VisibilityUtils { private static final ExpressionExpander EXP_EXPANDER = new ExpressionExpander(); /** - * Creates the labels data to be written to zookeeper. - * @param existingLabels - * @return Bytes form of labels and their ordinal details to be written to zookeeper. + * Creates the labels data to be written to zookeeper. n * @return Bytes form of labels and their + * ordinal details to be written to zookeeper. */ public static byte[] getDataToWriteToZooKeeper(Map existingLabels) { VisibilityLabelsRequest.Builder visReqBuilder = VisibilityLabelsRequest.newBuilder(); @@ -102,9 +101,8 @@ public static byte[] getDataToWriteToZooKeeper(Map existingLabe } /** - * Creates the user auth data to be written to zookeeper. - * @param userAuths - * @return Bytes form of user auths details to be written to zookeeper. + * Creates the user auth data to be written to zookeeper. n * @return Bytes form of user auths + * details to be written to zookeeper. */ public static byte[] getUserAuthsDataToWriteToZooKeeper(Map> userAuths) { MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder(); @@ -121,14 +119,11 @@ public static byte[] getUserAuthsDataToWriteToZooKeeper(Map readLabelsFromZKData(byte[] data) - throws DeserializationException { + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -143,13 +138,10 @@ public static List readLabelsFromZKData(byte[] data) } /** - * Reads back User auth data written to zookeeper. - * @param data - * @return User auth details - * @throws DeserializationException + * Reads back User auth data written to zookeeper. n * @return User auth details n */ - public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) - throws DeserializationException { + public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) + throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { @@ -167,9 +159,8 @@ public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) * @param conf The configuration to use * @return Stack of ScanLabelGenerator instances. ScanLabelGenerator classes can be specified in * Configuration as comma separated list using key - * "hbase.regionserver.scan.visibility.label.generator.class" - * @throws IllegalArgumentException - * when any of the specified ScanLabelGenerator class can not be loaded. 
+ * "hbase.regionserver.scan.visibility.label.generator.class" n * when any of the + * specified ScanLabelGenerator class can not be loaded. */ public static List getScanLabelGenerators(Configuration conf) { // There can be n SLG specified as comma separated in conf @@ -194,9 +185,9 @@ public static List getScanLabelGenerators(Configuration conf // 2. DefinedSetFilterScanLabelGenerator // This stacking will achieve the following default behavior: // 1. If there is no Auths in the scan, we will obtain the global defined set for the user - // from the labels table. + // from the labels table. // 2. If there is Auths in the scan, we will examine the passed in Auths and filter out the - // labels that the user is not entitled to. Then use the resulting label set. + // labels that the user is not entitled to. Then use the resulting label set. if (slgs.isEmpty()) { slgs.add(ReflectionUtils.newInstance(FeedUserAuthScanLabelGenerator.class, conf)); slgs.add(ReflectionUtils.newInstance(DefinedSetFilterScanLabelGenerator.class, conf)); @@ -226,18 +217,14 @@ public static Byte extractVisibilityTags(Cell cell, List tags) { /** * Extracts and partitions the visibility tags and nonVisibility Tags - * - * @param cell - the cell for which we would extract and partition the - * visibility and non visibility tags - * @param visTags - * - all the visibilty tags of type TagType.VISIBILITY_TAG_TYPE would - * be added to this list + * @param cell - the cell for which we would extract and partition the visibility and non + * visibility tags n * - all the visibilty tags of type + * TagType.VISIBILITY_TAG_TYPE would be added to this list * @param nonVisTags - all the non visibility tags would be added to this list - * @return - the serailization format of the tag. Can be null if no tags are found or - * if there is no visibility tag found + * @return - the serailization format of the tag. 
Can be null if no tags are found or if there is + * no visibility tag found */ - public static Byte extractAndPartitionTags(Cell cell, List visTags, - List nonVisTags) { + public static Byte extractAndPartitionTags(Cell cell, List visTags, List nonVisTags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -266,15 +253,15 @@ public static boolean isVisibilityTagsPresent(Cell cell) { } public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) - throws IOException { + throws IOException { Map cfVsMaxVersions = new HashMap<>(); for (ColumnFamilyDescriptor hcd : region.getTableDescriptor().getColumnFamilies()) { cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions()); } - VisibilityLabelService vls = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(); - Filter visibilityLabelFilter = new VisibilityLabelFilter( - vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions); + VisibilityLabelService vls = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(); + Filter visibilityLabelFilter = + new VisibilityLabelFilter(vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions); return visibilityLabelFilter; } @@ -297,8 +284,8 @@ public static User getActiveUser() throws IOException { } public static List createVisibilityExpTags(String visExpression, - boolean withSerializationFormat, boolean checkAuths, Set auths, - VisibilityLabelOrdinalProvider ordinalProvider) throws IOException { + boolean withSerializationFormat, boolean checkAuths, Set auths, + VisibilityLabelOrdinalProvider ordinalProvider) throws IOException { ExpressionNode node = null; try { node = EXP_PARSER.parse(visExpression); @@ -341,8 +328,8 @@ public static List createVisibilityExpTags(String visExpression, } private static void getLabelOrdinals(ExpressionNode node, List labelOrdinals, - Set auths, boolean checkAuths, VisibilityLabelOrdinalProvider ordinalProvider) - throws IOException, InvalidLabelException { + Set auths, boolean checkAuths, VisibilityLabelOrdinalProvider ordinalProvider) + throws IOException, InvalidLabelException { if (node.isSingleNode()) { String identifier = null; int labelOrdinal = 0; @@ -355,8 +342,8 @@ private static void getLabelOrdinals(ExpressionNode node, List labelOrd checkAuths(auths, labelOrdinal, identifier, checkAuths); } else { // This is a NOT node. - LeafExpressionNode lNode = (LeafExpressionNode) ((NonLeafExpressionNode) node) - .getChildExps().get(0); + LeafExpressionNode lNode = + (LeafExpressionNode) ((NonLeafExpressionNode) node).getChildExps().get(0); identifier = lNode.getIdentifier(); labelOrdinal = ordinalProvider.getLabelOrdinal(identifier); checkAuths(auths, labelOrdinal, identifier, checkAuths); @@ -376,16 +363,11 @@ private static void getLabelOrdinals(ExpressionNode node, List labelOrd /** * This will sort the passed labels in ascending oder and then will write one after the other to - * the passed stream. - * @param labelOrdinals - * Unsorted label ordinals - * @param dos - * Stream where to write the labels. - * @throws IOException - * When IOE during writes to Stream. + * the passed stream. n * Unsorted label ordinals n * Stream where to write the labels. n * When + * IOE during writes to Stream. 
*/ private static void writeLabelOrdinalsToStream(List labelOrdinals, DataOutputStream dos) - throws IOException { + throws IOException { Collections.sort(labelOrdinals); for (Integer labelOrdinal : labelOrdinals) { StreamUtils.writeRawVInt32(dos, labelOrdinal); @@ -393,11 +375,11 @@ private static void writeLabelOrdinalsToStream(List labelOrdinals, Data } private static void checkAuths(Set auths, int labelOrdinal, String identifier, - boolean checkAuths) throws IOException { + boolean checkAuths) throws IOException { if (checkAuths) { if (auths == null || (!auths.contains(labelOrdinal))) { throw new AccessDeniedException("Visibility label " + identifier - + " not authorized for the user " + VisibilityUtils.getActiveUser().getShortName()); + + " not authorized for the user " + VisibilityUtils.getActiveUser().getShortName()); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java index bcb3b8ba4fbe..3150dc448f94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +39,7 @@ public class ZKVisibilityLabelWatcher extends ZKListener { private static final String VISIBILITY_LABEL_ZK_PATH = "zookeeper.znode.visibility.label.parent"; private static final String DEFAULT_VISIBILITY_LABEL_NODE = "visibility/labels"; private static final String VISIBILITY_USER_AUTHS_ZK_PATH = - "zookeeper.znode.visibility.user.auths.parent"; + "zookeeper.znode.visibility.user.auths.parent"; private static final String DEFAULT_VISIBILITY_USER_AUTHS_NODE = "visibility/user_auths"; private VisibilityLabelsCache labelsCache; @@ -48,15 +47,15 @@ public class ZKVisibilityLabelWatcher extends ZKListener { private String userAuthsZnode; public ZKVisibilityLabelWatcher(ZKWatcher watcher, VisibilityLabelsCache labelsCache, - Configuration conf) { + Configuration conf) { super(watcher); this.labelsCache = labelsCache; String labelZnodeParent = conf.get(VISIBILITY_LABEL_ZK_PATH, DEFAULT_VISIBILITY_LABEL_NODE); - String userAuthsZnodeParent = conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, - DEFAULT_VISIBILITY_USER_AUTHS_NODE); + String userAuthsZnodeParent = + conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, DEFAULT_VISIBILITY_USER_AUTHS_NODE); this.labelZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, labelZnodeParent); - this.userAuthsZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, - userAuthsZnodeParent); + 
this.userAuthsZnode = + ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, userAuthsZnodeParent); } public void start() throws KeeperException { @@ -132,10 +131,8 @@ public void nodeChildrenChanged(String path) { } /** - * Write a labels mirror or user auths mirror into zookeeper - * - * @param data - * @param labelsOrUserAuths true for writing labels and false for user auths. + * Write a labels mirror or user auths mirror into zookeeper n * @param labelsOrUserAuths true for + * writing labels and false for user auths. */ public void writeToZookeeper(byte[] data, boolean labelsOrUserAuths) { String znode = this.labelZnode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java index fcc66a8b5ea0..4a3cbd358b66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java index fd479b40594f..4151ecff4506 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java index 83610fadc8f1..94bb99faa036 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java index 2281453c2dc0..9b1ad236dc0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,9 @@ @InterfaceAudience.Private public enum Operator { - AND('&'), OR('|'), NOT('!'); + AND('&'), + OR('|'), + NOT('!'); private final char rep; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java index 342aa87feda2..3c73c3d5d043 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.server.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_METHOD; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SERVICE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SYSTEM; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -34,12 +34,14 @@ import org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RpcSystem; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; /** * Construct {@link Span} instances originating from the server side of an IPC. - * - * @see Semantic conventions for RPC spans + * @see Semantic + * conventions for RPC spans */ @InterfaceAudience.Private public class IpcServerSpanBuilder implements Supplier { @@ -48,13 +50,11 @@ public class IpcServerSpanBuilder implements Supplier { private final Map, Object> attributes = new HashMap<>(); public IpcServerSpanBuilder(final RpcCall rpcCall) { - final String packageAndService = Optional.ofNullable(rpcCall.getService()) - .map(BlockingService::getDescriptorForType) - .map(IpcClientSpanBuilder::getRpcPackageAndService) - .orElse(""); - final String method = Optional.ofNullable(rpcCall.getMethod()) - .map(IpcClientSpanBuilder::getRpcName) - .orElse(""); + final String packageAndService = + Optional.ofNullable(rpcCall.getService()).map(BlockingService::getDescriptorForType) + .map(IpcClientSpanBuilder::getRpcPackageAndService).orElse(""); + final String method = + Optional.ofNullable(rpcCall.getMethod()).map(IpcClientSpanBuilder::getRpcName).orElse(""); setName(IpcClientSpanBuilder.buildSpanName(packageAndService, method)); addAttribute(RPC_SYSTEM, RpcSystem.HBASE_RPC.name()); addAttribute(RPC_SERVICE, packageAndService); @@ -78,9 +78,8 @@ public IpcServerSpanBuilder addAttribute(final AttributeKey key, T value) @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) - .setSpanKind(SpanKind.SERVER); + final SpanBuilder builder = + TraceUtil.getGlobalTracer().spanBuilder(name).setSpanKind(SpanKind.SERVER); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); return builder.startSpan(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index f8e54c9c459c..be86d6fc8a91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.Arrays; import java.util.Locale; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -37,43 +35,42 @@ */ @InterfaceAudience.Private public class CreateSnapshot extends AbstractHBaseTool { - private SnapshotType snapshotType = SnapshotType.FLUSH; - private TableName tableName = null; - private String snapshotName = null; + private SnapshotType snapshotType = SnapshotType.FLUSH; + private TableName tableName = null; + private String snapshotName = null; - public static void main(String[] args) { - new CreateSnapshot().doStaticMain(args); - } + public static void main(String[] args) { + new CreateSnapshot().doStaticMain(args); + } - @Override - protected void addOptions() { - this.addRequiredOptWithArg("t", "table", "The name of the table"); - this.addRequiredOptWithArg("n", "name", "The name of the created snapshot"); - this.addOptWithArg("s", "snapshot_type", - "Snapshot Type. FLUSH is default. Posible values are " - + Arrays.toString(SnapshotType.values())); - } + @Override + protected void addOptions() { + this.addRequiredOptWithArg("t", "table", "The name of the table"); + this.addRequiredOptWithArg("n", "name", "The name of the created snapshot"); + this.addOptWithArg("s", "snapshot_type", "Snapshot Type. FLUSH is default. Posible values are " + + Arrays.toString(SnapshotType.values())); + } - @Override - protected void processOptions(CommandLine cmd) { - this.tableName = TableName.valueOf(cmd.getOptionValue('t')); - this.snapshotName = cmd.getOptionValue('n'); - String snapshotTypeName = cmd.getOptionValue('s'); - if (snapshotTypeName != null) { - snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT); - this.snapshotType = SnapshotType.valueOf(snapshotTypeName); - } + @Override + protected void processOptions(CommandLine cmd) { + this.tableName = TableName.valueOf(cmd.getOptionValue('t')); + this.snapshotName = cmd.getOptionValue('n'); + String snapshotTypeName = cmd.getOptionValue('s'); + if (snapshotTypeName != null) { + snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT); + this.snapshotType = SnapshotType.valueOf(snapshotTypeName); } + } - @Override - protected int doWork() throws Exception { - try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { - admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); - } catch (Exception e) { - System.err.println("failed to take the snapshot: " + e.getMessage()); - return -1; - } - return 0; + @Override + protected int doWork() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); + } catch (Exception e) { + System.err.println("failed to take the snapshot: " + e.getMessage()); + return -1; } + return 0; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 17406445fc3a..73408a7edb71 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; -import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -73,50 +70,52 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** * Helper to Restore/Clone a Snapshot - * - *

    The helper assumes that a table is already created, and by calling restore() - * the content present in the snapshot will be restored as the new content of the table. - * - *

    Clone from Snapshot: If the target table is empty, the restore operation - * is just a "clone operation", where the only operations are: + *

    + * The helper assumes that a table is already created, and by calling restore() the content present + * in the snapshot will be restored as the new content of the table. + *

    + * Clone from Snapshot: If the target table is empty, the restore operation is just a "clone + * operation", where the only operations are: *

      - *
    • for each region in the snapshot create a new region - * (note that the region will have a different name, since the encoding contains the table name) - *
    • for each file in the region create a new HFileLink to point to the original file. - *
    • restore the logs, if any + *
    • for each region in the snapshot create a new region (note that the region will have a + * different name, since the encoding contains the table name) + *
    • for each file in the region create a new HFileLink to point to the original file. + *
    • restore the logs, if any *
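// Hedged client-side sketch of the two flows contrasted above (clone vs. restore);
// the snapshot and table names are placeholders, not values taken from this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneVsRestoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Clone: materialize the snapshot as a brand-new table; regions are re-created
      // under new encoded names and HFileLinks point back at the snapshot's files.
      admin.cloneSnapshot("my_snapshot", TableName.valueOf("cloned_table"));
      // Restore: roll an existing table back to the snapshot state; the table must be
      // disabled first.
      admin.disableTable(TableName.valueOf("source_table"));
      admin.restoreSnapshot("my_snapshot");
      admin.enableTable(TableName.valueOf("source_table"));
    }
  }
}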
    - * - *

    Restore from Snapshot: + *

    + * Restore from Snapshot: + *

      + *
    • for each region in the table verify which are available in the snapshot and which are not + *
        + *
      • if the region is not present in the snapshot, remove it. + *
      • if the region is present in the snapshot + *
          + *
        • for each file in the table region verify which are available in the snapshot + *
            + *
          • if the hfile is not present in the snapshot, remove it + *
          • if the hfile is present, keep it (nothing to do) + *
          + *
        • for each file in the snapshot region but not in the table + *
            + *
          • create a new HFileLink that points to the original file + *
          + *
        + *
      + *
    • for each region in the snapshot not present in the current table state *
        - *
      • for each region in the table verify which are available in the snapshot and which are not - *
          - *
        • if the region is not present in the snapshot, remove it. - *
        • if the region is present in the snapshot - *
            - *
          • for each file in the table region verify which are available in the snapshot - *
              - *
            • if the hfile is not present in the snapshot, remove it - *
            • if the hfile is present, keep it (nothing to do) - *
            - *
          • for each file in the snapshot region but not in the table - *
              - *
            • create a new HFileLink that point to the original file - *
            - *
          - *
        - *
      • for each region in the snapshot not present in the current table state - *
          - *
        • create a new region and for each file in the region create a new HFileLink - * (This is the same as the clone operation) - *
        - *
      • restore the logs, if any + *
      • create a new region and for each file in the region create a new HFileLink (This is the same + * as the clone operation) + *
      + *
    • restore the logs, if any *
    */ @InterfaceAudience.Private @@ -125,7 +124,7 @@ public class RestoreSnapshotHelper { private final Map regionsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - private final Map > parentsMap = new HashMap<>(); + private final Map> parentsMap = new HashMap<>(); private final ForeignExceptionDispatcher monitor; private final MonitoredTask status; @@ -142,13 +141,9 @@ public class RestoreSnapshotHelper { private final FileSystem fs; private final boolean createBackRefs; - public RestoreSnapshotHelper(final Configuration conf, - final FileSystem fs, - final SnapshotManifest manifest, - final TableDescriptor tableDescriptor, - final Path rootDir, - final ForeignExceptionDispatcher monitor, - final MonitoredTask status) { + public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs, + final SnapshotManifest manifest, final TableDescriptor tableDescriptor, final Path rootDir, + final ForeignExceptionDispatcher monitor, final MonitoredTask status) { this(conf, fs, manifest, tableDescriptor, rootDir, monitor, status, true); } @@ -226,13 +221,13 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr // NOTE: we rely upon the region name as: "table name, start key, end key" if (tableRegions != null) { monitor.rethrowException(); - for (RegionInfo regionInfo: tableRegions) { + for (RegionInfo regionInfo : tableRegions) { String regionName = regionInfo.getEncodedName(); if (regionNames.contains(regionName)) { LOG.info("region to restore: " + regionName); regionNames.remove(regionName); - metaChanges.addRegionToRestore(ProtobufUtil.toRegionInfo(regionManifests.get(regionName) - .getRegionInfo())); + metaChanges.addRegionToRestore( + ProtobufUtil.toRegionInfo(regionManifests.get(regionName).getRegionInfo())); } else { LOG.info("region to remove: " + regionName); metaChanges.addRegionToRemove(regionInfo); @@ -244,10 +239,10 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr List regionsToAdd = new ArrayList<>(regionNames.size()); if (regionNames.size() > 0) { monitor.rethrowException(); - for (String regionName: regionNames) { + for (String regionName : regionNames) { LOG.info("region to add: " + regionName); - regionsToAdd.add(ProtobufUtil.toRegionInfo(regionManifests.get(regionName) - .getRegionInfo())); + regionsToAdd + .add(ProtobufUtil.toRegionInfo(regionManifests.get(regionName).getRegionInfo())); } } @@ -281,14 +276,14 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr * Describe the set of operations needed to update hbase:meta after restore. */ public static class RestoreMetaChanges { - private final Map > parentsMap; + private final Map> parentsMap; private final TableDescriptor htd; private List regionsToRestore = null; private List regionsToRemove = null; private List regionsToAdd = null; - public RestoreMetaChanges(TableDescriptor htd, Map > parentsMap) { + public RestoreMetaChanges(TableDescriptor htd, Map> parentsMap) { this.parentsMap = parentsMap; this.htd = htd; } @@ -313,9 +308,8 @@ public boolean hasRegionsToAdd() { } /** - * Returns the list of new regions added during the on-disk restore. - * The caller is responsible to add the regions to META. - * e.g MetaTableAccessor.addRegionsToMeta(...) + * Returns the list of new regions added during the on-disk restore. The caller is responsible + * to add the regions to META. e.g MetaTableAccessor.addRegionsToMeta(...) 
* @return the list of regions to add to META */ public List getRegionsToAdd() { @@ -330,8 +324,8 @@ public boolean hasRegionsToRestore() { } /** - * Returns the list of 'restored regions' during the on-disk restore. - * The caller is responsible to add the regions to hbase:meta if not present. + * Returns the list of 'restored regions' during the on-disk restore. The caller is responsible + * to add the regions to hbase:meta if not present. * @return the list of regions restored */ public List getRegionsToRestore() { @@ -346,9 +340,8 @@ public boolean hasRegionsToRemove() { } /** - * Returns the list of regions removed during the on-disk restore. - * The caller is responsible to remove the regions from META. - * e.g. MetaTableAccessor.deleteRegions(...) + * Returns the list of regions removed during the on-disk restore. The caller is responsible to + * remove the regions from META. e.g. MetaTableAccessor.deleteRegions(...) * @return the list of regions to remove from META */ public List getRegionsToRemove() { @@ -377,14 +370,14 @@ void addRegionToRestore(final RegionInfo hri) { regionsToRestore.add(hri); } - public void updateMetaParentRegions(Connection connection, - final List regionInfos) throws IOException { + public void updateMetaParentRegions(Connection connection, final List regionInfos) + throws IOException { if (regionInfos == null || parentsMap.isEmpty()) return; // Extract region names and offlined regions Map regionsByName = new HashMap<>(regionInfos.size()); List parentRegions = new LinkedList<>(); - for (RegionInfo regionInfo: regionInfos) { + for (RegionInfo regionInfo : regionInfos) { if (regionInfo.isSplitParent()) { parentRegions.add(regionInfo); } else { @@ -393,7 +386,7 @@ public void updateMetaParentRegions(Connection connection, } // Update Offline parents - for (RegionInfo regionInfo: parentRegions) { + for (RegionInfo regionInfo : parentRegions) { Pair daughters = parentsMap.get(regionInfo.getEncodedName()); if (daughters == null) { // The snapshot contains an unreferenced region. @@ -409,8 +402,7 @@ public void updateMetaParentRegions(Connection connection, LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters); MetaTableAccessor.addSplitsToParent(connection, regionInfo, - regionsByName.get(daughters.getFirst()), - regionsByName.get(daughters.getSecond())); + regionsByName.get(daughters.getFirst()), regionsByName.get(daughters.getSecond())); } } } @@ -419,7 +411,7 @@ public void updateMetaParentRegions(Connection connection, * Remove specified regions from the file-system, using the archiver. */ private void removeHdfsRegions(final ThreadPoolExecutor exec, final List regions) - throws IOException { + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -433,8 +425,8 @@ public void editRegion(final RegionInfo hri) throws IOException { * Restore specified regions by restoring content to the snapshot state. */ private void restoreHdfsRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -448,8 +440,8 @@ public void editRegion(final RegionInfo hri) throws IOException { * Restore specified mob regions by restoring content to the snapshot state. 
*/ private void restoreHdfsMobRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -459,11 +451,11 @@ public void editRegion(final RegionInfo hri) throws IOException { }); } - private Map> getRegionHFileReferences( - final SnapshotRegionManifest manifest) { + private Map> + getRegionHFileReferences(final SnapshotRegionManifest manifest) { Map> familyMap = new HashMap<>(manifest.getFamilyFilesCount()); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { familyMap.put(familyFiles.getFamilyName().toStringUtf8(), new ArrayList<>(familyFiles.getStoreFilesList())); } @@ -471,20 +463,20 @@ private Map> getRegionHFileRefere } /** - * Restore region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreRegion(final RegionInfo regionInfo, - final SnapshotRegionManifest regionManifest) throws IOException { + final SnapshotRegionManifest regionManifest) throws IOException { restoreRegion(regionInfo, regionManifest, new Path(tableDir, regionInfo.getEncodedName())); } /** - * Restore mob region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore mob region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreMobRegion(final RegionInfo regionInfo, - final SnapshotRegionManifest regionManifest) throws IOException { + final SnapshotRegionManifest regionManifest) throws IOException { if (regionManifest == null) { return; } @@ -493,39 +485,39 @@ private void restoreMobRegion(final RegionInfo regionInfo, } /** - * Restore region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreRegion(final RegionInfo regionInfo, - final SnapshotRegionManifest regionManifest, Path regionDir) throws IOException { + final SnapshotRegionManifest regionManifest, Path regionDir) throws IOException { Map> snapshotFiles = - getRegionHFileReferences(regionManifest); + getRegionHFileReferences(regionManifest); String tableName = tableDesc.getTableName().getNameAsString(); final String snapshotName = snapshotDesc.getName(); Path regionPath = new Path(tableDir, regionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) ? - HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false) : - HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? 
HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); // Restore families present in the table - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { byte[] family = Bytes.toBytes(familyDir.getName()); Set familyFiles = getTableRegionFamilyFiles(familyDir); List snapshotFamilyFiles = - snapshotFiles.remove(familyDir.getName()); + snapshotFiles.remove(familyDir.getName()); List filesToTrack = new ArrayList<>(); if (snapshotFamilyFiles != null) { List hfilesToAdd = new ArrayList<>(); - for (SnapshotRegionManifest.StoreFile storeFile: snapshotFamilyFiles) { + for (SnapshotRegionManifest.StoreFile storeFile : snapshotFamilyFiles) { if (familyFiles.contains(storeFile.getName())) { // HFile already present familyFiles.remove(storeFile.getName()); - //no need to restore already present files, but we need to add those to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, - new Path(familyDir, storeFile.getName()), true)); + // no need to restore already present files, but we need to add those to tracker + filesToTrack + .add(new StoreFileInfo(conf, fs, new Path(familyDir, storeFile.getName()), true)); } else { // HFile missing hfilesToAdd.add(storeFile); @@ -533,57 +525,55 @@ private void restoreRegion(final RegionInfo regionInfo, } // Remove hfiles not present in the snapshot - for (String hfileName: familyFiles) { + for (String hfileName : familyFiles) { Path hfile = new Path(familyDir, hfileName); if (!fs.getFileStatus(hfile).isDirectory()) { - LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + - snapshotName + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); + LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + snapshotName + + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile); } } // Restore Missing files - for (SnapshotRegionManifest.StoreFile storeFile: hfilesToAdd) { - LOG.debug("Restoring missing HFileLink " + storeFile.getName() + - " of snapshot=" + snapshotName+ - " to region=" + regionInfo.getEncodedName() + " table=" + tableName); + for (SnapshotRegionManifest.StoreFile storeFile : hfilesToAdd) { + LOG.debug("Restoring missing HFileLink " + storeFile.getName() + " of snapshot=" + + snapshotName + " to region=" + regionInfo.getEncodedName() + " table=" + tableName); String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); // mark the reference file to be added to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, - new Path(familyDir, fileName), true)); + filesToTrack.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); } } else { // Family doesn't exists in the snapshot - LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName + - " from region=" + regionInfo.getEncodedName() + " table=" + tableName); + LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName + + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveFamilyByFamilyDir(fs, conf, regionInfo, familyDir, family); fs.delete(familyDir, true); } - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). 
- withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); - //simply reset list of tracked files with the matching files - //and the extra one present in the snapshot + // simply reset list of tracked files with the matching files + // and the extra one present in the snapshot tracker.set(filesToTrack); } // Add families not present in the table - for (Map.Entry> familyEntry: - snapshotFiles.entrySet()) { + for (Map.Entry> familyEntry : snapshotFiles + .entrySet()) { Path familyDir = new Path(regionDir, familyEntry.getKey()); - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). - withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); List files = new ArrayList<>(); if (!fs.mkdirs(familyDir)) { throw new IOException("Unable to create familyDir=" + familyDir); } - for (SnapshotRegionManifest.StoreFile storeFile: familyEntry.getValue()) { + for (SnapshotRegionManifest.StoreFile storeFile : familyEntry.getValue()) { LOG.trace("Adding HFileLink (Not present in the table) " + storeFile.getName() - + " of snapshot " + snapshotName + " to table=" + tableName); + + " of snapshot " + snapshotName + " to table=" + tableName); String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); files.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); } @@ -610,12 +600,12 @@ private Set getTableRegionFamilyFiles(final Path familyDir) throws IOExc } /** - * Clone specified regions. For each region create a new region - * and create a HFileLink for each hfile. + * Clone specified regions. For each region create a new region and create a HFileLink for each + * hfile. 
*/ private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return null; final Map snapshotRegions = new HashMap<>(regions.size()); @@ -632,16 +622,16 @@ private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, String snapshotRegionName = snapshotRegionInfo.getEncodedName(); String clonedRegionName = clonedRegionsInfo[i].getEncodedName(); regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName)); - LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + - " in snapshot " + snapshotName); + LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + " in snapshot " + + snapshotName); // Add mapping between cloned region name and snapshot region info snapshotRegions.put(clonedRegionName, snapshotRegionInfo); } // create the regions on disk - ModifyRegionUtils.createRegions(exec, conf, rootDir, - tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() { + ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDesc, clonedRegionsInfo, + new ModifyRegionUtils.RegionFillTask() { @Override public void fillRegion(final HRegion region) throws IOException { RegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName()); @@ -653,42 +643,35 @@ public void fillRegion(final HRegion region) throws IOException { } /** - * Clone the mob region. For the region create a new region - * and create a HFileLink for each hfile. + * Clone the mob region. For the region create a new region and create a HFileLink for each hfile. */ private void cloneHdfsMobRegion(final Map regionManifests, - final RegionInfo region) throws IOException { + final RegionInfo region) throws IOException { // clone region info (change embedded tableName with the new one) Path clonedRegionPath = MobUtils.getMobRegionPath(rootDir, tableDesc.getTableName()); - cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), - clonedRegionPath, region, regionManifests.get(region.getEncodedName())); + cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), clonedRegionPath, region, + regionManifests.get(region.getEncodedName())); } /** - * Clone region directory content from the snapshot info. - * - * Each region is encoded with the table name, so the cloned region will have - * a different region name. - * - * Instead of copying the hfiles a HFileLink is created. - * - * @param regionDir {@link Path} cloned dir - * @param snapshotRegionInfo + * Clone region directory content from the snapshot info. Each region is encoded with the table + * name, so the cloned region will have a different region name. Instead of copying the hfiles a + * HFileLink is created. 
+ * @param regionDir {@link Path} cloned dir n */ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, - final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) - throws IOException { + final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException { final String tableName = tableDesc.getTableName().getNameAsString(); final String snapshotName = snapshotDesc.getName(); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8()); List clonedFiles = new ArrayList<>(); - for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { - LOG.info("Adding HFileLink " + storeFile.getName() +" from cloned region " - + "in snapshot " + snapshotName + " to table=" + tableName); + for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { + LOG.info("Adding HFileLink " + storeFile.getName() + " from cloned region " + "in snapshot " + + snapshotName + " to table=" + tableName); if (MobUtils.isMobRegionInfo(newRegionInfo)) { - String mobFileName = HFileLink.createHFileLinkName(snapshotRegionInfo, - storeFile.getName()); + String mobFileName = + HFileLink.createHFileLinkName(snapshotRegionInfo, storeFile.getName()); Path mobPath = new Path(familyDir, mobFileName); if (fs.exists(mobPath)) { fs.delete(mobPath, true); @@ -699,18 +682,18 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, clonedFiles.add(new StoreFileInfo(conf, fs, new Path(familyDir, file), true)); } } - //we don't need to track files under mobdir + // we don't need to track files under mobdir if (!MobUtils.isMobRegionInfo(newRegionInfo)) { Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) ? - HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) : - HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray())); - StoreFileTracker tracker = StoreFileTrackerFactory.create(sftConf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). - withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); tracker.set(clonedFiles); } } @@ -718,40 +701,34 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, } /** - * Clone region directory content from the snapshot info. - * - * Each region is encoded with the table name, so the cloned region will have - * a different region name. - * - * Instead of copying the hfiles a HFileLink is created. - * - * @param region {@link HRegion} cloned - * @param snapshotRegionInfo + * Clone region directory content from the snapshot info. Each region is encoded with the table + * name, so the cloned region will have a different region name. 
Instead of copying the hfiles a + * HFileLink is created. + * @param region {@link HRegion} cloned n */ private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionInfo, - final SnapshotRegionManifest manifest) throws IOException { - cloneRegion(region.getRegionInfo(), - new Path(tableDir, region.getRegionInfo().getEncodedName()), - snapshotRegionInfo, - manifest); + final SnapshotRegionManifest manifest) throws IOException { + cloneRegion(region.getRegionInfo(), new Path(tableDir, region.getRegionInfo().getEncodedName()), + snapshotRegionInfo, manifest); } /** * Create a new {@link HFileLink} to reference the store file. - *

    The store file in the snapshot can be a simple hfile, an HFileLink or a reference. + *

    + * The store file in the snapshot can be a simple hfile, an HFileLink or a reference. *

      - *
    • hfile: abc -> table=region-abc - *
    • reference: abc.1234 -> table=region-abc.1234 - *
    • hfilelink: table=region-hfile -> table=region-hfile + *
    • hfile: abc -> table=region-abc + *
    • reference: abc.1234 -> table=region-abc.1234 + *
    • hfilelink: table=region-hfile -> table=region-hfile *
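// Hedged sketch of how the three store-file name shapes listed above can be told apart
// (the hfilelink form, table=region-hfile, is omitted from the demo calls because a
// real encoded region name is hex); example names mirror the javadoc and are
// illustrative only.
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

public class StoreFileNameKindSketch {
  static String kindOf(String name) {
    if (HFileLink.isHFileLink(name)) {
      return "hfilelink"; // link back to another table/region's file
    } else if (StoreFileInfo.isReference(name)) {
      return "reference"; // hfile name + "." + parent region, e.g. abc.1234
    } else {
      return "hfile";     // a plain store file, e.g. abc
    }
  }

  public static void main(String[] args) {
    System.out.println(kindOf("abc"));      // hfile
    System.out.println(kindOf("abc.1234")); // reference
  }
}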
    - * @param familyDir destination directory for the store file - * @param regionInfo destination region info for the table + * @param familyDir destination directory for the store file + * @param regionInfo destination region info for the table * @param createBackRef - Whether back reference should be created. Defaults to true. - * @param storeFile store file name (can be a Reference, HFileLink or simple HFile) + * @param storeFile store file name (can be a Reference, HFileLink or simple HFile) */ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInfo, - final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) - throws IOException { + final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) + throws IOException { String hfileName = storeFile.getName(); if (HFileLink.isHFileLink(hfileName)) { return HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName, createBackRef); @@ -764,7 +741,10 @@ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInf /** * Create a new {@link Reference} as copy of the source one. - *

    +   * 

    + *

    + * + *
        * The source table looks like:
        *    1234/abc      (original file)
        *    5678/abc.1234 (reference file)
    @@ -775,20 +755,27 @@ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInf
        *
        * NOTE that the region name in the clone changes (md5 of regioninfo)
        * and the reference should reflect that change.
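// Hedged sketch of the note above: cloning a snapshot region under a new table name
// yields a different md5-based encoded name, which is why reference files must be
// rewritten to follow the clone. Table names here are placeholders.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

public class ClonedRegionNameSketch {
  public static void main(String[] args) {
    RegionInfo snapshotRegion =
      RegionInfoBuilder.newBuilder(TableName.valueOf("source_table")).build();
    RegionInfo cloned =
      RestoreSnapshotHelper.cloneRegionInfo(TableName.valueOf("cloned_table"), snapshotRegion);
    // Same start/end keys, split flag and regionId, but the encoded name differs
    // because the table name participates in it.
    System.out.println(snapshotRegion.getEncodedName());
    System.out.println(cloned.getEncodedName());
  }
}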
    -   * 
    - * @param familyDir destination directory for the store file + *
    + * + *
    + * @param familyDir destination directory for the store file * @param regionInfo destination region info for the table - * @param storeFile reference file name + * @param storeFile reference file name */ private String restoreReferenceFile(final Path familyDir, final RegionInfo regionInfo, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfileName = storeFile.getName(); // Extract the referred information (hfile name and parent region) Path refPath = - StoreFileInfo.getReferredToFile(new Path(new Path(new Path(new Path(snapshotTable - .getNamespaceAsString(), snapshotTable.getQualifierAsString()), regionInfo - .getEncodedName()), familyDir.getName()), hfileName)); + StoreFileInfo + .getReferredToFile( + new Path( + new Path( + new Path(new Path(snapshotTable.getNamespaceAsString(), + snapshotTable.getQualifierAsString()), regionInfo.getEncodedName()), + familyDir.getName()), + hfileName)); String snapshotRegionName = refPath.getParent().getParent().getName(); String fileName = refPath.getName(); @@ -816,15 +803,15 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio if (linkPath != null) { in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(fs); } else { - linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), - regionInfo.getEncodedName()), familyDir.getName()), hfileName); + linkPath = new Path(new Path( + HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), regionInfo.getEncodedName()), + familyDir.getName()), hfileName); in = fs.open(linkPath); } OutputStream out = fs.create(outPath); IOUtils.copyBytes(in, out, conf); } - // Add the daughter region to the map String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes())); if (regionName == null) { @@ -846,10 +833,8 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio } /** - * Create a new {@link RegionInfo} from the snapshot region info. - * Keep the same startKey, endKey, regionId and split information but change - * the table name. - * + * Create a new {@link RegionInfo} from the snapshot region info. Keep the same startKey, endKey, + * regionId and split information but change the table name. * @param snapshotRegionInfo Info for region to clone. 
* @return the new HRegion instance */ @@ -858,13 +843,10 @@ public RegionInfo cloneRegionInfo(final RegionInfo snapshotRegionInfo) { } public static RegionInfo cloneRegionInfo(TableName tableName, RegionInfo snapshotRegionInfo) { - return RegionInfoBuilder.newBuilder(tableName) - .setStartKey(snapshotRegionInfo.getStartKey()) - .setEndKey(snapshotRegionInfo.getEndKey()) - .setSplit(snapshotRegionInfo.isSplit()) - .setRegionId(snapshotRegionInfo.getRegionId()) - .setOffline(snapshotRegionInfo.isOffline()) - .build(); + return RegionInfoBuilder.newBuilder(tableName).setStartKey(snapshotRegionInfo.getStartKey()) + .setEndKey(snapshotRegionInfo.getEndKey()).setSplit(snapshotRegionInfo.isSplit()) + .setRegionId(snapshotRegionInfo.getRegionId()).setOffline(snapshotRegionInfo.isOffline()) + .build(); } /** @@ -883,44 +865,38 @@ private List getTableRegions() throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath()); regions.add(hri); } - LOG.debug("found " + regions.size() + " regions for table=" + - tableDesc.getTableName().getNameAsString()); + LOG.debug("found " + regions.size() + " regions for table=" + + tableDesc.getTableName().getNameAsString()); return regions; } /** - * Copy the snapshot files for a snapshot scanner, discards meta changes. - * @param conf - * @param fs - * @param rootDir - * @param restoreDir - * @param snapshotName - * @throws IOException + * Copy the snapshot files for a snapshot scanner, discards meta changes. nnnnnn */ public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs, - Path rootDir, Path restoreDir, String snapshotName) throws IOException { + Path rootDir, Path restoreDir, String snapshotName) throws IOException { // ensure that restore dir is not under root dir if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) { - throw new IllegalArgumentException("Filesystems for restore directory and HBase root " + - "directory should be the same"); + throw new IllegalArgumentException( + "Filesystems for restore directory and HBase root " + "directory should be the same"); } - if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() +"/")) { - throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " + - "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir); + if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() + "/")) { + throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " + + "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir); } Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); - MonitoredTask status = TaskMonitor.get().createStatus( - "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); + MonitoredTask status = TaskMonitor.get() + .createStatus("Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(); // we send createBackRefs=false so that restored hfiles do not create back reference links // in the base hbase root dir. 
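// Hedged usage sketch of the read-only restore this method performs for snapshot
// scanners; the restore directory below is a placeholder and must sit outside the
// HBase root directory, on the same filesystem.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class SnapshotScannerRestoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path restoreDir = new Path("/tmp/snapshot-restore"); // must not be under rootDir
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
      RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "my_snapshot");
    // No hbase:meta updates are applied here; the returned changes are informational.
    System.out.println("regions to add to meta (ignored): " + metaChanges.hasRegionsToAdd());
  }
}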
- RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, - manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false); + RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, manifest, + manifest.getTableDescriptor(), restoreDir, monitor, status, false); RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize. if (LOG.isDebugEnabled()) { @@ -931,11 +907,11 @@ public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, File } public static void restoreSnapshotAcl(SnapshotDescription snapshot, TableName newTableName, - Configuration conf) throws IOException { + Configuration conf) throws IOException { if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) { LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName); ListMultimap perms = - ShadedAccessControlUtil.toUserTablePermissions(snapshot.getUsersAndPermissions()); + ShadedAccessControlUtil.toUserTablePermissions(snapshot.getUsersAndPermissions()); try (Connection conn = ConnectionFactory.createConnection(conf)) { for (Entry e : perms.entries()) { String user = e.getKey(); @@ -945,7 +921,7 @@ public static void restoreSnapshotAcl(SnapshotDescription snapshot, TableName ne } } catch (Throwable e) { throw new IOException("Grant acl into newly creatd table failed. snapshot: " + snapshot - + ", table: " + newTableName, e); + + ", table: " + newTableName, e); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index d126ec5a7526..6564809303aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** @@ -84,9 +83,8 @@ *
    * * Utility methods in this class are useful for getting the correct locations for different parts of - * the snapshot, as well as moving completed snapshots into place (see - * {@link #completeSnapshot}, and writing the - * {@link SnapshotDescription} to the working snapshot directory. + * the snapshot, as well as moving completed snapshots into place (see {@link #completeSnapshot}, + * and writing the {@link SnapshotDescription} to the working snapshot directory. */ @InterfaceAudience.Private public final class SnapshotDescriptionUtils { @@ -97,7 +95,7 @@ public final class SnapshotDescriptionUtils { public static class CompletedSnaphotDirectoriesFilter extends FSUtils.BlackListDirFilter { /** - * @param fs + * n */ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { super(fs, Collections.singletonList(SNAPSHOT_TMP_DIR_NAME)); @@ -121,8 +119,7 @@ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp"; /** - * The configuration property that determines the filepath of the snapshot - * base working directory + * The configuration property that determines the filepath of the snapshot base working directory */ public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir"; @@ -133,10 +130,11 @@ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { // Default value if no ttl is specified for Snapshot private static final long NO_SNAPSHOT_TTL_SPECIFIED = 0; - public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis"; + public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = + "hbase.snapshot.master.timeout.millis"; /** By default, wait 300 seconds for a snapshot to complete */ - public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5 ; + public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5; public static final String SNAPSHOT_CORRUPTED_FILE = "_CORRUPTED"; @@ -145,21 +143,21 @@ private SnapshotDescriptionUtils() { } /** - * @param conf {@link Configuration} from which to check for the timeout - * @param type type of snapshot being taken + * @param conf {@link Configuration} from which to check for the timeout + * @param type type of snapshot being taken * @param defaultMaxWaitTime Default amount of time to wait, if none is in the configuration * @return the max amount of time the master should wait for a snapshot to complete */ public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type, - long defaultMaxWaitTime) { + long defaultMaxWaitTime) { String confKey; switch (type) { - case DISABLED: - default: - confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; + case DISABLED: + default: + confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; } return Math.max(conf.getLong(confKey, defaultMaxWaitTime), - conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime)); + conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime)); } /** @@ -176,10 +174,11 @@ public static Path getSnapshotRootDir(final Path rootDir) { * Get the directory for a specified snapshot. This directory is a sub-directory of snapshot root * directory and all the data files for a snapshot are kept under this directory. 
* @param snapshot snapshot being taken - * @param rootDir hbase root directory + * @param rootDir hbase root directory * @return the final directory for the completed snapshot */ - public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir) { + public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, + final Path rootDir) { return getCompletedSnapshotDir(snapshot.getName(), rootDir); } @@ -187,7 +186,7 @@ public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, f * Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root * directory and all the data files for a snapshot are kept under this directory. * @param snapshotName name of the snapshot being taken - * @param rootDir hbase root directory + * @param rootDir hbase root directory * @return the final directory for the completed snapshot */ public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) { @@ -198,35 +197,35 @@ public static Path getCompletedSnapshotDir(final String snapshotName, final Path * Get the general working directory for snapshots - where they are built, where they are * temporarily copied on export, etc. * @param rootDir root directory of the HBase installation - * @param conf Configuration of the HBase instance + * @param conf Configuration of the HBase instance * @return Path to the snapshot tmp directory, relative to the passed root directory */ public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) { - return new Path(conf.get(SNAPSHOT_WORKING_DIR, - getDefaultWorkingSnapshotDir(rootDir).toString())); + return new Path( + conf.get(SNAPSHOT_WORKING_DIR, getDefaultWorkingSnapshotDir(rootDir).toString())); } /** * Get the directory to build a snapshot, before it is finalized * @param snapshot snapshot that will be built - * @param rootDir root directory of the hbase installation - * @param conf Configuration of the HBase instance + * @param rootDir root directory of the hbase installation + * @param conf Configuration of the HBase instance * @return {@link Path} where one can build a snapshot */ public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir, - Configuration conf) { + Configuration conf) { return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf); } /** * Get the directory to build a snapshot, before it is finalized * @param snapshotName name of the snapshot - * @param rootDir root directory of the hbase installation - * @param conf Configuration of the HBase instance + * @param rootDir root directory of the hbase installation + * @param conf Configuration of the HBase instance * @return {@link Path} where one can build a snapshot */ public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir, - Configuration conf) { + Configuration conf) { return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName); } @@ -238,6 +237,7 @@ public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir public static Path getCorruptedFlagFileForSnapshot(final Path workingDir) { return new Path(workingDir, SNAPSHOT_CORRUPTED_FILE); } + /** * Get the directory within the given filepath to store the snapshot instance * @param snapshotsDir directory to store snapshot directory within @@ -259,9 +259,9 @@ public static final Path getSnapshotsDir(Path rootDir) { /** * Determines if the given workingDir is a subdirectory of the given "root directory" * 
@param workingDir a directory to check - * @param rootDir root directory of the HBase installation - * @return true if the given workingDir is a subdirectory of the given root directory, - * false otherwise + * @param rootDir root directory of the HBase installation + * @return true if the given workingDir is a subdirectory of the given root directory, false + * otherwise */ public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) { return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR); @@ -270,9 +270,9 @@ public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir /** * Determines if the given workingDir is a subdirectory of the default working snapshot directory * @param workingDir a directory to check - * @param conf configuration for the HBase cluster + * @param conf configuration for the HBase cluster * @return true if the given workingDir is a subdirectory of the default working directory for - * snapshots, false otherwise + * snapshots, false otherwise * @throws IOException if we can't get the root dir */ public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf) @@ -296,16 +296,16 @@ private static Path getDefaultWorkingSnapshotDir(final Path rootDir) { * parameters, if none have been supplied. This resolves any 'optional' parameters that aren't * supplied to their default values. * @param snapshot general snapshot descriptor - * @param conf Configuration to read configured snapshot defaults if snapshot is not complete + * @param conf Configuration to read configured snapshot defaults if snapshot is not complete * @return a valid snapshot description * @throws IllegalArgumentException if the {@link SnapshotDescription} is not a complete - * {@link SnapshotDescription}. + * {@link SnapshotDescription}. 
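isSubDirectoryOf(), whose Javadoc is re-wrapped above, is a plain string-prefix test rather than a filesystem call, so both paths need to be qualified the same way for the check to be meaningful. A small sketch of that behaviour (the hdfs:// URIs below are hypothetical):

```java
import org.apache.hadoop.fs.Path;

public class WorkingDirCheckSketch {
  // Mirrors isSubDirectoryOf() above: a string-prefix comparison, so both paths must share
  // the same scheme and authority for the result to be meaningful.
  static boolean isSubDirectoryOf(Path workingDir, Path rootDir) {
    return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://nn:8020/hbase"); // hypothetical root directory
    System.out.println(isSubDirectoryOf(
      new Path("hdfs://nn:8020/hbase/.hbase-snapshot/.tmp/snap1"), root)); // true
    System.out.println(isSubDirectoryOf(
      new Path("hdfs://nn:8020/backup/snap1"), root)); // false
  }
}
```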
*/ public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf) - throws IllegalArgumentException, IOException { + throws IllegalArgumentException, IOException { if (!snapshot.hasTable()) { throw new IllegalArgumentException( - "Descriptor doesn't apply to a table, so we can't build it."); + "Descriptor doesn't apply to a table, so we can't build it."); } SnapshotDescription.Builder builder = snapshot.toBuilder(); @@ -315,19 +315,21 @@ public static SnapshotDescription validate(SnapshotDescription snapshot, Configu if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) { time = EnvironmentEdgeManager.currentTime(); LOG.debug("Creation time not specified, setting to:" + time + " (current time:" - + EnvironmentEdgeManager.currentTime() + ")."); + + EnvironmentEdgeManager.currentTime() + ")."); builder.setCreationTime(time); } long ttl = snapshot.getTtl(); // set default ttl(sec) if it is not set already or the value is out of the range - if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED || - ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { - final long defaultSnapshotTtl = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, - HConstants.DEFAULT_SNAPSHOT_TTL); + if ( + ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED + || ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE) + ) { + final long defaultSnapshotTtl = + conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, HConstants.DEFAULT_SNAPSHOT_TTL); if (LOG.isDebugEnabled()) { LOG.debug("Snapshot current TTL value: {} resetting it to default value: {}", ttl, - defaultSnapshotTtl); + defaultSnapshotTtl); } ttl = defaultSnapshotTtl; } @@ -357,18 +359,18 @@ public static SnapshotDescription validate(SnapshotDescription snapshot, Configu /** * Write the snapshot description into the working directory of a snapshot - * @param snapshot description of the snapshot being taken + * @param snapshot description of the snapshot being taken * @param workingDir working directory of the snapshot - * @param fs {@link FileSystem} on which the snapshot should be taken + * @param fs {@link FileSystem} on which the snapshot should be taken * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on - * failure + * failure */ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs) - throws IOException { - FsPermission perms = CommonFSUtils.getFilePermissions(fs, fs.getConf(), - HConstants.DATA_FILE_UMASK_KEY); + throws IOException { + FsPermission perms = + CommonFSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY); Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)){ + try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)) { snapshot.writeTo(out); } catch (IOException e) { // if we get an exception, try to remove the snapshot info @@ -382,15 +384,15 @@ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD /** * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory - * @param fs filesystem where the snapshot was taken + * @param fs filesystem where the snapshot was taken * @param snapshotDir directory where the snapshot was stored * @return the stored snapshot description * @throws CorruptedSnapshotException if the snapshot cannot be read */ public static SnapshotDescription 
readSnapshotInfo(FileSystem fs, Path snapshotDir) - throws CorruptedSnapshotException { + throws CorruptedSnapshotException { Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE); - try (FSDataInputStream in = fs.open(snapshotInfo)){ + try (FSDataInputStream in = fs.open(snapshotInfo)) { return SnapshotDescription.parseFrom(in); } catch (IOException e) { throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e); @@ -398,36 +400,34 @@ public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotD } /** - * Commits the snapshot process by moving the working snapshot - * to the finalized filepath - * - * @param snapshotDir The file path of the completed snapshots - * @param workingDir The file path of the in progress snapshots - * @param fs The file system of the completed snapshots + * Commits the snapshot process by moving the working snapshot to the finalized filepath + * @param snapshotDir The file path of the completed snapshots + * @param workingDir The file path of the in progress snapshots + * @param fs The file system of the completed snapshots * @param workingDirFs The file system of the in progress snapshots - * @param conf Configuration - * + * @param conf Configuration * @throws SnapshotCreationException if the snapshot could not be moved - * @throws IOException the filesystem could not be reached + * @throws IOException the filesystem could not be reached */ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs, FileSystem workingDirFs, final Configuration conf) throws SnapshotCreationException, IOException { - LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to " - + snapshotDir); + LOG.debug( + "Sentinel is done, just moving the snapshot from " + workingDir + " to " + snapshotDir); // If the working and completed snapshot directory are on the same file system, attempt // to rename the working snapshot directory to the completed location. If that fails, // or the file systems differ, attempt to copy the directory over, throwing an exception // if this fails URI workingURI = workingDirFs.getUri(); URI rootURI = fs.getUri(); - if ((!workingURI.getScheme().equals(rootURI.getScheme()) || - workingURI.getAuthority() == null || - !workingURI.getAuthority().equals(rootURI.getAuthority()) || - workingURI.getUserInfo() == null || - !workingURI.getUserInfo().equals(rootURI.getUserInfo()) || - !fs.rename(workingDir, snapshotDir)) && !FileUtil.copy(workingDirFs, workingDir, fs, - snapshotDir, true, true, conf)) { + if ( + (!workingURI.getScheme().equals(rootURI.getScheme()) || workingURI.getAuthority() == null + || !workingURI.getAuthority().equals(rootURI.getAuthority()) + || workingURI.getUserInfo() == null + || !workingURI.getUserInfo().equals(rootURI.getUserInfo()) + || !fs.rename(workingDir, snapshotDir)) + && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf) + ) { throw new SnapshotCreationException("Failed to copy working directory(" + workingDir + ") to completed directory(" + snapshotDir + ")."); } @@ -436,33 +436,34 @@ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSyste /** * Check if the user is this table snapshot's owner * @param snapshot the table snapshot description - * @param user the user - * @return true if the user is the owner of the snapshot, - * false otherwise or the snapshot owner field is not present. 
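completeSnapshot(), reformatted in the hunk above, first attempts a cheap rename when the working and completed directories live on the same filesystem and falls back to FileUtil.copy otherwise. A simplified sketch of that decision, assuming the same call shapes as the hunk (it drops the userInfo comparison the patch retains, and the error message is invented):

```java
import java.io.IOException;
import java.net.URI;
import java.util.Objects;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CompleteSnapshotSketch {
  // Same decision as completeSnapshot() above: rename when the working and completed
  // directories share a filesystem, otherwise copy (deleting the source).
  static void commit(Path snapshotDir, Path workingDir, FileSystem fs, FileSystem workingDirFs,
      Configuration conf) throws IOException {
    URI workingURI = workingDirFs.getUri();
    URI rootURI = fs.getUri();
    boolean sameFs = Objects.equals(workingURI.getScheme(), rootURI.getScheme())
      && Objects.equals(workingURI.getAuthority(), rootURI.getAuthority());
    if (sameFs && fs.rename(workingDir, snapshotDir)) {
      return; // metadata-only move on the same filesystem
    }
    if (!FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)) {
      throw new IOException("Failed to move snapshot from " + workingDir + " to " + snapshotDir);
    }
  }
}
```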
+ * @param user the user + * @return true if the user is the owner of the snapshot, false otherwise or the snapshot owner + * field is not present. */ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDescription snapshot, - User user) { + User user) { if (user == null) return false; return user.getShortName().equals(snapshot.getOwner()); } public static boolean isSecurityAvailable(Configuration conf) throws IOException { - try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); } } private static SnapshotDescription writeAclToSnapshotDescription(SnapshotDescription snapshot, - Configuration conf) throws IOException { + Configuration conf) throws IOException { ListMultimap perms = - User.runAsLoginUser(new PrivilegedExceptionAction>() { - @Override - public ListMultimap run() throws Exception { - return PermissionStorage.getTablePermissions(conf, - TableName.valueOf(snapshot.getTable())); - } - }); + User.runAsLoginUser(new PrivilegedExceptionAction>() { + @Override + public ListMultimap run() throws Exception { + return PermissionStorage.getTablePermissions(conf, + TableName.valueOf(snapshot.getTable())); + } + }); return snapshot.toBuilder() - .setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build(); + .setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 8b69675d9efa..29dbeacda8ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -44,18 +43,16 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.org.apache.commons.cli.AlreadySelectedException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; -import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; +import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -64,10 +61,10 @@ /** * Tool for dumping snapshot information. *
<ol>
- * <li> Table Descriptor
- * <li> Snapshot creation time, type, format version, ...
- * <li> List of hfiles and wals
- * <li> Stats about hfiles and logs sizes, percentage of shared with the source table, ...
+ * <li>Table Descriptor
+ * <li>Snapshot creation time, type, format version, ...
+ * <li>List of hfiles and wals
+ * <li>Stats about hfiles and logs sizes, percentage of shared with the source table, ...
 * </ol>
    */ @InterfaceAudience.Public @@ -75,33 +72,33 @@ public final class SnapshotInfo extends AbstractHBaseTool { private static final Logger LOG = LoggerFactory.getLogger(SnapshotInfo.class); static final class Options { - static final Option SNAPSHOT = new Option(null, "snapshot", true, - "The name of the snapshot to be detailed."); - static final Option REMOTE_DIR = new Option(null, "remote-dir", true, - "A custom root directory where snapshots are stored. " - + "Use it together with the --snapshot option."); - static final Option LIST_SNAPSHOTS = new Option(null, "list-snapshots", false, - "List all the available snapshots and exit."); - static final Option FILES = new Option(null, "files", false, - "The list of files retained by the specified snapshot. " + static final Option SNAPSHOT = + new Option(null, "snapshot", true, "The name of the snapshot to be detailed."); + static final Option REMOTE_DIR = + new Option(null, "remote-dir", true, "A custom root directory where snapshots are stored. " + + "Use it together with the --snapshot option."); + static final Option LIST_SNAPSHOTS = + new Option(null, "list-snapshots", false, "List all the available snapshots and exit."); + static final Option FILES = + new Option(null, "files", false, "The list of files retained by the specified snapshot. " + "Use it together with the --snapshot option."); - static final Option STATS = new Option(null, "stats", false, - "Additional information about the specified snapshot. " + static final Option STATS = + new Option(null, "stats", false, "Additional information about the specified snapshot. " + "Use it together with the --snapshot option."); static final Option SCHEMA = new Option(null, "schema", false, - "Show the descriptor of the table for the specified snapshot. " - + "Use it together with the --snapshot option."); - static final Option SIZE_IN_BYTES = new Option(null, "size-in-bytes", false, - "Print the size of the files in bytes. " - + "Use it together with the --snapshot and --files options."); + "Show the descriptor of the table for the specified snapshot. " + + "Use it together with the --snapshot option."); + static final Option SIZE_IN_BYTES = + new Option(null, "size-in-bytes", false, "Print the size of the files in bytes. " + + "Use it together with the --snapshot and --files options."); } /** * Statistics about the snapshot *
<ol>
- * <li> How many store files and logs are in the archive
- * <li> How many store files and logs are shared with the table
- * <li> Total store files and logs size and shared amount
+ * <li>How many store files and logs are in the archive
+ * <li>How many store files and logs are shared with the table
+ * <li>Total store files and logs size and shared amount
 * </ol>
    */ public static class SnapshotStats { @@ -164,8 +161,7 @@ String getStateToString() { private final FileSystem fs; SnapshotStats(final Configuration conf, final FileSystem fs, - final SnapshotDescription snapshot) - { + final SnapshotDescription snapshot) { this.snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); this.snapshotTable = snapshot.getTableName(); this.conf = conf; @@ -173,14 +169,13 @@ String getStateToString() { } SnapshotStats(final Configuration conf, final FileSystem fs, - final SnapshotProtos.SnapshotDescription snapshot) { + final SnapshotProtos.SnapshotDescription snapshot) { this.snapshot = snapshot; this.snapshotTable = TableName.valueOf(snapshot.getTable()); this.conf = conf; this.fs = fs; } - /** @return the snapshot descriptor */ public SnapshotDescription getSnapshotDescription() { return ProtobufUtil.createSnapshotDesc(this.snapshot); @@ -188,9 +183,7 @@ public SnapshotDescription getSnapshotDescription() { /** @return true if the snapshot is corrupted */ public boolean isSnapshotCorrupted() { - return hfilesMissing.get() > 0 || - logsMissing.get() > 0 || - hfilesCorrupted.get() > 0; + return hfilesMissing.get() > 0 || logsMissing.get() > 0 || hfilesCorrupted.get() > 0; } /** @return the number of available store files */ @@ -204,7 +197,9 @@ public int getArchivedStoreFilesCount() { } /** @return the number of available store files in the mob dir */ - public int getMobStoreFilesCount() { return hfilesMobCount.get(); } + public int getMobStoreFilesCount() { + return hfilesMobCount.get(); + } /** @return the number of available log files */ public int getLogsCount() { @@ -241,15 +236,16 @@ public long getArchivedStoreFileSize() { return hfilesArchiveSize.get(); } - /** @return the total size of the store files in the mob store*/ - public long getMobStoreFilesSize() { return hfilesMobSize.get(); } + /** @return the total size of the store files in the mob store */ + public long getMobStoreFilesSize() { + return hfilesMobSize.get(); + } - /** @return the total size of the store files in the archive which is not shared - * with other snapshots and tables - * - * This is only calculated when - * {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} - * is called with a non-null Map + /** + * @return the total size of the store files in the archive which is not shared with other + * snapshots and tables This is only calculated when + * {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} is + * called with a non-null Map */ public long getNonSharedArchivedStoreFilesSize() { return nonSharedHfilesArchiveSize.get(); @@ -270,15 +266,15 @@ public long getLogsSize() { return logSize.get(); } - /** Check if for a give file in archive, if there are other snapshots/tables still - * reference it. - * @param filePath file path in archive - * @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer - * to it. + /** + * Check if for a give file in archive, if there are other snapshots/tables still reference it. + * @param filePath file path in archive + * @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer to + * it. 
* @return true or false */ private boolean isArchivedFileStillReferenced(final Path filePath, - final Map snapshotFilesMap) { + final Map snapshotFilesMap) { Integer c = snapshotFilesMap.get(filePath); @@ -301,17 +297,17 @@ private boolean isArchivedFileStillReferenced(final Path filePath, /** * Add the specified store file to the stats - * @param region region encoded Name - * @param family family name + * @param region region encoded Name + * @param family family name * @param storeFile store file name - * @param filesMap store files map for all snapshots, it may be null + * @param filesMap store files map for all snapshots, it may be null * @return the store file information */ FileInfo addStoreFile(final RegionInfo region, final String family, - final SnapshotRegionManifest.StoreFile storeFile, - final Map filesMap) throws IOException { - HFileLink link = HFileLink.build(conf, snapshotTable, region.getEncodedName(), - family, storeFile.getName()); + final SnapshotRegionManifest.StoreFile storeFile, final Map filesMap) + throws IOException { + HFileLink link = + HFileLink.build(conf, snapshotTable, region.getEncodedName(), family, storeFile.getName()); boolean isCorrupted = false; boolean inArchive = false; long size = -1; @@ -324,8 +320,9 @@ FileInfo addStoreFile(final RegionInfo region, final String family, // If store file is not shared with other snapshots and tables, // increase nonSharedHfilesArchiveSize - if ((filesMap != null) && - !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { + if ( + (filesMap != null) && !isArchivedFileStillReferenced(link.getArchivePath(), filesMap) + ) { nonSharedHfilesArchiveSize.addAndGet(size); } } else if (fs.exists(link.getMobPath())) { @@ -348,7 +345,7 @@ FileInfo addStoreFile(final RegionInfo region, final String family, /** * Add the specified log file to the stats - * @param server server name + * @param server server name * @param logfile log file name * @return the log information */ @@ -391,11 +388,10 @@ public int doWork() throws IOException, InterruptedException { if (listSnapshots) { SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); System.out.printf("%-20s | %-20s | %-20s | %s%n", "SNAPSHOT", "CREATION TIME", "TTL IN SEC", - "TABLE NAME"); - for (SnapshotDescription desc: getSnapshotList(conf)) { + "TABLE NAME"); + for (SnapshotDescription desc : getSnapshotList(conf)) { System.out.printf("%-20s | %20s | %20s | %s%n", desc.getName(), - df.format(new Date(desc.getCreationTime())), desc.getTtl(), - desc.getTableNameAsString()); + df.format(new Date(desc.getCreationTime())), desc.getTtl(), desc.getTableNameAsString()); } return 0; } @@ -432,7 +428,7 @@ private boolean loadSnapshotInfo(final String snapshotName) throws IOException { } SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc); return true; } @@ -466,8 +462,8 @@ private void printSchema() { } /** - * Collect the hfiles and logs statistics of the snapshot and - * dump the file list if requested and the collected information. + * Collect the hfiles and logs statistics of the snapshot and dump the file list if requested and + * the collected information. 
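addStoreFile(), shown in the hunks above, locates each referenced hfile by probing the archive, then the mob area, and finally the file's original location through an HFileLink. A sketch of that lookup order, using only the calls visible in the hunk (the sketch's own class and method names are invented):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class StoreFileLocationSketch {
  // Lookup order taken from addStoreFile() above: archive first, then the mob area,
  // and finally the file's original location in the live table.
  static long sizeOf(Configuration conf, FileSystem fs, TableName table, String encodedRegion,
      String family, String hfileName) throws IOException {
    HFileLink link = HFileLink.build(conf, table, encodedRegion, family, hfileName);
    if (fs.exists(link.getArchivePath())) {
      return fs.getFileStatus(link.getArchivePath()).getLen(); // archived copy
    } else if (fs.exists(link.getMobPath())) {
      return fs.getFileStatus(link.getMobPath()).getLen(); // mob store copy
    }
    return link.getFileStatus(fs).getLen(); // still referenced from the source table
  }
}
```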
*/ private void printFiles(final boolean showFiles, final boolean showStats) throws IOException { if (showFiles) { @@ -476,28 +472,28 @@ private void printFiles(final boolean showFiles, final boolean showStats) throws } // Collect information about hfiles and logs in the snapshot - final SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); + final SnapshotProtos.SnapshotDescription snapshotDesc = + snapshotManifest.getSnapshotDescription(); final String table = snapshotDesc.getTable(); final SnapshotDescription desc = ProtobufUtil.createSnapshotDesc(snapshotDesc); final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest, - "SnapshotInfo", - new SnapshotReferenceUtil.SnapshotVisitor() { + "SnapshotInfo", new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { if (storeFile.hasReference()) return; SnapshotStats.FileInfo info = stats.addStoreFile(regionInfo, family, storeFile, null); if (showFiles) { String state = info.getStateToString(); System.out.printf("%8s %s/%s/%s/%s %s%n", - (info.isMissing() ? "-" : fileSizeToString(info.getSize())), - table, regionInfo.getEncodedName(), family, storeFile.getName(), + (info.isMissing() ? "-" : fileSizeToString(info.getSize())), table, + regionInfo.getEncodedName(), family, storeFile.getName(), state == null ? "" : "(" + state + ")"); } } - }); + }); // Dump the stats System.out.println(); @@ -511,18 +507,15 @@ public void storeFile(final RegionInfo regionInfo, final String family, } if (showStats) { - System.out.printf("%d HFiles (%d in archive, %d in mob storage), total size %s " + - "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", + System.out.printf( + "%d HFiles (%d in archive, %d in mob storage), total size %s " + + "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", stats.getStoreFilesCount(), stats.getArchivedStoreFilesCount(), - stats.getMobStoreFilesCount(), - fileSizeToString(stats.getStoreFilesSize()), - stats.getSharedStoreFilePercentage(), - fileSizeToString(stats.getSharedStoreFilesSize()), - stats.getMobStoreFilePercentage(), - fileSizeToString(stats.getMobStoreFilesSize()) - ); - System.out.printf("%d Logs, total size %s%n", - stats.getLogsCount(), fileSizeToString(stats.getLogsSize())); + stats.getMobStoreFilesCount(), fileSizeToString(stats.getStoreFilesSize()), + stats.getSharedStoreFilePercentage(), fileSizeToString(stats.getSharedStoreFilesSize()), + stats.getMobStoreFilePercentage(), fileSizeToString(stats.getMobStoreFilesSize())); + System.out.printf("%d Logs, total size %s%n", stats.getLogsCount(), + fileSizeToString(stats.getLogsSize())); System.out.println(); } } @@ -542,17 +535,16 @@ protected void addOptions() { addOption(Options.SIZE_IN_BYTES); } - @Override protected CommandLineParser newParser() { // Commons-CLI lacks the capability to handle combinations of options, so we do it ourselves // Validate in parse() to get helpful error messages instead of exploding in processOptions() return new DefaultParser() { @Override - public CommandLine parse(org.apache.hbase.thirdparty.org.apache.commons.cli.Options opts, String[] args, Properties props, boolean stop) - throws ParseException { + public CommandLine 
parse(org.apache.hbase.thirdparty.org.apache.commons.cli.Options opts, + String[] args, Properties props, boolean stop) throws ParseException { CommandLine cl = super.parse(opts, args, props, stop); - if(!cmd.hasOption(Options.LIST_SNAPSHOTS) && !cmd.hasOption(Options.SNAPSHOT)) { + if (!cmd.hasOption(Options.LIST_SNAPSHOTS) && !cmd.hasOption(Options.SNAPSHOT)) { throw new ParseException("Missing required snapshot option!"); } return cl; @@ -564,8 +556,8 @@ public CommandLine parse(org.apache.hbase.thirdparty.org.apache.commons.cli.Opti protected void processOptions(CommandLine cmd) { snapshotName = cmd.getOptionValue(Options.SNAPSHOT.getLongOpt()); showFiles = cmd.hasOption(Options.FILES.getLongOpt()); - showStats = cmd.hasOption(Options.FILES.getLongOpt()) - || cmd.hasOption(Options.STATS.getLongOpt()); + showStats = + cmd.hasOption(Options.FILES.getLongOpt()) || cmd.hasOption(Options.STATS.getLongOpt()); showSchema = cmd.hasOption(Options.SCHEMA.getLongOpt()); listSnapshots = cmd.hasOption(Options.LIST_SNAPSHOTS.getLongOpt()); printSizeInBytes = cmd.hasOption(Options.SIZE_IN_BYTES.getLongOpt()); @@ -583,12 +575,12 @@ protected void printUsage() { /** * Returns the snapshot stats - * @param conf the {@link Configuration} to use + * @param conf the {@link Configuration} to use * @param snapshot {@link SnapshotDescription} to get stats from * @return the snapshot stats */ public static SnapshotStats getSnapshotStats(final Configuration conf, - final SnapshotDescription snapshot) throws IOException { + final SnapshotDescription snapshot) throws IOException { SnapshotProtos.SnapshotDescription snapshotDesc = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); return getSnapshotStats(conf, snapshotDesc, null); @@ -596,28 +588,29 @@ public static SnapshotStats getSnapshotStats(final Configuration conf, /** * Returns the snapshot stats - * @param conf the {@link Configuration} to use - * @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from - * @param filesMap {@link Map} store files map for all snapshots, it may be null + * @param conf the {@link Configuration} to use + * @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from + * @param filesMap {@link Map} store files map for all snapshots, it may be null * @return the snapshot stats */ public static SnapshotStats getSnapshotStats(final Configuration conf, - final SnapshotProtos.SnapshotDescription snapshotDesc, - final Map filesMap) throws IOException { + final SnapshotProtos.SnapshotDescription snapshotDesc, final Map filesMap) + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); final SnapshotStats stats = new SnapshotStats(conf, fs, snapshotDesc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, - "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() { - @Override - public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - if (!storeFile.hasReference()) { - stats.addStoreFile(regionInfo, family, storeFile, filesMap); - } - }}); + "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile 
storeFile) throws IOException { + if (!storeFile.hasReference()) { + stats.addStoreFile(regionInfo, family, storeFile, filesMap); + } + } + }); return stats; } @@ -627,16 +620,16 @@ public void storeFile(final RegionInfo regionInfo, final String family, * @return the list of snapshots */ public static List getSnapshotList(final Configuration conf) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir); FileStatus[] snapshots = fs.listStatus(snapshotDir, - new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); + new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); List snapshotLists = new ArrayList<>(snapshots.length); - for (FileStatus snapshotDirStat: snapshots) { + for (FileStatus snapshotDirStat : snapshots) { SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); snapshotLists.add(ProtobufUtil.createSnapshotDesc(snapshotDesc)); } return snapshotLists; @@ -644,81 +637,80 @@ public static List getSnapshotList(final Configuration conf /** * Gets the store files map for snapshot - * @param conf the {@link Configuration} to use - * @param snapshot {@link SnapshotDescription} to get stats from - * @param exec the {@link ExecutorService} to use - * @param filesMap {@link Map} the map to put the mapping entries + * @param conf the {@link Configuration} to use + * @param snapshot {@link SnapshotDescription} to get stats from + * @param exec the {@link ExecutorService} to use + * @param filesMap {@link Map} the map to put the mapping entries * @param uniqueHFilesArchiveSize {@link AtomicLong} the accumulated store file size in archive - * @param uniqueHFilesSize {@link AtomicLong} the accumulated store file size shared - * @param uniqueHFilesMobSize {@link AtomicLong} the accumulated mob store file size shared + * @param uniqueHFilesSize {@link AtomicLong} the accumulated store file size shared + * @param uniqueHFilesMobSize {@link AtomicLong} the accumulated mob store file size shared */ private static void getSnapshotFilesMap(final Configuration conf, - final SnapshotDescription snapshot, final ExecutorService exec, - final ConcurrentHashMap filesMap, - final AtomicLong uniqueHFilesArchiveSize, final AtomicLong uniqueHFilesSize, - final AtomicLong uniqueHFilesMobSize) throws IOException { + final SnapshotDescription snapshot, final ExecutorService exec, + final ConcurrentHashMap filesMap, final AtomicLong uniqueHFilesArchiveSize, + final AtomicLong uniqueHFilesSize, final AtomicLong uniqueHFilesMobSize) throws IOException { SnapshotProtos.SnapshotDescription snapshotDesc = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); Path rootDir = CommonFSUtils.getRootDir(conf); final FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, exec, - new SnapshotReferenceUtil.SnapshotVisitor() { - @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - if 
(!storeFile.hasReference()) { - HFileLink link = HFileLink.build(conf, snapshot.getTableName(), - regionInfo.getEncodedName(), family, storeFile.getName()); - long size; - Integer count; - Path p; - AtomicLong al; - int c = 0; - - if (fs.exists(link.getArchivePath())) { - p = link.getArchivePath(); - al = uniqueHFilesArchiveSize; - size = fs.getFileStatus(p).getLen(); - } else if (fs.exists(link.getMobPath())) { - p = link.getMobPath(); - al = uniqueHFilesMobSize; - size = fs.getFileStatus(p).getLen(); - } else { - p = link.getOriginPath(); - al = uniqueHFilesSize; - size = link.getFileStatus(fs).getLen(); - } - - // If it has been counted, do not double count - count = filesMap.get(p); - if (count != null) { - c = count.intValue(); - } else { - al.addAndGet(size); - } - - filesMap.put(p, ++c); + new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (!storeFile.hasReference()) { + HFileLink link = HFileLink.build(conf, snapshot.getTableName(), + regionInfo.getEncodedName(), family, storeFile.getName()); + long size; + Integer count; + Path p; + AtomicLong al; + int c = 0; + + if (fs.exists(link.getArchivePath())) { + p = link.getArchivePath(); + al = uniqueHFilesArchiveSize; + size = fs.getFileStatus(p).getLen(); + } else if (fs.exists(link.getMobPath())) { + p = link.getMobPath(); + al = uniqueHFilesMobSize; + size = fs.getFileStatus(p).getLen(); + } else { + p = link.getOriginPath(); + al = uniqueHFilesSize; + size = link.getFileStatus(fs).getLen(); + } + + // If it has been counted, do not double count + count = filesMap.get(p); + if (count != null) { + c = count.intValue(); + } else { + al.addAndGet(size); } + + filesMap.put(p, ++c); } - }); + } + }); } /** * Returns the map of store files based on path for all snapshots - * @param conf the {@link Configuration} to use + * @param conf the {@link Configuration} to use * @param uniqueHFilesArchiveSize pass out the size for store files in archive - * @param uniqueHFilesSize pass out the size for store files shared - * @param uniqueHFilesMobSize pass out the size for mob store files shared + * @param uniqueHFilesSize pass out the size for store files shared + * @param uniqueHFilesMobSize pass out the size for mob store files shared * @return the map of store files */ public static Map getSnapshotsFilesMap(final Configuration conf, - AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, - AtomicLong uniqueHFilesMobSize) throws IOException { + AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, AtomicLong uniqueHFilesMobSize) + throws IOException { List snapshotList = getSnapshotList(conf); - if (snapshotList.isEmpty()) { return Collections.emptyMap(); } @@ -730,7 +722,7 @@ public static Map getSnapshotsFilesMap(final Configuration conf, try { for (final SnapshotDescription snapshot : snapshotList) { getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize, - uniqueHFilesSize, uniqueHFilesMobSize); + uniqueHFilesSize, uniqueHFilesMobSize); } } finally { exec.shutdown(); @@ -739,7 +731,6 @@ public static Map getSnapshotsFilesMap(final Configuration conf, return fileMap; } - public static void main(String[] args) { new SnapshotInfo().doStaticMain(args); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 
f154aa92cd6e..e19be3f554b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -66,11 +65,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Utility class to help read/write the Snapshot Manifest. - * - * The snapshot format is transparent for the users of this class, - * once the snapshot is written, it will never be modified. - * On open() the snapshot will be loaded to the current in-memory format. + * Utility class to help read/write the Snapshot Manifest. The snapshot format is transparent for + * the users of this class, once the snapshot is written, it will never be modified. On open() the + * snapshot will be loaded to the current in-memory format. */ @InterfaceAudience.Private public final class SnapshotManifest { @@ -93,18 +90,17 @@ public final class SnapshotManifest { private final MonitoredTask statusTask; /** - * - * @param conf configuration file for HBase setup - * @param rootFs root filesystem containing HFiles + * @param conf configuration file for HBase setup + * @param rootFs root filesystem containing HFiles * @param workingDir file path of where the manifest should be located - * @param desc description of snapshot being taken - * @param monitor monitor of foreign exceptions - * @throws IOException if the working directory file system cannot be - * determined from the config file + * @param desc description of snapshot being taken + * @param monitor monitor of foreign exceptions + * @throws IOException if the working directory file system cannot be determined from the config + * file */ - private SnapshotManifest(final Configuration conf, final FileSystem rootFs, - final Path workingDir, final SnapshotDescription desc, - final ForeignExceptionSnare monitor, final MonitoredTask statusTask) throws IOException { + private SnapshotManifest(final Configuration conf, final FileSystem rootFs, final Path workingDir, + final SnapshotDescription desc, final ForeignExceptionSnare monitor, + final MonitoredTask statusTask) throws IOException { this.monitor = monitor; this.desc = desc; this.workingDir = workingDir; @@ -116,49 +112,40 @@ private SnapshotManifest(final Configuration conf, final FileSystem rootFs, } /** - * Return a SnapshotManifest instance, used for writing a snapshot. - * - * There are two usage pattern: - * - The Master will create a manifest, add the descriptor, offline regions - * and consolidate the snapshot by writing all the pending stuff on-disk. - * manifest = SnapshotManifest.create(...) - * manifest.addRegion(tableDir, hri) - * manifest.consolidate() - * - The RegionServer will create a single region manifest - * manifest = SnapshotManifest.create(...) - * manifest.addRegion(region) + * Return a SnapshotManifest instance, used for writing a snapshot. There are two usage pattern: - + * The Master will create a manifest, add the descriptor, offline regions and consolidate the + * snapshot by writing all the pending stuff on-disk. 
manifest = SnapshotManifest.create(...) + * manifest.addRegion(tableDir, hri) manifest.consolidate() - The RegionServer will create a + * single region manifest manifest = SnapshotManifest.create(...) manifest.addRegion(region) */ public static SnapshotManifest create(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc, - final ForeignExceptionSnare monitor) throws IOException { + final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor) + throws IOException { return create(conf, fs, workingDir, desc, monitor, null); } public static SnapshotManifest create(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor, - final MonitoredTask statusTask) throws IOException { + final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor, + final MonitoredTask statusTask) throws IOException { return new SnapshotManifest(conf, fs, workingDir, desc, monitor, statusTask); } /** * Return a SnapshotManifest instance with the information already loaded in-memory. - * SnapshotManifest manifest = SnapshotManifest.open(...) - * TableDescriptor htd = manifest.getDescriptor() - * for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests()) - * hri = regionManifest.getRegionInfo() - * for (regionManifest.getFamilyFiles()) - * ... + * SnapshotManifest manifest = SnapshotManifest.open(...) TableDescriptor htd = + * manifest.getDescriptor() for (SnapshotRegionManifest regionManifest: + * manifest.getRegionManifests()) hri = regionManifest.getRegionInfo() for + * (regionManifest.getFamilyFiles()) ... */ public static SnapshotManifest open(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc) throws IOException { + final Path workingDir, final SnapshotDescription desc) throws IOException { SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null, null); manifest.load(); return manifest; } - /** * Add the table descriptor to the snapshot manifest */ @@ -168,9 +155,11 @@ public void addTableDescriptor(final TableDescriptor htd) throws IOException { interface RegionVisitor { TRegion regionOpen(final RegionInfo regionInfo) throws IOException; + void regionClose(final TRegion region) throws IOException; TFamily familyOpen(final TRegion region, final byte[] familyName) throws IOException; + void familyClose(final TRegion region, final TFamily family) throws IOException; void storeFile(final TRegion region, final TFamily family, final StoreFileInfo storeFile) @@ -184,8 +173,8 @@ private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws case SnapshotManifestV2.DESCRIPTOR_VERSION: return new SnapshotManifestV2.ManifestBuilder(conf, rootFs, workingDir); default: - throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), - ProtobufUtil.createSnapshotDesc(desc)); + throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), + ProtobufUtil.createSnapshotDesc(desc)); } } @@ -256,13 +245,13 @@ protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOE for (HStore store : region.getStores()) { // 2.1. 
build the snapshot reference for the store - Object familyData = visitor.familyOpen(regionData, - store.getColumnFamilyDescriptor().getName()); + Object familyData = + visitor.familyOpen(regionData, store.getColumnFamilyDescriptor().getName()); monitor.rethrowException(); List storeFiles = new ArrayList<>(store.getStorefiles()); if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } // 2.2. iterate through all the store's files and create "references". @@ -271,8 +260,8 @@ protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOE monitor.rethrowException(); // create "reference" to this store file. - LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath() + - " for snapshot=" + snapshotName); + LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath() + + " for snapshot=" + snapshotName); visitor.storeFile(regionData, familyData, storeFile.getFileInfo()); } visitor.familyClose(regionData, familyData); @@ -281,8 +270,8 @@ protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOE } /** - * Creates a 'manifest' for the specified region, by reading directly from the disk. - * This is used by the "offline snapshot" when the table is disabled. + * Creates a 'manifest' for the specified region, by reading directly from the disk. This is used + * by the "offline snapshot" when the table is disabled. */ public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws IOException { // Get the ManifestBuilder/RegionVisitor @@ -353,7 +342,7 @@ private List getStoreFiles(Path storeDir) throws IOException { } private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object familyData, - Collection storeFiles, boolean isMob) throws IOException { + Collection storeFiles, boolean isMob) throws IOException { final String fileType = isMob ? "mob file" : "hfile"; if (LOG.isDebugEnabled()) { @@ -362,11 +351,11 @@ private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object int i = 0; int sz = storeFiles.size(); - for (StoreFileInfo storeFile: storeFiles) { + for (StoreFileInfo storeFile : storeFiles) { monitor.rethrowException(); - LOG.debug(String.format("Adding reference for %s (%d/%d): %s", - fileType, ++i, sz, storeFile.getPath())); + LOG.debug(String.format("Adding reference for %s (%d/%d): %s", fileType, ++i, sz, + storeFile.getPath())); // create "reference" to this store file. visitor.storeFile(regionData, familyData, storeFile); @@ -374,11 +363,9 @@ private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object } /** - * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() - * - * If the format is v2 and there is no data-manifest, means that we are loading an - * in-progress snapshot. Since we support rolling-upgrades, we loook for v1 and v2 - * regions format. + * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() If the format + * is v2 and there is no data-manifest, means that we are loading an in-progress snapshot. Since + * we support rolling-upgrades, we loook for v1 and v2 regions format. 
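The SnapshotManifest class Javadoc reflowed above still documents the two intended call sequences: a writer builds and consolidates a manifest, while readers open an existing one. A sketch of both sequences under that assumption (descriptor and monitor construction is omitted and left to the caller; the sketch's class and helper names are invented):

```java
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;

public class SnapshotManifestUsageSketch {
  // Write side (offline snapshot): describe each region from disk, then consolidate
  // everything into the single v2 data manifest.
  static void writeSide(Configuration conf, FileSystem fs, Path workingDir,
      SnapshotDescription desc, ForeignExceptionSnare monitor, Path tableDir, RegionInfo hri)
      throws IOException {
    SnapshotManifest manifest = SnapshotManifest.create(conf, fs, workingDir, desc, monitor);
    manifest.addRegion(tableDir, hri);
    manifest.consolidate();
  }

  // Read side: load an existing snapshot and walk its per-region manifests.
  static void readSide(Configuration conf, FileSystem fs, Path snapshotDir,
      SnapshotDescription desc) throws IOException {
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
    Map<String, SnapshotRegionManifest> byRegion = manifest.getRegionManifestsMap();
    if (byRegion != null) { // null when the snapshot contains no regions
      byRegion.keySet().forEach(System.out::println);
    }
  }
}
```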
*/ private void load() throws IOException { switch (getSnapshotFormat(desc)) { @@ -404,13 +391,13 @@ private void load() throws IOException { List v1Regions, v2Regions; ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { - v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, - workingDir, desc); - v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, - workingDir, desc, manifestSizeLimit); + v1Regions = + SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); + v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, workingDir, + desc, manifestSizeLimit); } catch (InvalidProtocolBufferException e) { - throw new CorruptedSnapshotException("unable to parse region manifest " + - e.getMessage(), e); + throw new CorruptedSnapshotException( + "unable to parse region manifest " + e.getMessage(), e); } finally { tpool.shutdown(); } @@ -427,8 +414,8 @@ private void load() throws IOException { break; } default: - throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), - ProtobufUtil.createSnapshotDesc(desc)); + throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), + ProtobufUtil.createSnapshotDesc(desc)); } } @@ -467,14 +454,14 @@ private void setStatusMsg(String msg) { } /** - * Get all the Region Manifest from the snapshot. - * This is an helper to get a map with the region encoded name + * Get all the Region Manifest from the snapshot. This is an helper to get a map with the region + * encoded name */ public Map getRegionManifestsMap() { if (regionManifests == null || regionManifests.isEmpty()) return null; HashMap regionsMap = new HashMap<>(regionManifests.size()); - for (SnapshotRegionManifest manifest: regionManifests) { + for (SnapshotRegionManifest manifest : regionManifests) { String regionName = getRegionNameFromManifest(manifest); regionsMap.put(regionName, manifest); } @@ -486,7 +473,7 @@ public void consolidate() throws IOException { LOG.info("Using old Snapshot Format"); // write a copy of descriptor to the snapshot directory FSTableDescriptors.createTableDescriptorForTableDirectory(workingDirFs, workingDir, htd, - false); + false); } else { LOG.debug("Convert to Single Snapshot Manifest for {}", this.desc.getName()); convertToV2SingleManifest(); @@ -494,8 +481,8 @@ public void consolidate() throws IOException { } /* - * In case of rolling-upgrade, we try to read all the formats and build - * the snapshot with the latest format. + * In case of rolling-upgrade, we try to read all the formats and build the snapshot with the + * latest format. 
*/ private void convertToV2SingleManifest() throws IOException { // Try to load v1 and v2 regions @@ -503,10 +490,10 @@ private void convertToV2SingleManifest() throws IOException { ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); setStatusMsg("Loading Region manifests for " + this.desc.getName()); try { - v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, - workingDir, desc); - v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, - workingDir, desc, manifestSizeLimit); + v1Regions = + SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc); + v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, workingDir, + desc, manifestSizeLimit); SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder(); dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd)); @@ -535,7 +522,7 @@ private void convertToV2SingleManifest() throws IOException { int totalDeletes = 0; ExecutorCompletionService completionService = new ExecutorCompletionService<>(tpool); if (v1Regions != null) { - for (SnapshotRegionManifest regionManifest: v1Regions) { + for (SnapshotRegionManifest regionManifest : v1Regions) { ++totalDeletes; completionService.submit(() -> { SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest); @@ -544,7 +531,7 @@ private void convertToV2SingleManifest() throws IOException { } } if (v2Regions != null) { - for (SnapshotRegionManifest regionManifest: v2Regions) { + for (SnapshotRegionManifest regionManifest : v2Regions) { ++totalDeletes; completionService.submit(() -> { SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest); @@ -570,9 +557,9 @@ private void convertToV2SingleManifest() throws IOException { /* * Write the SnapshotDataManifest file */ - private void writeDataManifest(final SnapshotDataManifest manifest) - throws IOException { - try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { + private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException { + try ( + FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { manifest.writeTo(stream); } } @@ -607,10 +594,10 @@ public static ThreadPoolExecutor createExecutor(final Configuration conf, final * Extract the region encoded name from the region manifest */ static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) { - byte[] regionName = RegionInfo.createRegionName( - ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), - manifest.getRegionInfo().getStartKey().toByteArray(), - manifest.getRegionInfo().getRegionId(), true); + byte[] regionName = + RegionInfo.createRegionName(ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), + manifest.getRegionInfo().getStartKey().toByteArray(), + manifest.getRegionInfo().getRegionId(), true); return RegionInfo.encodeRegionName(regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index b1eca35febf2..61c366de971a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -48,13 +47,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. - * - * Snapshot v1 layout format - * - Each region in the table is represented by a directory with the .hregioninfo file - * /snapshotName/regionName/.hregioninfo - * - Each file present in the table is represented by an empty file - * /snapshotName/regionName/familyName/fileName + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. Snapshot v1 layout format - Each region in the + * table is represented by a directory with the .hregioninfo file + * /snapshotName/regionName/.hregioninfo - Each file present in the table is represented by an empty + * file /snapshotName/regionName/familyName/fileName */ @InterfaceAudience.Private public final class SnapshotManifestV1 { @@ -65,15 +61,14 @@ public final class SnapshotManifestV1 { private SnapshotManifestV1() { } - static class ManifestBuilder implements SnapshotManifest.RegionVisitor< - HRegionFileSystem, Path> { + static class ManifestBuilder implements SnapshotManifest.RegionVisitor { private final Configuration conf; private final Path snapshotDir; private final FileSystem rootFs; private final FileSystem workingDirFs; public ManifestBuilder(final Configuration conf, final FileSystem rootFs, - final Path snapshotDir) throws IOException { + final Path snapshotDir) throws IOException { this.snapshotDir = snapshotDir; this.conf = conf; this.rootFs = rootFs; @@ -82,8 +77,8 @@ public ManifestBuilder(final Configuration conf, final FileSystem rootFs, @Override public HRegionFileSystem regionOpen(final RegionInfo regionInfo) throws IOException { - HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf, - workingDirFs, snapshotDir, regionInfo); + HRegionFileSystem snapshotRegionFs = + HRegionFileSystem.createRegionOnFileSystem(conf, workingDirFs, snapshotDir, regionInfo); return snapshotRegionFs; } @@ -103,15 +98,15 @@ public void familyClose(final HRegionFileSystem region, final Path family) { @Override public void storeFile(final HRegionFileSystem region, final Path familyDir, - final StoreFileInfo storeFile) throws IOException { + final StoreFileInfo storeFile) throws IOException { Path referenceFile = new Path(familyDir, storeFile.getPath().getName()); boolean success = true; if (storeFile.isReference()) { // write the Reference object to the snapshot storeFile.getReference().write(workingDirFs, referenceFile); } else { - // create "reference" to this store file. It is intentionally an empty file -- all - // necessary information is captured by its fs location and filename. This allows us to + // create "reference" to this store file. It is intentionally an empty file -- all + // necessary information is captured by its fs location and filename. This allows us to // only figure out what needs to be done via a single nn operation (instead of having to // open and read the files as well). 
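In the v1 layout described above, a snapshotted store file is normally recorded as an empty marker file whose directory and name alone identify the original hfile; only split references serialize actual content. A sketch of the empty-marker branch (the split-Reference branch kept by the patch is omitted, and the error message is invented):

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class V1ReferenceSketch {
  // A plain store file is tracked by an empty marker: the family directory plus the file
  // name identify the original hfile, so a single createNewFile() call is enough.
  static void addEmptyReference(FileSystem workingDirFs, Path familyDir, String hfileName)
      throws IOException {
    Path referenceFile = new Path(familyDir, hfileName);
    if (!workingDirFs.createNewFile(referenceFile)) {
      throw new IOException("Failed to create reference file " + referenceFile);
    }
  }
}
```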
success = workingDirFs.createNewFile(referenceFile); @@ -123,8 +118,8 @@ public void storeFile(final HRegionFileSystem region, final Path familyDir, } static List loadRegionManifests(final Configuration conf, - final Executor executor,final FileSystem fs, final Path snapshotDir, - final SnapshotDescription desc) throws IOException { + final Executor executor, final FileSystem fs, final Path snapshotDir, + final SnapshotDescription desc) throws IOException { FileStatus[] regions = CommonFSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { @@ -134,7 +129,7 @@ static List loadRegionManifests(final Configuration conf final ExecutorCompletionService completionService = new ExecutorCompletionService<>(executor); - for (final FileStatus region: regions) { + for (final FileStatus region : regions) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { @@ -158,15 +153,15 @@ public SnapshotRegionManifest call() throws IOException { } static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, - final SnapshotRegionManifest manifest) throws IOException { + final SnapshotRegionManifest manifest) throws IOException { String regionName = SnapshotManifest.getRegionNameFromManifest(manifest); fs.delete(new Path(snapshotDir, regionName), true); } - static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, - final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException { - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, - tableDir, regionInfo, true); + static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, final FileSystem fs, + final Path tableDir, final RegionInfo regionInfo) throws IOException { + HRegionFileSystem regionFs = + HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, true); SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); // 1. dump region meta info into the snapshot directory @@ -183,7 +178,7 @@ static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, // files/batch, far more than the number of store files under a single column family. Collection familyNames = regionFs.getFamilies(); if (familyNames != null) { - for (String familyName: familyNames) { + for (String familyName : familyNames) { Collection storeFiles = regionFs.getStoreFiles(familyName, false); if (storeFiles == null) { LOG.debug("No files under family: " + familyName); @@ -192,21 +187,21 @@ static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, // 2.1. build the snapshot reference for the store SnapshotRegionManifest.FamilyFiles.Builder family = - SnapshotRegionManifest.FamilyFiles.newBuilder(); + SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(familyName))); if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } // 2.2. iterate through all the store's files and create "references". int i = 0; int sz = storeFiles.size(); - for (StoreFileInfo storeFile: storeFiles) { + for (StoreFileInfo storeFile : storeFiles) { // create "reference" to this store file. 
- LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath()); + LOG.debug("Adding reference for file (" + (++i) + "/" + sz + "): " + storeFile.getPath()); SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(storeFile.getPath().getName()); family.addStoreFiles(sfManifest.build()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index ae914f69b5cc..90b7f8a1c5ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -49,12 +48,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. - * - * Snapshot v2 layout format - * - Single Manifest file containing all the information of regions - * - In the online-snapshot case each region will write a "region manifest" - * /snapshotName/manifest.regionName + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. Snapshot v2 layout format - Single Manifest + * file containing all the information of regions - In the online-snapshot case each region will + * write a "region manifest" /snapshotName/manifest.regionName */ @InterfaceAudience.Private public final class SnapshotManifestV2 { @@ -64,16 +60,17 @@ public final class SnapshotManifestV2 { public static final String SNAPSHOT_MANIFEST_PREFIX = "region-manifest."; - private SnapshotManifestV2() {} + private SnapshotManifestV2() { + } static class ManifestBuilder implements SnapshotManifest.RegionVisitor< - SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> { + SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> { private final Configuration conf; private final Path snapshotDir; private final FileSystem rootFs; public ManifestBuilder(final Configuration conf, final FileSystem rootFs, - final Path snapshotDir) { + final Path snapshotDir) { this.snapshotDir = snapshotDir; this.conf = conf; this.rootFs = rootFs; @@ -93,8 +90,8 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); - try (FSDataOutputStream stream = workingDirFs.create( - getRegionManifestPath(snapshotDir, manifest))) { + try (FSDataOutputStream stream = + workingDirFs.create(getRegionManifestPath(snapshotDir, manifest))) { manifest.writeTo(stream); } } else { @@ -103,26 +100,26 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx } @Override - public SnapshotRegionManifest.FamilyFiles.Builder familyOpen( - final SnapshotRegionManifest.Builder region, final byte[] familyName) { + public SnapshotRegionManifest.FamilyFiles.Builder + familyOpen(final SnapshotRegionManifest.Builder 
region, final byte[] familyName) { SnapshotRegionManifest.FamilyFiles.Builder family = - SnapshotRegionManifest.FamilyFiles.newBuilder(); + SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(familyName)); return family; } @Override public void familyClose(final SnapshotRegionManifest.Builder region, - final SnapshotRegionManifest.FamilyFiles.Builder family) { + final SnapshotRegionManifest.FamilyFiles.Builder family) { region.addFamilyFiles(family.build()); } @Override public void storeFile(final SnapshotRegionManifest.Builder region, - final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) - throws IOException { + final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) + throws IOException { SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(storeFile.getPath().getName()); if (storeFile.isReference()) { sfManifest.setReference(storeFile.getReference().convert()); @@ -137,8 +134,8 @@ public void storeFile(final SnapshotRegionManifest.Builder region, } static List loadRegionManifests(final Configuration conf, - final Executor executor, final FileSystem fs, final Path snapshotDir, - final SnapshotDescription desc, final int manifestSizeLimit) throws IOException { + final Executor executor, final FileSystem fs, final Path snapshotDir, + final SnapshotDescription desc, final int manifestSizeLimit) throws IOException { FileStatus[] manifestFiles = CommonFSUtils.listStatus(fs, snapshotDir, new PathFilter() { @Override public boolean accept(Path path) { @@ -150,7 +147,7 @@ public boolean accept(Path path) { final ExecutorCompletionService completionService = new ExecutorCompletionService<>(executor); - for (final FileStatus st: manifestFiles) { + for (final FileStatus st : manifestFiles) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { @@ -173,8 +170,8 @@ public SnapshotRegionManifest call() throws IOException { } catch (ExecutionException e) { Throwable t = e.getCause(); - if(t instanceof InvalidProtocolBufferException) { - throw (InvalidProtocolBufferException)t; + if (t instanceof InvalidProtocolBufferException) { + throw (InvalidProtocolBufferException) t; } else { throw new IOException("ExecutionException", e.getCause()); } @@ -183,12 +180,12 @@ public SnapshotRegionManifest call() throws IOException { } static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, - final SnapshotRegionManifest manifest) throws IOException { + final SnapshotRegionManifest manifest) throws IOException { fs.delete(getRegionManifestPath(snapshotDir, manifest), true); } private static Path getRegionManifestPath(final Path snapshotDir, - final SnapshotRegionManifest manifest) { + final SnapshotRegionManifest manifest) { String regionName = SnapshotManifest.getRegionNameFromManifest(manifest); return new Path(snapshotDir, SNAPSHOT_MANIFEST_PREFIX + regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java index 2e8c0dfdff96..cf02f74f8126 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java @@ -1,4 +1,4 @@ -/** +/* * 
Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -28,7 +27,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -42,6 +40,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; @@ -55,7 +54,7 @@ public final class SnapshotReferenceUtil { public interface StoreFileVisitor { void storeFile(final RegionInfo regionInfo, final String familyName, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException; + final SnapshotRegionManifest.StoreFile storeFile) throws IOException; } public interface SnapshotVisitor extends StoreFileVisitor { @@ -67,49 +66,45 @@ private SnapshotReferenceUtil() { /** * Iterate over the snapshot store files - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory - * @param visitor callback object to get the referenced files + * @param visitor callback object to get the referenced files * @throws IOException if an error occurred while scanning the directory */ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotVisitor visitor) throws IOException { SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); visitReferencedFiles(conf, fs, snapshotDir, desc, visitor); } /** * Iterate over the snapshot store files, restored.edits and logs - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory - * @param desc the {@link SnapshotDescription} of the snapshot to verify - * @param visitor callback object to get the referenced files + * @param desc the {@link SnapshotDescription} of the snapshot to verify + * @param visitor callback object to get the referenced files * @throws IOException if an error occurred while scanning the directory */ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor) + throws IOException { visitTableStoreFiles(conf, fs, snapshotDir, desc, visitor); } - /**© - * Iterate over the snapshot store files - * - * @param conf The current {@link Configuration} instance. 
- * @param fs {@link FileSystem} + /** + * © Iterate over the snapshot store files + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory - * @param desc the {@link SnapshotDescription} of the snapshot to verify - * @param visitor callback object to get the store files + * @param desc the {@link SnapshotDescription} of the snapshot to verify + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor) + throws IOException { SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc); List regionManifests = manifest.getRegionManifests(); if (regionManifests == null || regionManifests.isEmpty()) { @@ -117,24 +112,23 @@ static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, return; } - for (SnapshotRegionManifest regionManifest: regionManifests) { + for (SnapshotRegionManifest regionManifest : regionManifests) { visitRegionStoreFiles(regionManifest, visitor); } } /** * Iterate over the snapshot store files in the specified region - * * @param manifest snapshot manifest to inspect - * @param visitor callback object to get the store files + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, - final StoreFileVisitor visitor) throws IOException { + final StoreFileVisitor visitor) throws IOException { RegionInfo regionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo()); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { String familyName = familyFiles.getFamilyName().toStringUtf8(); - for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { + for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { visitor.storeFile(regionInfo, familyName, storeFile); } } @@ -142,45 +136,42 @@ public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, /** * Verify the validity of the snapshot - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} - * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify + * @param conf The current {@link Configuration} instance. 
+ * @param fs {@link FileSystem} + * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to verify * @throws CorruptedSnapshotException if the snapshot is corrupted - * @throws IOException if an error occurred while scanning the directory + * @throws IOException if an error occurred while scanning the directory */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { + final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); verifySnapshot(conf, fs, manifest); } /** * Verify the validity of the snapshot - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param manifest snapshot manifest to inspect * @throws CorruptedSnapshotException if the snapshot is corrupted - * @throws IOException if an error occurred while scanning the directory + * @throws IOException if an error occurred while scanning the directory */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest) throws IOException { + final SnapshotManifest manifest) throws IOException { final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription(); final Path snapshotDir = manifest.getSnapshotDir(); concurrentVisitReferencedFiles(conf, fs, manifest, "VerifySnapshot", new StoreFileVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile); } }); } /** - * Verify the validity of the snapshot. - * + * Verify the validity of the snapshot. 
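
As a companion to the visitor-based verification refactored above, a hedged usage sketch (the snapshot directory path is a placeholder) that enumerates every hfile a snapshot references through the same StoreFileVisitor callback used by visitTableStoreFiles:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;

public class ListSnapshotStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder: a completed snapshot directory under the HBase root dir.
    Path snapshotDir = new Path("/hbase/.hbase-snapshot/mySnapshot");
    FileSystem fs = snapshotDir.getFileSystem(conf);
    SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir,
      new SnapshotReferenceUtil.SnapshotVisitor() {
        @Override
        public void storeFile(RegionInfo regionInfo, String familyName,
            SnapshotRegionManifest.StoreFile storeFile) {
          // Each callback names one referenced hfile: region / family / file.
          System.out.println(regionInfo.getEncodedName() + "/" + familyName + "/"
            + storeFile.getName());
        }
      });
  }
}
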
* @param visitor user-specified store file visitor */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, @@ -189,8 +180,8 @@ public static void verifySnapshot(final Configuration conf, final FileSystem fs, } public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest, final String desc, final StoreFileVisitor visitor) - throws IOException { + final SnapshotManifest manifest, final String desc, final StoreFileVisitor visitor) + throws IOException { final Path snapshotDir = manifest.getSnapshotDir(); List regionManifests = manifest.getRegionManifests(); @@ -209,8 +200,8 @@ public static void concurrentVisitReferencedFiles(final Configuration conf, fina } public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest, final ExecutorService exec, final StoreFileVisitor visitor) - throws IOException { + final SnapshotManifest manifest, final ExecutorService exec, final StoreFileVisitor visitor) + throws IOException { final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription(); final Path snapshotDir = manifest.getSnapshotDir(); @@ -224,7 +215,8 @@ public static void concurrentVisitReferencedFiles(final Configuration conf, fina for (final SnapshotRegionManifest regionManifest : regionManifests) { completionService.submit(new Callable() { - @Override public Void call() throws IOException { + @Override + public Void call() throws IOException { visitRegionStoreFiles(regionManifest, visitor); return null; } @@ -239,7 +231,7 @@ public static void concurrentVisitReferencedFiles(final Configuration conf, fina } catch (ExecutionException e) { if (e.getCause() instanceof CorruptedSnapshotException) { throw new CorruptedSnapshotException(e.getCause().getMessage(), - ProtobufUtil.createSnapshotDesc(snapshotDesc)); + ProtobufUtil.createSnapshotDesc(snapshotDesc)); } else { throw new IOException(e.getCause()); } @@ -248,20 +240,19 @@ public static void concurrentVisitReferencedFiles(final Configuration conf, fina /** * Verify the validity of the snapshot store file - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. 
+ * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify - * @param snapshot the {@link SnapshotDescription} of the snapshot to verify - * @param regionInfo {@link RegionInfo} of the region that contains the store file - * @param family family that contains the store file - * @param storeFile the store file to verify + * @param snapshot the {@link SnapshotDescription} of the snapshot to verify + * @param regionInfo {@link RegionInfo} of the region that contains the store file + * @param family family that contains the store file + * @param storeFile the store file to verify * @throws CorruptedSnapshotException if the snapshot is corrupted - * @throws IOException if an error occurred while scanning the directory + * @throws IOException if an error occurred while scanning the directory */ public static void verifyStoreFile(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshot, final RegionInfo regionInfo, - final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final Path snapshotDir, final SnapshotDescription snapshot, final RegionInfo regionInfo, + final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { TableName table = TableName.valueOf(snapshot.getTable()); String fileName = storeFile.getName(); @@ -274,8 +265,8 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs refPath = HFileLink.createPath(table, refRegion, family, refPath.getName()); if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) { throw new CorruptedSnapshotException( - "Missing parent hfile for: " + fileName + " path=" + refPath, - ProtobufUtil.createSnapshotDesc(snapshot)); + "Missing parent hfile for: " + fileName + " path=" + refPath, + ProtobufUtil.createSnapshotDesc(snapshot)); } if (storeFile.hasReference()) { @@ -291,8 +282,8 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs } else if (HFileLink.isHFileLink(fileName)) { linkPath = new Path(family, fileName); } else { - linkPath = new Path(family, HFileLink.createHFileLinkName( - table, regionInfo.getEncodedName(), fileName)); + linkPath = new Path(family, + HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName)); } // check if the linked file exists (in the archive, or in the table dir) @@ -300,7 +291,7 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs if (MobUtils.isMobRegionInfo(regionInfo)) { // for mob region link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), linkPath); + HFileArchiveUtil.getArchivePath(conf), linkPath); } else { // not mob region link = HFileLink.buildFromHFileLinkPattern(conf, linkPath); @@ -308,62 +299,57 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs try { FileStatus fstat = link.getFileStatus(fs); if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) { - String msg = "hfile: " + fileName + " size does not match with the expected one. " + - " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize(); + String msg = "hfile: " + fileName + " size does not match with the expected one. 
" + + " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize(); LOG.error(msg); - throw new CorruptedSnapshotException(msg, - ProtobufUtil.createSnapshotDesc(snapshot)); + throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot)); } } catch (FileNotFoundException e) { - String msg = "Can't find hfile: " + fileName + " in the real (" + - link.getOriginPath() + ") or archive (" + link.getArchivePath() - + ") directory for the primary table."; + String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath() + + ") or archive (" + link.getArchivePath() + ") directory for the primary table."; LOG.error(msg); - throw new CorruptedSnapshotException(msg, - ProtobufUtil.createSnapshotDesc(snapshot)); + throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot)); } } /** * Returns the store file names in the snapshot. - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory * @throws IOException if an error occurred while scanning the directory * @return the names of hfiles in the specified snaphot */ public static Set getHFileNames(final Configuration conf, final FileSystem fs, - final Path snapshotDir) throws IOException { + final Path snapshotDir) throws IOException { SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); return getHFileNames(conf, fs, snapshotDir, desc); } /** * Returns the store file names in the snapshot. - * - * @param conf The current {@link Configuration} instance. - * @param fs {@link FileSystem} - * @param snapshotDir {@link Path} to the Snapshot directory + * @param conf The current {@link Configuration} instance. 
+ * @param fs {@link FileSystem} + * @param snapshotDir {@link Path} to the Snapshot directory * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to inspect * @throws IOException if an error occurred while scanning the directory * @return the names of hfiles in the specified snaphot */ private static Set getHFileNames(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshotDesc) - throws IOException { + final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { final Set names = new HashSet<>(); visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfile = storeFile.getName(); if (HFileLink.isHFileLink(hfile)) { names.add(HFileLink.getReferencedHFileName(hfile)); } else if (StoreFileInfo.isReference(hfile)) { - Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path( + Path refPath = + StoreFileInfo.getReferredToFile(new Path(new Path( new Path(new Path(regionInfo.getTable().getNamespaceAsString(), - regionInfo.getTable().getQualifierAsString()), regionInfo.getEncodedName()), + regionInfo.getTable().getQualifierAsString()), regionInfo.getEncodedName()), family), hfile)); names.add(hfile); names.add(refPath.getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java index a5d4f1e56d36..9c2221833285 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,21 +77,20 @@ public Path getFilePath() { /** * Perform a bulk load of the given directory into the given pre-existing table. - * @param tableName the table to load into + * @param tableName the table to load into * @param family2Files map of family to List of hfiles * @throws TableNotFoundException if table does not yet exist */ Map bulkLoad(TableName tableName, Map> family2Files) - throws TableNotFoundException, IOException; + throws TableNotFoundException, IOException; /** - * Disables replication for all bulkloads done via this instance, - * when bulkload replication is configured. + * Disables replication for all bulkloads done via this instance, when bulkload replication is + * configured. */ void disableReplication(); /** - * * @return true if replication has been disabled. 
*/ boolean isReplicationDisabled(); @@ -99,16 +98,15 @@ Map bulkLoad(TableName tableName, Map bulkLoad(TableName tableName, Path dir) - throws TableNotFoundException, IOException; + throws TableNotFoundException, IOException; static BulkLoadHFiles create(Configuration conf) { return new BulkLoadHFilesTool(conf); } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index b0b086e145a9..d1c99fc6334e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -49,7 +49,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; - import org.apache.commons.lang3.mutable.MutableInt; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -94,15 +93,16 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimaps; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * The implementation for {@link BulkLoadHFiles}, and also can be executed from command line as a @@ -156,8 +156,8 @@ public void initialize() { fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true); maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32); - nrThreads = conf.getInt("hbase.loadincremental.threads.max", - Runtime.getRuntime().availableProcessors()); + nrThreads = + conf.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors()); bulkLoadByFamily = conf.getBoolean(BULK_LOAD_HFILES_BY_FAMILY, false); } @@ -195,16 +195,16 @@ private static boolean shouldCopyHFileMetaKey(byte[] key) { * Checks whether there is any invalid family name in HFiles to be bulk loaded. 
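
Since create() and the two bulkLoad() overloads above are the public entry points external callers use, a brief usage sketch may help; the table name and directory are placeholders, and the directory is assumed to contain one subdirectory per column family holding the hfiles (e.g. the output of HFileOutputFormat2).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
    // Loads every family subdirectory under the given dir into the pre-existing table;
    // throws TableNotFoundException if the table does not exist yet.
    loader.bulkLoad(TableName.valueOf("myTable"), new Path("/tmp/bulk-output"));
  }
}
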
*/ private static void validateFamiliesInHFiles(TableDescriptor tableDesc, - Deque queue, boolean silence) throws IOException { + Deque queue, boolean silence) throws IOException { Set familyNames = Arrays.stream(tableDesc.getColumnFamilies()) .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet()); List unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily())) .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList()); if (unmatchedFamilies.size() > 0) { String msg = - "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + - unmatchedFamilies + "; valid family names of table " + tableDesc.getTableName() + - " are: " + familyNames; + "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + + unmatchedFamilies + "; valid family names of table " + tableDesc.getTableName() + + " are: " + familyNames; LOG.error(msg); if (!silence) { throw new IOException(msg); @@ -232,7 +232,7 @@ private interface BulkHFileVisitor { * to false. */ private static void visitBulkHFiles(FileSystem fs, Path bulkDir, - BulkHFileVisitor visitor, boolean validateHFile) throws IOException { + BulkHFileVisitor visitor, boolean validateHFile) throws IOException { FileStatus[] familyDirStatuses = fs.listStatus(bulkDir); for (FileStatus familyStat : familyDirStatuses) { if (!familyStat.isDirectory()) { @@ -294,7 +294,7 @@ private static void visitBulkHFiles(FileSystem fs, Path bulkDir, * Walk the given directory for all HFiles, and return a Queue containing all such files. */ private static void discoverLoadQueue(Configuration conf, Deque ret, Path hfofDir, - boolean validateHFile) throws IOException { + boolean validateHFile) throws IOException { visitBulkHFiles(hfofDir.getFileSystem(conf), hfofDir, new BulkHFileVisitor() { @Override public byte[] bulkFamily(final byte[] familyName) { @@ -304,10 +304,11 @@ public byte[] bulkFamily(final byte[] familyName) { @Override public void bulkHFile(final byte[] family, final FileStatus hfile) { long length = hfile.getLen(); - if (length > conf.getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE)) { - LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + - " bytes can be problematic as it may lead to oversplitting."); + if ( + length > conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE) + ) { + LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + + " bytes can be problematic as it may lead to oversplitting."); } ret.add(new LoadQueueItem(family, hfile.getPath())); } @@ -318,14 +319,14 @@ public void bulkHFile(final byte[] family, final FileStatus hfile) { * Prepare a collection of {@code LoadQueueItem} from list of source hfiles contained in the * passed directory and validates whether the prepared queue has all the valid table column * families in it. 
- * @param map map of family to List of hfiles + * @param map map of family to List of hfiles * @param tableName table to which hfiles should be loaded - * @param queue queue which needs to be loaded into the table - * @param silence true to ignore unmatched column families + * @param queue queue which needs to be loaded into the table + * @param silence true to ignore unmatched column families * @throws IOException If any I/O or network error occurred */ public static void prepareHFileQueue(AsyncClusterConnection conn, TableName tableName, - Map> map, Deque queue, boolean silence) throws IOException { + Map> map, Deque queue, boolean silence) throws IOException { populateLoadQueue(queue, map); validateFamiliesInHFiles(FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), queue, silence); @@ -335,15 +336,15 @@ public static void prepareHFileQueue(AsyncClusterConnection conn, TableName tabl * Prepare a collection of {@code LoadQueueItem} from list of source hfiles contained in the * passed directory and validates whether the prepared queue has all the valid table column * families in it. - * @param hfilesDir directory containing list of hfiles to be loaded into the table - * @param queue queue which needs to be loaded into the table + * @param hfilesDir directory containing list of hfiles to be loaded into the table + * @param queue queue which needs to be loaded into the table * @param validateHFile if true hfiles will be validated for its format - * @param silence true to ignore unmatched column families + * @param silence true to ignore unmatched column families * @throws IOException If any I/O or network error occurred */ public static void prepareHFileQueue(Configuration conf, AsyncClusterConnection conn, - TableName tableName, Path hfilesDir, Deque queue, boolean validateHFile, - boolean silence) throws IOException { + TableName tableName, Path hfilesDir, Deque queue, boolean validateHFile, + boolean silence) throws IOException { discoverLoadQueue(conf, queue, hfilesDir, validateHFile); validateFamiliesInHFiles(FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), queue, silence); @@ -357,12 +358,12 @@ public static void prepareHFileQueue(Configuration conf, AsyncClusterConnection *
<li>{@link #bulkLoadPhase(AsyncClusterConnection, TableName, Deque, Multimap, boolean, Map)} *
  • * - * @param conn Connection to use + * @param conn Connection to use * @param tableName Table to which these hfiles should be loaded to - * @param queue {@code LoadQueueItem} has hfiles yet to be loaded + * @param queue {@code LoadQueueItem} has hfiles yet to be loaded */ public void loadHFileQueue(AsyncClusterConnection conn, TableName tableName, - Deque queue, boolean copyFiles) throws IOException { + Deque queue, boolean copyFiles) throws IOException { ExecutorService pool = createExecutorService(); try { Multimap regionGroups = groupOrSplitPhase(conn, tableName, pool, @@ -378,52 +379,52 @@ public void loadHFileQueue(AsyncClusterConnection conn, TableName tableName, * hfiles that need to be retried. If it is successful it will return an empty list. NOTE: To * maintain row atomicity guarantees, region server side should succeed atomically and fails * atomically. - * @param conn Connection to use + * @param conn Connection to use * @param tableName Table to which these hfiles should be loaded to * @param copyFiles whether replicate to peer cluster while bulkloading - * @param first the start key of region - * @param lqis hfiles should be loaded + * @param first the start key of region + * @param lqis hfiles should be loaded * @return empty list if success, list of items to retry on recoverable failure */ @InterfaceAudience.Private protected CompletableFuture> tryAtomicRegionLoad( - final AsyncClusterConnection conn, final TableName tableName, boolean copyFiles, - final byte[] first, Collection lqis) { + final AsyncClusterConnection conn, final TableName tableName, boolean copyFiles, + final byte[] first, Collection lqis) { List> familyPaths = - lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString())) - .collect(Collectors.toList()); + lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString())) + .collect(Collectors.toList()); CompletableFuture> future = new CompletableFuture<>(); - FutureUtils - .addListener( - conn.bulkLoad(tableName, familyPaths, first, assignSeqIds, - fsDelegationToken.getUserToken(), bulkToken, copyFiles, clusterIds, replicate), - (loaded, error) -> { - if (error != null) { - LOG.error("Encountered unrecoverable error from region server", error); - if (getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false) - && numRetries.get() < getConf().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) { - LOG.warn("Will attempt to retry loading failed HFiles. Retry #" - + numRetries.incrementAndGet()); - // return lqi's to retry - future.complete(lqis); - } else { - LOG.error(RETRY_ON_IO_EXCEPTION - + " is disabled or we have reached retry limit. Unable to recover"); - future.completeExceptionally(error); - } - } else { - if (loaded) { - future.complete(Collections.emptyList()); - } else { - LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) - + " into table " + tableName + " with files " + lqis - + " failed. 
This is recoverable and they will be retried."); - // return lqi's to retry - future.complete(lqis); - } - } - }); + FutureUtils.addListener(conn.bulkLoad(tableName, familyPaths, first, assignSeqIds, + fsDelegationToken.getUserToken(), bulkToken, copyFiles, clusterIds, replicate), + (loaded, error) -> { + if (error != null) { + LOG.error("Encountered unrecoverable error from region server", error); + if ( + getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false) + && numRetries.get() < getConf().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + ) { + LOG.warn("Will attempt to retry loading failed HFiles. Retry #" + + numRetries.incrementAndGet()); + // return lqi's to retry + future.complete(lqis); + } else { + LOG.error(RETRY_ON_IO_EXCEPTION + + " is disabled or we have reached retry limit. Unable to recover"); + future.completeExceptionally(error); + } + } else { + if (loaded) { + future.complete(Collections.emptyList()); + } else { + LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) + + " into table " + tableName + " with files " + lqis + + " failed. This is recoverable and they will be retried."); + // return lqi's to retry + future.complete(lqis); + } + } + }); return future; } @@ -435,17 +436,17 @@ protected CompletableFuture> tryAtomicRegionLoad( */ @InterfaceAudience.Private protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, - Deque queue, Multimap regionGroups, - boolean copyFiles, Map item2RegionMap) throws IOException { + Deque queue, Multimap regionGroups, boolean copyFiles, + Map item2RegionMap) throws IOException { // atomically bulk load the groups. List>> loadingFutures = new ArrayList<>(); for (Entry> entry : regionGroups.asMap() - .entrySet()) { + .entrySet()) { byte[] first = entry.getKey().array(); final Collection lqis = entry.getValue(); if (bulkLoadByFamily) { groupByFamilies(lqis).values().forEach(familyQueue -> loadingFutures - .add(tryAtomicRegionLoad(conn, tableName, copyFiles, first, familyQueue))); + .add(tryAtomicRegionLoad(conn, tableName, copyFiles, first, familyQueue))); } else { loadingFutures.add(tryAtomicRegionLoad(conn, tableName, copyFiles, first, lqis)); } @@ -485,24 +486,24 @@ protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, } private Map> - groupByFamilies(Collection itemsInRegion) { + groupByFamilies(Collection itemsInRegion) { Map> families2Queue = new TreeMap<>(Bytes.BYTES_COMPARATOR); itemsInRegion.forEach(item -> families2Queue - .computeIfAbsent(item.getFamily(), queue -> new ArrayList<>()).add(item)); + .computeIfAbsent(item.getFamily(), queue -> new ArrayList<>()).add(item)); return families2Queue; } - private boolean checkHFilesCountPerRegionPerFamily( - final Multimap regionGroups) { + private boolean + checkHFilesCountPerRegionPerFamily(final Multimap regionGroups) { for (Map.Entry> e : regionGroups.asMap().entrySet()) { Map filesMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (LoadQueueItem lqi : e.getValue()) { MutableInt count = filesMap.computeIfAbsent(lqi.getFamily(), k -> new MutableInt()); count.increment(); if (count.intValue() > maxFilesPerRegionPerFamily) { - LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + - " hfiles to family " + Bytes.toStringBinary(lqi.getFamily()) + - " of region with start key " + Bytes.toStringBinary(e.getKey())); + LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + " hfiles to family " + + Bytes.toStringBinary(lqi.getFamily()) + " of 
region with start key " + + Bytes.toStringBinary(e.getKey())); return false; } } @@ -511,16 +512,16 @@ private boolean checkHFilesCountPerRegionPerFamily( } /** - * @param conn the HBase cluster connection - * @param tableName the table name of the table to load into - * @param pool the ExecutorService - * @param queue the queue for LoadQueueItem + * @param conn the HBase cluster connection + * @param tableName the table name of the table to load into + * @param pool the ExecutorService + * @param queue the queue for LoadQueueItem * @param startEndKeys start and end keys * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles. */ private Pair, Set> groupOrSplitPhase( - AsyncClusterConnection conn, TableName tableName, ExecutorService pool, - Deque queue, List> startEndKeys) throws IOException { + AsyncClusterConnection conn, TableName tableName, ExecutorService pool, + Deque queue, List> startEndKeys) throws IOException { // need synchronized only within this scope of this // phase because of the puts that happen in futures. Multimap rgs = HashMultimap.create(); @@ -572,7 +573,7 @@ private String getUniqueName() { } private List splitStoreFile(LoadQueueItem item, TableDescriptor tableDesc, - byte[] splitKey) throws IOException { + byte[] splitKey) throws IOException { Path hfilePath = item.getFilePath(); byte[] family = item.getFamily(); Path tmpDir = hfilePath.getParent(); @@ -616,8 +617,8 @@ private List splitStoreFile(LoadQueueItem item, TableDescriptor t /** * @param startEndKeys the start/end keys of regions belong to this table, the list in ascending - * order by start key - * @param key the key need to find which region belong to + * order by start key + * @param key the key need to find which region belong to * @return region index */ private int getRegionIndex(List> startEndKeys, byte[] key) { @@ -637,19 +638,23 @@ private int getRegionIndex(List> startEndKeys, byte[] key) * next region. 3) if the endkey of the last region is not empty. */ private void checkRegionIndexValid(int idx, List> startEndKeys, - TableName tableName) throws IOException { + TableName tableName) throws IOException { if (idx < 0) { throw new IOException("The first region info for table " + tableName - + " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if ((idx == startEndKeys.size() - 1) - && !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY)) { + + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + } else if ( + (idx == startEndKeys.size() - 1) + && !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY) + ) { throw new IOException("The last region info for table " + tableName - + " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if (idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), - startEndKeys.get(idx + 1).getFirst()) == 0)) { + + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + } else if ( + idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), + startEndKeys.get(idx + 1).getFirst()) == 0) + ) { throw new IOException("The endkey of one region for table " + tableName - + " is not equal to the startkey of the next region in hbase:meta." - + "Please use hbck tool to fix it first."); + + " is not equal to the startkey of the next region in hbase:meta." 
+ + "Please use hbck tool to fix it first."); } } @@ -663,8 +668,8 @@ private void checkRegionIndexValid(int idx, List> startEndK */ @InterfaceAudience.Private protected Pair, String> groupOrSplit(AsyncClusterConnection conn, - TableName tableName, Multimap regionGroups, LoadQueueItem item, - List> startEndKeys) throws IOException { + TableName tableName, Multimap regionGroups, LoadQueueItem item, + List> startEndKeys) throws IOException { Path hfilePath = item.getFilePath(); Optional first, last; try (HFile.Reader hfr = HFile.createReader(hfilePath.getFileSystem(getConf()), hfilePath, @@ -676,8 +681,8 @@ CacheConfig.DISABLED, true, getConf())) { return new Pair<>(null, hfilePath.getName()); } - LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) + - " last=" + last.map(Bytes::toStringBinary)); + LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) + + " last=" + last.map(Bytes::toStringBinary)); if (!first.isPresent() || !last.isPresent()) { assert !first.isPresent() && !last.isPresent(); // TODO what if this is due to a bad HFile? @@ -686,13 +691,13 @@ CacheConfig.DISABLED, true, getConf())) { } if (Bytes.compareTo(first.get(), last.get()) > 0) { throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) - + " > " + Bytes.toStringBinary(last.get())); + + " > " + Bytes.toStringBinary(last.get())); } int firstKeyRegionIdx = getRegionIndex(startEndKeys, first.get()); checkRegionIndexValid(firstKeyRegionIdx, startEndKeys, tableName); boolean lastKeyInRange = - Bytes.compareTo(last.get(), startEndKeys.get(firstKeyRegionIdx).getSecond()) < 0 || Bytes - .equals(startEndKeys.get(firstKeyRegionIdx).getSecond(), HConstants.EMPTY_BYTE_ARRAY); + Bytes.compareTo(last.get(), startEndKeys.get(firstKeyRegionIdx).getSecond()) < 0 || Bytes + .equals(startEndKeys.get(firstKeyRegionIdx).getSecond(), HConstants.EMPTY_BYTE_ARRAY); if (!lastKeyInRange) { int lastKeyRegionIdx = getRegionIndex(startEndKeys, last.get()); int splitIdx = (firstKeyRegionIdx + lastKeyRegionIdx) / 2; @@ -702,8 +707,8 @@ CacheConfig.DISABLED, true, getConf())) { checkRegionIndexValid(splitIdx, startEndKeys, tableName); } byte[] splitPoint = startEndKeys.get(splitIdx).getSecond(); - List lqis = splitStoreFile(item, - FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), splitPoint); + List lqis = + splitStoreFile(item, FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), splitPoint); return new Pair<>(lqis, null); } @@ -718,7 +723,7 @@ CacheConfig.DISABLED, true, getConf())) { */ @InterfaceAudience.Private static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc, - byte[] splitKey, Path bottomOut, Path topOut) throws IOException { + byte[] splitKey, Path bottomOut, Path topOut) throws IOException { // Open reader with no block cache, and not in-memory Reference topReference = Reference.createTopReference(splitKey); Reference bottomReference = Reference.createBottomReference(splitKey); @@ -731,17 +736,16 @@ static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescript * Copy half of an HFile into a new HFile. 
*/ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, - Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException { + Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException { FileSystem fs = inFile.getFileSystem(conf); CacheConfig cacheConf = CacheConfig.DISABLED; HalfStoreFileReader halfReader = null; StoreFileWriter halfWriter = null; try { - ReaderContext context = new ReaderContextBuilder() - .withFileSystemAndPath(fs, inFile).build(); + ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build(); HFileInfo hfile = new HFileInfo(context, conf); - halfReader = new HalfStoreFileReader(context, hfile, cacheConf, reference, - new AtomicInteger(0), conf); + halfReader = + new HalfStoreFileReader(context, hfile, cacheConf, reference, new AtomicInteger(0), conf); hfile.initMetaAndIndex(halfReader.getHFileReader()); Map fileInfo = halfReader.loadFileInfo(); @@ -846,20 +850,20 @@ public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) { @Override public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) - throws IOException { + throws IOException { Path hfile = hfileStatus.getPath(); try (HFile.Reader reader = HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { if (builder.getCompressionType() != reader.getFileContext().getCompression()) { builder.setCompressionType(reader.getFileContext().getCompression()); - LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + - " for family " + builder.getNameAsString()); + LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + + " for family " + builder.getNameAsString()); } byte[] first = reader.getFirstRowKey().get(); byte[] last = reader.getLastRowKey().get(); - LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + - Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.getOrDefault(first, 0); @@ -881,8 +885,8 @@ public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileSta } private Map performBulkLoad(AsyncClusterConnection conn, - TableName tableName, Deque queue, ExecutorService pool, boolean copyFile) - throws IOException { + TableName tableName, Deque queue, ExecutorService pool, boolean copyFile) + throws IOException { int count = 0; fsDelegationToken.acquireDelegationToken(queue.peek().getFilePath().getFileSystem(getConf())); @@ -896,8 +900,8 @@ private Map performBulkLoad(AsyncClusterConnection co final List> startEndKeys = FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys()); if (count != 0) { - LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + - queue.size() + " files remaining to group or split"); + LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + + queue.size() + " files remaining to group or split"); } int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10); @@ -914,8 +918,8 @@ private Map performBulkLoad(AsyncClusterConnection co if (!checkHFilesCountPerRegionPerFamily(regionGroups)) { // Error is logged inside checkHFilesCountPerRegionPerFamily. 
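
To make the half-copy step above easier to follow, a hedged sketch of the Reference markers that splitStoreFile()/copyHFileHalf() rely on; the split key is a made-up value and nothing is written to disk here.

import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitReferenceSketch {
  public static void main(String[] args) {
    // Hypothetical region boundary that an oversized hfile straddles.
    byte[] splitKey = Bytes.toBytes("row-5000");
    // The bottom half keeps keys before the split point, the top half keeps the rest;
    // each half is then copied into its own new hfile by copyHFileHalf() above.
    Reference bottom = Reference.createBottomReference(splitKey);
    Reference top = Reference.createTopReference(splitKey);
    System.out.println("bottom=" + bottom + " top=" + top);
  }
}
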
- throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily + - " hfiles to one family of one region"); + throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily + + " hfiles to one family of one region"); } bulkLoadPhase(conn, tableName, queue, regionGroups, copyFile, item2RegionMap); @@ -929,7 +933,7 @@ private Map performBulkLoad(AsyncClusterConnection co } private void cleanup(AsyncClusterConnection conn, TableName tableName, Deque queue, - ExecutorService pool) throws IOException { + ExecutorService pool) throws IOException { fsDelegationToken.releaseDelegationToken(); if (bulkToken != null) { conn.cleanupBulkLoad(tableName, bulkToken); @@ -952,14 +956,14 @@ private void cleanup(AsyncClusterConnection conn, TableName tableName, Deque doBulkLoad(AsyncClusterConnection conn, - TableName tableName, Map> map, boolean silence, boolean copyFile) - throws IOException { + TableName tableName, Map> map, boolean silence, boolean copyFile) + throws IOException { tableExists(conn, tableName); // LQI queue does not need to be threadsafe -- all operations on this queue // happen in this thread @@ -982,14 +986,13 @@ private Map doBulkLoad(AsyncClusterConnection conn, * Perform a bulk load of the given directory into the given pre-existing table. This method is * not threadsafe. * @param tableName table to load the hfiles - * @param hfofDir the directory that was provided as the output path of a job using - * HFileOutputFormat - * @param silence true to ignore unmatched column families - * @param copyFile always copy hfiles if true + * @param hfofDir the directory that was provided as the output path of a job using + * HFileOutputFormat + * @param silence true to ignore unmatched column families + * @param copyFile always copy hfiles if true */ private Map doBulkLoad(AsyncClusterConnection conn, - TableName tableName, Path hfofDir, boolean silence, boolean copyFile) - throws IOException { + TableName tableName, Path hfofDir, boolean silence, boolean copyFile) throws IOException { tableExists(conn, tableName); /* @@ -998,10 +1001,10 @@ private Map doBulkLoad(AsyncClusterConnection conn, */ boolean validateHFile = getConf().getBoolean(VALIDATE_HFILES, true); if (!validateHFile) { - LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + - "are not correct. If you fail to read data from your table after using this " + - "option, consider removing the files and bulkload again without this option. " + - "See HBASE-13985"); + LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + + "are not correct. If you fail to read data from your table after using this " + + "option, consider removing the files and bulkload again without this option. " + + "See HBASE-13985"); } // LQI queue does not need to be threadsafe -- all operations on this queue // happen in this thread @@ -1012,8 +1015,8 @@ private Map doBulkLoad(AsyncClusterConnection conn, if (queue.isEmpty()) { LOG.warn( - "Bulk load operation did not find any files to load in directory {}. " + - "Does it contain files in subdirectories that correspond to column family names?", + "Bulk load operation did not find any files to load in directory {}. " + + "Does it contain files in subdirectories that correspond to column family names?", (hfofDir != null ? 
hfofDir.toUri().toString() : "")); return Collections.emptyMap(); } @@ -1026,16 +1029,15 @@ private Map doBulkLoad(AsyncClusterConnection conn, @Override public Map bulkLoad(TableName tableName, - Map> family2Files) throws IOException { - try (AsyncClusterConnection conn = ClusterConnectionFactory. - createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { + Map> family2Files) throws IOException { + try (AsyncClusterConnection conn = ClusterConnectionFactory + .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { return doBulkLoad(conn, tableName, family2Files, isSilence(), isAlwaysCopyFiles()); } } @Override - public Map bulkLoad(TableName tableName, Path dir) - throws IOException { + public Map bulkLoad(TableName tableName, Path dir) throws IOException { try (AsyncClusterConnection conn = ClusterConnectionFactory .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { AsyncAdmin admin = conn.getAdmin(); @@ -1075,18 +1077,16 @@ public void setClusterIds(List clusterIds) { private void usage() { System.err.println("Usage: " + "bin/hbase completebulkload [OPTIONS] " - + " \n" - + "Loads directory of hfiles -- a region dir or product of HFileOutputFormat -- " - + "into an hbase table.\n" - + "OPTIONS (for other -D options, see source code):\n" - + " -D" + CREATE_TABLE_CONF_KEY + "=no whether to create table; when 'no', target " - + "table must exist.\n" - + " -D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes to ignore unmatched column families.\n" - + " -loadTable for when directory of files to load has a depth of 3; target table must " - + "exist;\n" - + " must be last of the options on command line.\n" - + "See http://hbase.apache.org/book.html#arch.bulk.load.complete.strays for " - + "documentation.\n"); + + " \n" + + "Loads directory of hfiles -- a region dir or product of HFileOutputFormat -- " + + "into an hbase table.\n" + "OPTIONS (for other -D options, see source code):\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no whether to create table; when 'no', target " + + "table must exist.\n" + " -D" + IGNORE_UNMATCHED_CF_CONF_KEY + + "=yes to ignore unmatched column families.\n" + + " -loadTable for when directory of files to load has a depth of 3; target table must " + + "exist;\n" + " must be last of the options on command line.\n" + + "See http://hbase.apache.org/book.html#arch.bulk.load.complete.strays for " + + "documentation.\n"); } @Override @@ -1126,12 +1126,12 @@ public static void main(String[] args) throws Exception { } @Override - public void disableReplication(){ + public void disableReplication() { this.replicate = false; } @Override - public boolean isReplicationDisabled(){ + public boolean isReplicationDisabled() { return !this.replicate; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 89ad398c0fba..bd97f8466d5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.tool; - import java.util.Map; import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; @@ -39,7 +36,6 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si /** * Run Canary in Region mode. - * * @param targets -- list of monitor tables. * @return the exit code of the Canary tool. */ @@ -47,7 +43,6 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si /** * Runs Canary in Region server mode. - * * @param targets -- list of monitor tables. * @return the exit code of the Canary tool. */ @@ -55,7 +50,6 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si /** * Runs Canary in Zookeeper mode. - * * @return the exit code of the Canary tool. */ public int checkZooKeeper() throws Exception; @@ -63,4 +57,4 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si public Map getReadFailures(); public Map getWriteFailures(); -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java index ce214a7a2973..42e749f83641 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,15 +25,13 @@ import org.apache.hadoop.hbase.tmpl.tool.CanaryStatusTmpl; import org.apache.yetus.audience.InterfaceAudience; - @InterfaceAudience.Private public class CanaryStatusServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { CanaryTool.RegionStdOutSink sink = - (CanaryTool.RegionStdOutSink) getServletContext().getAttribute( - "sink"); + (CanaryTool.RegionStdOutSink) getServletContext().getAttribute("sink"); if (sink == null) { throw new ServletException( "RegionStdOutSink is null! The CanaryTool's InfoServer is not initialized correctly"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index 38f1ce31c18e..79f7f3d319a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.util.Addressing.inetSocketAddress2String; + import java.io.Closeable; import java.io.IOException; import java.net.BindException; @@ -99,24 +99,18 @@ import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * HBase Canary Tool for "canary monitoring" of a running HBase cluster. - * - * There are three modes: + * HBase Canary Tool for "canary monitoring" of a running HBase cluster. There are three modes: *
<ol>
 * <li>region mode (Default): For each region, try to get one row per column family outputting
 * information on failure (ERROR) or else the latency.</li>
 * <li>regionserver mode: For each regionserver try to get one row from one table selected randomly
 * outputting information on failure (ERROR) or else the latency.</li>
 * <li>zookeeper mode: for each zookeeper instance, selects a znode outputting information on
 * failure (ERROR) or else the latency.</li>
 * </ol>
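 * <p>
 * A minimal programmatic sketch (an assumption: CanaryTool is launched through Hadoop's ToolRunner,
 * which its run(String[]) override suggests; the flag shown is one of the options parsed further
 * below, and omitting it gives the default region mode):
 * <pre>
 * int exitCode = ToolRunner.run(HBaseConfiguration.create(), new CanaryTool(),
 *   new String[] { "-regionserver" });
 * </pre>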
    */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -179,16 +173,27 @@ public int checkZooKeeper() throws Exception { */ public interface Sink { long getReadFailureCount(); + long incReadFailureCount(); - Map getReadFailures(); + + Map getReadFailures(); + void updateReadFailures(String regionName, String serverName); + long getWriteFailureCount(); + long incWriteFailureCount(); - Map getWriteFailures(); + + Map getWriteFailures(); + void updateWriteFailures(String regionName, String serverName); + long getReadSuccessCount(); + long incReadSuccessCount(); + long getWriteSuccessCount(); + long incWriteSuccessCount(); } @@ -196,10 +201,8 @@ public interface Sink { * Simple implementation of canary sink that allows plotting to a file or standard output. */ public static class StdOutSink implements Sink { - private AtomicLong readFailureCount = new AtomicLong(0), - writeFailureCount = new AtomicLong(0), - readSuccessCount = new AtomicLong(0), - writeSuccessCount = new AtomicLong(0); + private AtomicLong readFailureCount = new AtomicLong(0), writeFailureCount = new AtomicLong(0), + readSuccessCount = new AtomicLong(0), writeSuccessCount = new AtomicLong(0); private Map readFailures = new ConcurrentHashMap<>(); private Map writeFailures = new ConcurrentHashMap<>(); @@ -293,15 +296,14 @@ public void publishReadTiming(String znode, String server, long msTime) { } /** - * By Region, for 'region' mode. + * By Region, for 'region' mode. */ public static class RegionStdOutSink extends StdOutSink { private Map perTableReadLatency = new HashMap<>(); private LongAdder writeLatency = new LongAdder(); private final ConcurrentMap> regionMap = new ConcurrentHashMap<>(); - private ConcurrentMap perServerFailuresCount = - new ConcurrentHashMap<>(); + private ConcurrentMap perServerFailuresCount = new ConcurrentHashMap<>(); private ConcurrentMap perTableFailuresCount = new ConcurrentHashMap<>(); public ConcurrentMap getPerServerFailuresCount() { @@ -337,21 +339,20 @@ private void incFailuresCountDetails(ServerName serverName, RegionInfo region) { public void publishReadFailure(ServerName serverName, RegionInfo region, Exception e) { incReadFailureCount(); incFailuresCountDetails(serverName, region); - LOG.error("Read from {} on serverName={} failed", - region.getRegionNameAsString(), serverName, e); + LOG.error("Read from {} on serverName={} failed", region.getRegionNameAsString(), serverName, + e); } public void publishReadFailure(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, Exception e) { + ColumnFamilyDescriptor column, Exception e) { incReadFailureCount(); incFailuresCountDetails(serverName, region); LOG.error("Read from {} on serverName={}, columnFamily={} failed", - region.getRegionNameAsString(), serverName, - column.getNameAsString(), e); + region.getRegionNameAsString(), serverName, column.getNameAsString(), e); } public void publishReadTiming(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, long msTime) { + ColumnFamilyDescriptor column, long msTime) { RegionTaskResult rtr = new RegionTaskResult(region, region.getTable(), serverName, column); rtr.setReadSuccess(); rtr.setReadLatency(msTime); @@ -360,7 +361,7 @@ public void publishReadTiming(ServerName serverName, RegionInfo region, // Note that read success count will be equal to total column family read successes. 
incReadSuccessCount(); LOG.info("Read from {} on {} {} in {}ms", region.getRegionNameAsString(), serverName, - column.getNameAsString(), msTime); + column.getNameAsString(), msTime); } public void publishWriteFailure(ServerName serverName, RegionInfo region, Exception e) { @@ -370,15 +371,15 @@ public void publishWriteFailure(ServerName serverName, RegionInfo region, Except } public void publishWriteFailure(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, Exception e) { + ColumnFamilyDescriptor column, Exception e) { incWriteFailureCount(); incFailuresCountDetails(serverName, region); LOG.error("Write to {} on {} {} failed", region.getRegionNameAsString(), serverName, - column.getNameAsString(), e); + column.getNameAsString(), e); } public void publishWriteTiming(ServerName serverName, RegionInfo region, - ColumnFamilyDescriptor column, long msTime) { + ColumnFamilyDescriptor column, long msTime) { RegionTaskResult rtr = new RegionTaskResult(region, region.getTable(), serverName, column); rtr.setWriteSuccess(); rtr.setWriteLatency(msTime); @@ -386,8 +387,8 @@ public void publishWriteTiming(ServerName serverName, RegionInfo region, rtrs.add(rtr); // Note that write success count will be equal to total column family write successes. incWriteSuccessCount(); - LOG.info("Write to {} on {} {} in {}ms", - region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime); + LOG.info("Write to {} on {} {} in {}ms", region.getRegionNameAsString(), serverName, + column.getNameAsString(), msTime); } public Map getReadLatencyMap() { @@ -428,7 +429,7 @@ static class ZookeeperTask implements Callable { private ZookeeperStdOutSink sink; public ZookeeperTask(Connection connection, String host, String znode, int timeout, - ZookeeperStdOutSink sink) { + ZookeeperStdOutSink sink) { this.connection = connection; this.host = host; this.znode = znode; @@ -436,7 +437,8 @@ public ZookeeperTask(Connection connection, String host, String znode, int timeo this.sink = sink; } - @Override public Void call() throws Exception { + @Override + public Void call() throws Exception { ZooKeeper zooKeeper = null; try { zooKeeper = new ZooKeeper(host, timeout, EmptyWatcher.instance); @@ -462,9 +464,11 @@ public ZookeeperTask(Connection connection, String host, String znode, int timeo * output latency or failure. 
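 * In WRITE mode the task, as the write() method below shows, puts one small random value per
 * column family at the region start key (falling back to a single 0x0 byte row when the start key
 * is empty); READ mode likewise issues one read per chosen column family, and either outcome is
 * reported to the RegionStdOutSink described above.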
*/ static class RegionTask implements Callable { - public enum TaskType{ - READ, WRITE + public enum TaskType { + READ, + WRITE } + private Connection connection; private RegionInfo region; private RegionStdOutSink sink; @@ -475,8 +479,8 @@ public enum TaskType{ private boolean readAllCF; RegionTask(Connection connection, RegionInfo region, ServerName serverName, - RegionStdOutSink sink, TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency, - boolean readAllCF) { + RegionStdOutSink sink, TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency, + boolean readAllCF) { this.connection = connection; this.region = region; this.serverName = serverName; @@ -600,17 +604,17 @@ private Void write() { tableDesc = table.getDescriptor(); byte[] rowToCheck = region.getStartKey(); if (rowToCheck.length == 0) { - rowToCheck = new byte[]{0x0}; + rowToCheck = new byte[] { 0x0 }; } - int writeValueSize = connection.getConfiguration() - .getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); + int writeValueSize = + connection.getConfiguration().getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) { Put put = new Put(rowToCheck); byte[] value = new byte[writeValueSize]; Bytes.random(value); put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value); - LOG.debug("Writing to {} {} {} {}", - tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(), + LOG.debug("Writing to {} {} {} {}", tableDesc.getTableName(), + region.getRegionNameAsString(), column.getNameAsString(), Bytes.toStringBinary(rowToCheck)); try { long startTime = EnvironmentEdgeManager.currentTime(); @@ -632,8 +636,8 @@ private Void write() { } /** - * Run a single RegionServer Task and then exit. - * Get one row from a region on the regionserver and output latency or the failure. + * Run a single RegionServer Task and then exit. Get one row from a region on the regionserver and + * output latency or the failure. */ static class RegionServerTask implements Callable { private Connection connection; @@ -643,7 +647,7 @@ static class RegionServerTask implements Callable { private AtomicLong successes; RegionServerTask(Connection connection, String serverName, RegionInfo region, - RegionServerStdOutSink sink, AtomicLong successes) { + RegionServerStdOutSink sink, AtomicLong successes) { this.connection = connection; this.serverName = serverName; this.region = region; @@ -666,9 +670,8 @@ public Void call() { table = connection.getTable(tableName); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. 
- LOG.debug("Reading from {} {} {} {}", - serverName, region.getTable(), region.getRegionNameAsString(), - Bytes.toStringBinary(startKey)); + LOG.debug("Reading from {} {} {} {}", serverName, region.getTable(), + region.getRegionNameAsString(), Bytes.toStringBinary(startKey)); if (startKey.length > 0) { get = new Get(startKey); get.setCacheBlocks(false); @@ -733,8 +736,8 @@ public Void call() { private static final Logger LOG = LoggerFactory.getLogger(Canary.class); - public static final TableName DEFAULT_WRITE_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "canary"); + public static final TableName DEFAULT_WRITE_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "canary"); private static final String CANARY_TABLE_FAMILY_NAME = "Test"; @@ -753,32 +756,31 @@ public Void call() { private boolean zookeeperMode = false; /** - * This is a Map of table to timeout. The timeout is for reading all regions in the table; i.e. - * we aggregate time to fetch each region and it needs to be less than this value else we - * log an ERROR. + * This is a Map of table to timeout. The timeout is for reading all regions in the table; i.e. we + * aggregate time to fetch each region and it needs to be less than this value else we log an + * ERROR. */ private HashMap configuredReadTableTimeouts = new HashMap<>(); - public static final String HBASE_CANARY_REGIONSERVER_ALL_REGIONS - = "hbase.canary.regionserver_all_regions"; + public static final String HBASE_CANARY_REGIONSERVER_ALL_REGIONS = + "hbase.canary.regionserver_all_regions"; - public static final String HBASE_CANARY_REGION_WRITE_SNIFFING - = "hbase.canary.region.write.sniffing"; - public static final String HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT - = "hbase.canary.region.write.table.timeout"; - public static final String HBASE_CANARY_REGION_WRITE_TABLE_NAME - = "hbase.canary.region.write.table.name"; - public static final String HBASE_CANARY_REGION_READ_TABLE_TIMEOUT - = "hbase.canary.region.read.table.timeout"; + public static final String HBASE_CANARY_REGION_WRITE_SNIFFING = + "hbase.canary.region.write.sniffing"; + public static final String HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT = + "hbase.canary.region.write.table.timeout"; + public static final String HBASE_CANARY_REGION_WRITE_TABLE_NAME = + "hbase.canary.region.write.table.name"; + public static final String HBASE_CANARY_REGION_READ_TABLE_TIMEOUT = + "hbase.canary.region.read.table.timeout"; - public static final String HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES - = "hbase.canary.zookeeper.permitted.failures"; + public static final String HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES = + "hbase.canary.zookeeper.permitted.failures"; public static final String HBASE_CANARY_USE_REGEX = "hbase.canary.use.regex"; public static final String HBASE_CANARY_TIMEOUT = "hbase.canary.timeout"; public static final String HBASE_CANARY_FAIL_ON_ERROR = "hbase.canary.fail.on.error"; - private ExecutorService executor; // threads to retrieve data from regionservers public CanaryTool() { @@ -853,15 +855,15 @@ private int parseArgs(String[] args) { } } else if (cmd.equals("-zookeeper")) { this.zookeeperMode = true; - } else if(cmd.equals("-regionserver")) { + } else if (cmd.equals("-regionserver")) { this.regionServerMode = true; - } else if(cmd.equals("-allRegions")) { + } else if (cmd.equals("-allRegions")) { conf.setBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, true); regionServerAllRegions = true; - } else if(cmd.equals("-writeSniffing")) { + } else if 
(cmd.equals("-writeSniffing")) { writeSniffing = true; conf.setBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, true); - } else if(cmd.equals("-treatFailureAsError") || cmd.equals("-failureAsError")) { + } else if (cmd.equals("-treatFailureAsError") || cmd.equals("-failureAsError")) { conf.setBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); } else if (cmd.equals("-e")) { conf.setBoolean(HBASE_CANARY_USE_REGEX, true); @@ -880,7 +882,7 @@ private int parseArgs(String[] args) { printUsageAndExit(); } conf.setLong(HBASE_CANARY_TIMEOUT, timeout); - } else if(cmd.equals("-writeTableTimeout")) { + } else if (cmd.equals("-writeTableTimeout")) { i++; if (i == args.length) { @@ -906,8 +908,7 @@ private int parseArgs(String[] args) { } else if (cmd.equals("-f")) { i++; if (i == args.length) { - System.err - .println("-f needs a boolean value argument (true|false)."); + System.err.println("-f needs a boolean value argument (true|false)."); printUsageAndExit(); } @@ -915,8 +916,8 @@ private int parseArgs(String[] args) { } else if (cmd.equals("-readTableTimeouts")) { i++; if (i == args.length) { - System.err.println("-readTableTimeouts needs a comma-separated list of read " + - "millisecond timeouts per table (without spaces)."); + System.err.println("-readTableTimeouts needs a comma-separated list of read " + + "millisecond timeouts per table (without spaces)."); printUsageAndExit(); } readTableTimeoutsStr = args[i]; @@ -951,8 +952,7 @@ private int parseArgs(String[] args) { } if (this.zookeeperMode) { if (this.regionServerMode || regionServerAllRegions || writeSniffing) { - System.err.println("-zookeeper is exclusive and cannot be combined with " - + "other modes."); + System.err.println("-zookeeper is exclusive and cannot be combined with " + "other modes."); printUsageAndExit(); } } @@ -978,7 +978,7 @@ public int run(String[] args) throws Exception { System.arraycopy(args, index, monitorTargets, 0, length); } if (interval > 0) { - //Only show the web page in daemon mode + // Only show the web page in daemon mode putUpWebUI(); } if (zookeeperMode) { @@ -1033,8 +1033,7 @@ private int runMonitor(String[] monitorTargets) throws Exception { currentTimeLength = EnvironmentEdgeManager.currentTime() - startTime; if (currentTimeLength > timeout) { LOG.error("The monitor is running too long (" + currentTimeLength - + ") after timeout limit:" + timeout - + " will be killed itself !!"); + + ") after timeout limit:" + timeout + " will be killed itself !!"); if (monitor.initialized) { return TIMEOUT_ERROR_EXIT_CODE; } else { @@ -1064,12 +1063,12 @@ private int runMonitor(String[] monitorTargets) throws Exception { } @Override - public Map getReadFailures() { + public Map getReadFailures() { return sink.getReadFailures(); } @Override - public Map getWriteFailures() { + public Map getWriteFailures() { return sink.getWriteFailures(); } @@ -1078,38 +1077,38 @@ private void printUsageAndExit() { "Usage: canary [OPTIONS] [ [ [ interval between checks in seconds"); - System.err.println(" -e consider table/regionserver argument as regular " + - "expression"); + System.err + .println(" -e consider table/regionserver argument as regular " + "expression"); System.err.println(" -f exit on first error; default=true"); System.err.println(" -failureAsError treat read/write failure as error"); System.err.println(" -t timeout for canary-test run; default=600000ms"); System.err.println(" -writeSniffing enable write sniffing"); System.err.println(" -writeTable the table used for write sniffing; default=hbase:canary"); System.err.println(" 
-writeTableTimeout timeout for writeTable; default=600000ms"); - System.err.println(" -readTableTimeouts =," + - "=,..."); - System.err.println(" comma-separated list of table read timeouts " + - "(no spaces);"); + System.err.println( + " -readTableTimeouts =," + "=,..."); + System.err + .println(" comma-separated list of table read timeouts " + "(no spaces);"); System.err.println(" logs 'ERROR' if takes longer. default=600000ms"); System.err.println(" -permittedZookeeperFailures Ignore first N failures attempting to "); System.err.println(" connect to individual zookeeper nodes in ensemble"); System.err.println(""); System.err.println(" -D= to assign or override configuration params"); - System.err.println(" -Dhbase.canary.read.raw.enabled= Set to enable/disable " + - "raw scan; default=false"); - System.err.println(" -Dhbase.canary.info.port=PORT_NUMBER Set for a Canary UI; " + - "default=-1 (None)"); + System.err.println(" -Dhbase.canary.read.raw.enabled= Set to enable/disable " + + "raw scan; default=false"); + System.err.println( + " -Dhbase.canary.info.port=PORT_NUMBER Set for a Canary UI; " + "default=-1 (None)"); System.err.println(""); - System.err.println("Canary runs in one of three modes: region (default), regionserver, or " + - "zookeeper."); + System.err.println( + "Canary runs in one of three modes: region (default), regionserver, or " + "zookeeper."); System.err.println("To sniff/probe all regions, pass no arguments."); System.err.println("To sniff/probe all regions of a table, pass tablename."); System.err.println("To sniff/probe regionservers, pass -regionserver, etc."); @@ -1119,14 +1118,15 @@ private void printUsageAndExit() { Sink getSink(Configuration configuration, Class clazz) { // In test context, this.sink might be set. Use it if non-null. For testing. - return this.sink != null? this.sink: - (Sink)ReflectionUtils.newInstance(configuration.getClass("hbase.canary.sink.class", - clazz, Sink.class)); + return this.sink != null + ? this.sink + : (Sink) ReflectionUtils + .newInstance(configuration.getClass("hbase.canary.sink.class", clazz, Sink.class)); } /** - * Canary region mode-specific data structure which stores information about each region - * to be scanned + * Canary region mode-specific data structure which stores information about each region to be + * scanned */ public static class RegionTaskResult { private RegionInfo region; @@ -1139,7 +1139,7 @@ public static class RegionTaskResult { private boolean writeSuccess = false; public RegionTaskResult(RegionInfo region, TableName tableName, ServerName serverName, - ColumnFamilyDescriptor column) { + ColumnFamilyDescriptor column) { this.region = region; this.tableName = tableName; this.serverName = serverName; @@ -1226,45 +1226,36 @@ public void setWriteSuccess() { } /** - * A Factory method for {@link Monitor}. - * Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a RegionMonitor. + * A Factory method for {@link Monitor}. Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a + * RegionMonitor. 
* @return a Monitor instance */ private Monitor newMonitor(final Connection connection, String[] monitorTargets) { Monitor monitor; boolean useRegExp = conf.getBoolean(HBASE_CANARY_USE_REGEX, false); - boolean regionServerAllRegions - = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false); - boolean failOnError - = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); - int permittedFailures - = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0); - boolean writeSniffing - = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false); - String writeTableName = conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, - DEFAULT_WRITE_TABLE_NAME.getNameAsString()); - long configuredWriteTableTimeout - = conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT); + boolean regionServerAllRegions = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false); + boolean failOnError = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); + int permittedFailures = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0); + boolean writeSniffing = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false); + String writeTableName = + conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, DEFAULT_WRITE_TABLE_NAME.getNameAsString()); + long configuredWriteTableTimeout = + conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT); if (this.regionServerMode) { - monitor = - new RegionServerMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), RegionServerStdOutSink.class), - this.executor, regionServerAllRegions, - failOnError, permittedFailures); + monitor = new RegionServerMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), RegionServerStdOutSink.class), this.executor, + regionServerAllRegions, failOnError, permittedFailures); } else if (this.zookeeperMode) { - monitor = - new ZookeeperMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), - this.executor, failOnError, permittedFailures); + monitor = new ZookeeperMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), this.executor, + failOnError, permittedFailures); } else { - monitor = - new RegionMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), RegionStdOutSink.class), - this.executor, writeSniffing, - TableName.valueOf(writeTableName), failOnError, configuredReadTableTimeouts, - configuredWriteTableTimeout, permittedFailures); + monitor = new RegionMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), RegionStdOutSink.class), this.executor, + writeSniffing, TableName.valueOf(writeTableName), failOnError, configuredReadTableTimeouts, + configuredWriteTableTimeout, permittedFailures); } return monitor; } @@ -1274,19 +1265,20 @@ private void populateReadTableTimeoutsMap(String configuredReadTableTimeoutsStr) for (String tT : tableTimeouts) { String[] nameTimeout = tT.split("="); if (nameTimeout.length < 2) { - throw new IllegalArgumentException("Each -readTableTimeouts argument must be of the form " + - "= (without spaces)."); + throw new IllegalArgumentException("Each -readTableTimeouts argument must be of the form " + + "= (without spaces)."); } long timeoutVal; try { timeoutVal = Long.parseLong(nameTimeout[1]); } catch (NumberFormatException e) { - throw new IllegalArgumentException("-readTableTimeouts read timeout for each table" + - " must be a numeric value 
argument."); + throw new IllegalArgumentException( + "-readTableTimeouts read timeout for each table" + " must be a numeric value argument."); } configuredReadTableTimeouts.put(nameTimeout[0], timeoutVal); } } + /** * A Monitor super-class can be extended by users */ @@ -1294,8 +1286,8 @@ public static abstract class Monitor implements Runnable, Closeable { protected Connection connection; protected Admin admin; /** - * 'Target' dependent on 'mode'. Could be Tables or RegionServers or ZNodes. - * Passed on the command-line as arguments. + * 'Target' dependent on 'mode'. Could be Tables or RegionServers or ZNodes. Passed on the + * command-line as arguments. */ protected String[] targets; protected boolean useRegExp; @@ -1320,8 +1312,10 @@ public boolean finalCheckForErrors() { if (errorCode != 0) { return true; } - if (treatFailureAsError && (sink.getReadFailureCount() > allowedFailures - || sink.getWriteFailureCount() > allowedFailures)) { + if ( + treatFailureAsError && (sink.getReadFailureCount() > allowedFailures + || sink.getWriteFailureCount() > allowedFailures) + ) { LOG.error("Too many failures detected, treating failure as error, failing the Canary."); errorCode = FAILURE_EXIT_CODE; return true; @@ -1337,7 +1331,7 @@ public void close() throws IOException { } protected Monitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink, - ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { + ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { if (null == connection) { throw new IllegalArgumentException("connection shall not be null"); } @@ -1390,32 +1384,30 @@ private static class RegionMonitor extends Monitor { private boolean readAllCF; /** - * This is a timeout per table. If read of each region in the table aggregated takes longer - * than what is configured here, we log an ERROR rather than just an INFO. + * This is a timeout per table. If read of each region in the table aggregated takes longer than + * what is configured here, we log an ERROR rather than just an INFO. 
*/ private HashMap configuredReadTableTimeouts; private long configuredWriteTableTimeout; public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName, - boolean treatFailureAsError, HashMap configuredReadTableTimeouts, - long configuredWriteTableTimeout, - long allowedFailures) { + Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName, + boolean treatFailureAsError, HashMap configuredReadTableTimeouts, + long configuredWriteTableTimeout, long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, - allowedFailures); + allowedFailures); Configuration conf = connection.getConfiguration(); this.writeSniffing = writeSniffing; this.writeTableName = writeTableName; this.writeDataTTL = - conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL); + conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL); this.regionsLowerLimit = - conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f); + conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f); this.regionsUpperLimit = - conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f); - this.checkPeriod = - conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY, - DEFAULT_WRITE_TABLE_CHECK_PERIOD); + conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f); + this.checkPeriod = conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY, + DEFAULT_WRITE_TABLE_CHECK_PERIOD); this.rawScanEnabled = conf.getBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, false); this.configuredReadTableTimeouts = new HashMap<>(configuredReadTableTimeouts); this.configuredWriteTableTimeout = configuredWriteTableTimeout; @@ -1440,10 +1432,12 @@ public void run() { String[] tables = generateMonitorTables(this.targets); // Check to see that each table name passed in the -readTableTimeouts argument is also // passed as a monitor target. - if (!new HashSet<>(Arrays.asList(tables)). - containsAll(this.configuredReadTableTimeouts.keySet())) { - LOG.error("-readTableTimeouts can only specify read timeouts for monitor targets " + - "passed via command line."); + if ( + !new HashSet<>(Arrays.asList(tables)) + .containsAll(this.configuredReadTableTimeouts.keySet()) + ) { + LOG.error("-readTableTimeouts can only specify read timeouts for monitor targets " + + "passed via command line."); this.errorCode = USAGE_EXIT_CODE; return; } @@ -1470,8 +1464,8 @@ public void run() { regionSink.initializeWriteLatency(); LongAdder writeTableLatency = regionSink.getWriteLatency(); taskFutures - .addAll(CanaryTool.sniff(admin, regionSink, admin.getDescriptor(writeTableName), - executor, TaskType.WRITE, this.rawScanEnabled, writeTableLatency, readAllCF)); + .addAll(CanaryTool.sniff(admin, regionSink, admin.getDescriptor(writeTableName), + executor, TaskType.WRITE, this.rawScanEnabled, writeTableLatency, readAllCF)); } for (Future future : taskFutures) { @@ -1488,11 +1482,11 @@ public void run() { Long actual = actualReadTableLatency.get(tableName).longValue(); Long configured = entry.getValue(); if (actual > configured) { - LOG.error("Read operation for {} took {}ms exceeded the configured read timeout." 
+ - "(Configured read timeout {}ms.", tableName, actual, configured); + LOG.error("Read operation for {} took {}ms exceeded the configured read timeout." + + "(Configured read timeout {}ms.", tableName, actual, configured); } else { LOG.info("Read operation for {} took {}ms (Configured read timeout {}ms.", - tableName, actual, configured); + tableName, actual, configured); } } else { LOG.error("Read operation for {} failed!", tableName); @@ -1502,12 +1496,12 @@ public void run() { String writeTableStringName = this.writeTableName.getNameAsString(); long actualWriteLatency = regionSink.getWriteLatency().longValue(); LOG.info("Write operation for {} took {}ms. Configured write timeout {}ms.", - writeTableStringName, actualWriteLatency, this.configuredWriteTableTimeout); + writeTableStringName, actualWriteLatency, this.configuredWriteTableTimeout); // Check that the writeTable write operation latency does not exceed the configured // timeout. if (actualWriteLatency > this.configuredWriteTableTimeout) { LOG.error("Write operation for {} exceeded the configured write timeout.", - writeTableStringName); + writeTableStringName); } } } catch (Exception e) { @@ -1568,14 +1562,16 @@ private String[] generateMonitorTables(String[] monitorTargets) throws IOExcepti * Canary entry point to monitor all the tables. */ private List> sniff(TaskType taskType, RegionStdOutSink regionSink) - throws Exception { + throws Exception { LOG.debug("Reading list of tables"); List> taskFutures = new LinkedList<>(); - for (TableDescriptor td: admin.listTableDescriptors()) { - if (admin.tableExists(td.getTableName()) && admin.isTableEnabled(td.getTableName()) && - (!td.getTableName().equals(writeTableName))) { + for (TableDescriptor td : admin.listTableDescriptors()) { + if ( + admin.tableExists(td.getTableName()) && admin.isTableEnabled(td.getTableName()) + && (!td.getTableName().equals(writeTableName)) + ) { LongAdder readLatency = - regionSink.initializeAndGetReadLatencyForTable(td.getTableName().getNameAsString()); + regionSink.initializeAndGetReadLatencyForTable(td.getTableName().getNameAsString()); taskFutures.addAll(CanaryTool.sniff(admin, sink, td, executor, taskType, this.rawScanEnabled, readLatency, readAllCF)); } @@ -1597,17 +1593,19 @@ private void checkWriteTableDistribution() throws IOException { } ClusterMetrics status = - admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.MASTER)); + admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.MASTER)); int numberOfServers = status.getServersName().size(); if (status.getServersName().contains(status.getMasterName())) { numberOfServers -= 1; } List> pairs = - MetaTableAccessor.getTableRegionsAndLocations(connection, writeTableName); + MetaTableAccessor.getTableRegionsAndLocations(connection, writeTableName); int numberOfRegions = pairs.size(); - if (numberOfRegions < numberOfServers * regionsLowerLimit - || numberOfRegions > numberOfServers * regionsUpperLimit) { + if ( + numberOfRegions < numberOfServers * regionsLowerLimit + || numberOfRegions > numberOfServers * regionsUpperLimit + ) { admin.disableTable(writeTableName); admin.deleteTable(writeTableName); createWriteTable(numberOfServers); @@ -1623,16 +1621,16 @@ private void checkWriteTableDistribution() throws IOException { } private void createWriteTable(int numberOfServers) throws IOException { - int numberOfRegions = (int)(numberOfServers * regionsLowerLimit); - LOG.info("Number of live regionservers {}, pre-splitting the canary table into {} regions " + - "(current lower limit of 
regions per server is {} and you can change it with config {}).", - numberOfServers, numberOfRegions, regionsLowerLimit, - HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY); - ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(CANARY_TABLE_FAMILY_NAME)).setMaxVersions(1) - .setTimeToLive(writeDataTTL).build(); - TableDescriptor desc = TableDescriptorBuilder.newBuilder(writeTableName) - .setColumnFamily(family).build(); + int numberOfRegions = (int) (numberOfServers * regionsLowerLimit); + LOG.info("Number of live regionservers {}, pre-splitting the canary table into {} regions " + + "(current lower limit of regions per server is {} and you can change it with config {}).", + numberOfServers, numberOfRegions, regionsLowerLimit, + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY); + ColumnFamilyDescriptor family = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CANARY_TABLE_FAMILY_NAME)) + .setMaxVersions(1).setTimeToLive(writeDataTTL).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(writeTableName).setColumnFamily(family).build(); byte[][] splits = new RegionSplitter.HexStringSplit().split(numberOfRegions); admin.createTable(desc, splits); } @@ -1643,8 +1641,8 @@ private void createWriteTable(int numberOfServers) throws IOException { * @throws Exception exception */ private static List> sniff(final Admin admin, final Sink sink, String tableName, - ExecutorService executor, TaskType taskType, boolean rawScanEnabled, LongAdder readLatency, - boolean readAllCF) throws Exception { + ExecutorService executor, TaskType taskType, boolean rawScanEnabled, LongAdder readLatency, + boolean readAllCF) throws Exception { LOG.debug("Checking table is enabled and getting table descriptor for table {}", tableName); if (admin.isTableEnabled(TableName.valueOf(tableName))) { return CanaryTool.sniff(admin, sink, admin.getDescriptor(TableName.valueOf(tableName)), @@ -1659,22 +1657,22 @@ private static List> sniff(final Admin admin, final Sink sink, Stri * Loops over regions of this table, and outputs information about the state. 
*/ private static List> sniff(final Admin admin, final Sink sink, - TableDescriptor tableDesc, ExecutorService executor, TaskType taskType, - boolean rawScanEnabled, LongAdder rwLatency, boolean readAllCF) throws Exception { + TableDescriptor tableDesc, ExecutorService executor, TaskType taskType, boolean rawScanEnabled, + LongAdder rwLatency, boolean readAllCF) throws Exception { LOG.debug("Reading list of regions for table {}", tableDesc.getTableName()); try (Table table = admin.getConnection().getTable(tableDesc.getTableName())) { List tasks = new ArrayList<>(); try (RegionLocator regionLocator = - admin.getConnection().getRegionLocator(tableDesc.getTableName())) { - for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); continue; } ServerName rs = location.getServerName(); RegionInfo region = location.getRegion(); - tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink)sink, - taskType, rawScanEnabled, rwLatency, readAllCF)); + tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, + taskType, rawScanEnabled, rwLatency, readAllCF)); Map> regionMap = ((RegionStdOutSink) sink).getRegionMap(); regionMap.put(region.getRegionNameAsString(), new ArrayList()); } @@ -1685,24 +1683,22 @@ private static List> sniff(final Admin admin, final Sink sink, } } - // monitor for zookeeper mode + // monitor for zookeeper mode private static class ZookeeperMonitor extends Monitor { private List hosts; private final String znode; private final int timeout; protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { - super(connection, monitorTargets, useRegExp, - sink, executor, treatFailureAsError, allowedFailures); + Sink sink, ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { + super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, + allowedFailures); Configuration configuration = connection.getConfiguration(); - znode = - configuration.get(ZOOKEEPER_ZNODE_PARENT, - DEFAULT_ZOOKEEPER_ZNODE_PARENT); - timeout = configuration - .getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + znode = configuration.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); + timeout = + configuration.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); ConnectStringParser parser = - new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); + new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); hosts = Lists.newArrayList(); for (InetSocketAddress server : parser.getServerAddresses()) { hosts.add(inetSocketAddress2String(server)); @@ -1710,12 +1706,13 @@ protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boole if (allowedFailures > (hosts.size() - 1) / 2) { LOG.warn( "Confirm allowable number of failed ZooKeeper nodes, as quorum will " - + "already be lost. Setting of {} failures is unexpected for {} ensemble size.", + + "already be lost. 
Setting of {} failures is unexpected for {} ensemble size.", allowedFailures, hosts.size()); } } - @Override public void run() { + @Override + public void run() { List tasks = Lists.newArrayList(); ZookeeperStdOutSink zkSink = null; try { @@ -1753,7 +1750,6 @@ private ZookeeperStdOutSink getSink() { } } - /** * A monitor for regionserver mode */ @@ -1761,10 +1757,10 @@ private static class RegionServerMonitor extends Monitor { private boolean allRegions; public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean allRegions, - boolean treatFailureAsError, long allowedFailures) { + Sink sink, ExecutorService executor, boolean allRegions, boolean treatFailureAsError, + long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, - allowedFailures); + allowedFailures); this.allRegions = allRegions; } @@ -1817,15 +1813,15 @@ private boolean checkNoTableNames() { } if (foundTableNames.size() > 0) { - System.err.println("Cannot pass a tablename when using the -regionserver " + - "option, tablenames:" + foundTableNames.toString()); + System.err.println("Cannot pass a tablename when using the -regionserver " + + "option, tablenames:" + foundTableNames.toString()); this.errorCode = USAGE_EXIT_CODE; } return foundTableNames.isEmpty(); } private void monitorRegionServers(Map> rsAndRMap, - RegionServerStdOutSink regionServerSink) { + RegionServerStdOutSink regionServerSink) { List tasks = new ArrayList<>(); Map successMap = new HashMap<>(); for (Map.Entry> entry : rsAndRMap.entrySet()) { @@ -1836,21 +1832,15 @@ private void monitorRegionServers(Map> rsAndRMap, LOG.error("Regionserver not serving any regions - {}", serverName); } else if (this.allRegions) { for (RegionInfo region : entry.getValue()) { - tasks.add(new RegionServerTask(this.connection, - serverName, - region, - regionServerSink, - successes)); + tasks.add(new RegionServerTask(this.connection, serverName, region, regionServerSink, + successes)); } } else { // random select a region if flag not set - RegionInfo region = entry.getValue() - .get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); - tasks.add(new RegionServerTask(this.connection, - serverName, - region, - regionServerSink, - successes)); + RegionInfo region = + entry.getValue().get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); + tasks.add( + new RegionServerTask(this.connection, serverName, region, regionServerSink, successes)); } } try { @@ -1866,7 +1856,7 @@ private void monitorRegionServers(Map> rsAndRMap, for (Map.Entry> entry : rsAndRMap.entrySet()) { String serverName = entry.getKey(); LOG.info("Successfully read {} regions out of {} on regionserver {}", - successMap.get(serverName), entry.getValue().size(), serverName); + successMap.get(serverName), entry.getValue().size(), serverName); } } } catch (InterruptedException e) { @@ -1887,9 +1877,9 @@ private Map> getAllRegionServerByName() { LOG.debug("Reading list of tables and locations"); List tableDescs = this.admin.listTableDescriptors(); List regions = null; - for (TableDescriptor tableDesc: tableDescs) { + for (TableDescriptor tableDesc : tableDescs) { try (RegionLocator regionLocator = - this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); @@ 
-1910,7 +1900,7 @@ private Map> getAllRegionServerByName() { } // get any live regionservers not serving any regions - for (ServerName rs: this.admin.getRegionServers()) { + for (ServerName rs : this.admin.getRegionServers()) { String rsName = rs.getHostname(); if (!rsAndRMap.containsKey(rsName)) { rsAndRMap.put(rsName, Collections. emptyList()); @@ -1923,8 +1913,8 @@ private Map> getAllRegionServerByName() { return rsAndRMap; } - private Map> doFilterRegionServerByName( - Map> fullRsAndRMap) { + private Map> + doFilterRegionServerByName(Map> fullRsAndRMap) { Map> filteredRsAndRMap = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java index c909725a616a..e55c7c5737f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -44,7 +42,6 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { /** * Check DataBlockEncodings of column families are compatible. - * * @return number of column families with incompatible DataBlockEncoding * @throws IOException if a remote or network exception occurs */ @@ -54,7 +51,7 @@ private int validateDBE() throws IOException { LOG.info("Validating Data Block Encodings"); try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { List tableDescriptors = admin.listTableDescriptors(); String encoding = ""; @@ -68,7 +65,7 @@ private int validateDBE() throws IOException { } catch (IllegalArgumentException e) { incompatibilities++; LOG.warn("Incompatible DataBlockEncoding for table: {}, cf: {}, encoding: {}", - td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding); + td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding); } } } @@ -76,8 +73,8 @@ private int validateDBE() throws IOException { if (incompatibilities > 0) { LOG.warn("There are {} column families with incompatible Data Block Encodings. Do not " - + "upgrade until these encodings are converted to a supported one. " - + "Check https://s.apache.org/prefixtree for instructions.", incompatibilities); + + "upgrade until these encodings are converted to a supported one. 
" + + "Check https://s.apache.org/prefixtree for instructions.", incompatibilities); } else { LOG.info("The used Data Block Encodings are compatible with HBase 2.0."); } @@ -87,8 +84,8 @@ private int validateDBE() throws IOException { @Override protected void printUsage() { - String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + - PreUpgradeValidator.VALIDATE_DBE_NAME; + String header = + "hbase " + PreUpgradeValidator.TOOL_NAME + " " + PreUpgradeValidator.VALIDATE_DBE_NAME; printUsage(header, null, ""); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java index 2f648975724e..475a28088276 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,11 +33,11 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -48,7 +47,6 @@ public class HFileContentValidator extends AbstractHBaseTool { /** * Check HFile contents are readable by HBase 2. - * * @param conf used configuration * @return number of HFiles corrupted HBase * @throws IOException if a remote or network exception occurs @@ -99,7 +97,7 @@ private boolean validateHFileContent(Configuration conf) throws IOException { } LOG.info("Change data block encodings before upgrading. " - + "Check https://s.apache.org/prefixtree for instructions."); + + "Check https://s.apache.org/prefixtree for instructions."); return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java index 818004c272ea..dcfb3878c502 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.tool; import java.util.Arrays; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -44,8 +42,7 @@ */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PreUpgradeValidator implements Tool { - private static final Logger LOG = LoggerFactory - .getLogger(PreUpgradeValidator.class); + private static final Logger LOG = LoggerFactory.getLogger(PreUpgradeValidator.class); public static final String TOOL_NAME = "pre-upgrade"; public static final String VALIDATE_CP_NAME = "validate-cp"; @@ -68,11 +65,10 @@ private void printUsage() { System.out.println("usage: hbase " + TOOL_NAME + " command ..."); System.out.println("Available commands:"); System.out.printf(" %-15s Validate co-processors are compatible with HBase%n", - VALIDATE_CP_NAME); + VALIDATE_CP_NAME); System.out.printf(" %-15s Validate DataBlockEncodings are compatible with HBase%n", - VALIDATE_DBE_NAME); - System.out.printf(" %-15s Validate HFile contents are readable%n", - VALIDATE_HFILE); + VALIDATE_DBE_NAME); + System.out.printf(" %-15s Validate HFile contents are readable%n", VALIDATE_HFILE); System.out.println("For further information, please use command -h"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java index 9311200ac939..e3df46cc9cba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,29 +34,23 @@ /** *

- * This coprocessor 'shallows' all the writes. It allows to test a pure
- * write workload, going through all the communication layers.
- * The reads will work as well, but they as we never write, they will always always
- * return an empty structure. The WAL is also skipped.
- * Obviously, the region will never be split automatically. It's up to the user
- * to split and move it.
+ * This coprocessor 'shallows' all the writes. It allows testing a pure write workload, going
+ * through all the communication layers. The reads will work as well, but as we never write,
+ * they will always return an empty structure. The WAL is also skipped. Obviously, the region
+ * will never be split automatically. It's up to the user to split and move it.
  *
  *
- * For a table created like this:
- * create 'usertable', {NAME => 'f1', VERSIONS => 1}
+ * For a table created like this: create 'usertable', {NAME => 'f1', VERSIONS => 1}
  *
  *
- * You can then add the coprocessor with this command:
- * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
+ * You can then add the coprocessor with this command: alter 'usertable', 'coprocessor' =>
+ * '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
  *
  *
- * And then
- * put 'usertable', 'f1', 'f1', 'f1'
+ * And then put 'usertable', 'f1', 'f1', 'f1'
  *
  *
- * scan 'usertable'
- * Will return:
- * 0 row(s) in 0.0050 seconds
+ * scan 'usertable' Will return: 0 row(s) in 0.0050 seconds
  *
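Editor's aside (not part of the patch above): the shell-based setup described in this Javadoc can also be expressed through the Java Admin API. The sketch below is illustrative only; the class name AttachWriteSinkExample is made up, and it assumes a 'usertable' with family 'f1' already exists and that WriteSinkCoprocessor is available on the region servers' classpath.

// Illustrative sketch only: attach WriteSinkCoprocessor to an existing table via the
// Admin API, mirroring the 'alter' shell command quoted in the Javadoc above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AttachWriteSinkExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("usertable"); // assumed to exist with family 'f1'
      // Rebuild the table descriptor with the coprocessor attached, then apply the change.
      TableDescriptor withSink = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
        .setCoprocessor("org.apache.hadoop.hbase.tool.WriteSinkCoprocessor").build();
      admin.modifyTable(withSink);
      // From here on, puts to 'usertable' are acknowledged but swallowed,
      // so a subsequent scan returns 0 rows, as the Javadoc describes.
    }
  }
}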

    * TODO: It needs tests */ @@ -80,15 +73,14 @@ public void preOpen(ObserverContext e) throws IOEx @Override public void preBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) - throws IOException { + final MiniBatchOperationInProgress miniBatchOp) throws IOException { if (ops.incrementAndGet() % 20000 == 0) { LOG.info("Wrote " + ops.get() + " times in region " + regionName); } for (int i = 0; i < miniBatchOp.size(); i++) { miniBatchOp.setOperationStatus(i, - new OperationStatus(HConstants.OperationStatusCode.SUCCESS)); + new OperationStatus(HConstants.OperationStatusCode.SUCCESS)); } c.bypass(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java index 0f5d829de6b9..92f419e543af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import org.apache.yetus.audience.InterfaceAudience; @@ -32,1106 +31,721 @@ public Branch1CoprocessorMethods() { private void addMethods() { /* BulkLoadObserver */ - addMethod("prePrepareBulkLoad", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); + addMethod("prePrepareBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); - addMethod("preCleanupBulkLoad", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); + addMethod("preCleanupBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); /* EndpointObserver */ - addMethod("postEndpointInvocation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "com.google.protobuf.Service", - "java.lang.String", - "com.google.protobuf.Message", - "com.google.protobuf.Message.Builder"); + addMethod("postEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message", + "com.google.protobuf.Message.Builder"); - addMethod("preEndpointInvocation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "com.google.protobuf.Service", - "java.lang.String", - "com.google.protobuf.Message"); + addMethod("preEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message"); /* MasterObserver */ - addMethod("preCreateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postCreateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - 
"org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("preDeleteTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDeleteTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDeleteTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preMove", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.ServerName", - "org.apache.hadoop.hbase.ServerName"); - - addMethod("preCreateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postCreateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postMove", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.ServerName", - "org.apache.hadoop.hbase.ServerName"); - - addMethod("postDeleteTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preTruncateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTruncateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preTruncateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTruncateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preModifyTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postModifyTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preModifyTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postModifyTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preAddColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postAddColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preAddColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postAddColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preModifyColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postModifyColumn", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preModifyColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postModifyColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preDeleteColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("postDeleteColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("preDeleteColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("postDeleteColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("preEnableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postEnableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preEnableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postEnableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDisableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDisableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDisableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDisableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preAbortProcedure", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", - "long"); - - addMethod("postAbortProcedure", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preListProcedures", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postListProcedures", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preAssign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postAssign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preUnassign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "boolean"); - - addMethod("postUnassign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "boolean"); - - addMethod("preRegionOffline", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postRegionOffline", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preBalance", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postBalance", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preSetSplitOrMergeEnabled", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); - - addMethod("postSetSplitOrMergeEnabled", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); - - addMethod("preBalanceSwitch", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("postBalanceSwitch", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "boolean"); - - addMethod("preShutdown", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preStopMaster", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postStartMaster", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preMasterInitialization", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preListSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("postListSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("preCloneSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postCloneSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preRestoreSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postRestoreSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preDeleteSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("postDeleteSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("preGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List"); - - addMethod("preGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableDescriptors", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preGetTableNames", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableNames", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.lang.String"); - - addMethod("preCreateNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("postCreateNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preDeleteNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postDeleteNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preModifyNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("postModifyNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preGetNamespaceDescriptor", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postGetNamespaceDescriptor", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preListNamespaceDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("postListNamespaceDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preTableFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTableFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetTableQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetTableQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - 
addMethod("preSetNamespaceQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetNamespaceQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preDispatchMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postDispatchMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preGetClusterStatus", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postGetClusterStatus", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.ClusterStatus"); - - addMethod("preClearDeadServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postClearDeadServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List"); - - addMethod("preMoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("preMoveTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("preMoveServersAndTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveServersAndTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.util.Set", - "java.lang.String"); - - addMethod("preAddRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postAddRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preRemoveRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postRemoveRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preRemoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set"); - - addMethod("postRemoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set"); - - addMethod("preBalanceRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postBalanceRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "boolean"); + addMethod("preCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("preDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + 
addMethod("preDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", + "org.apache.hadoop.hbase.ServerName"); + + addMethod("preCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", + "org.apache.hadoop.hbase.ServerName"); + + addMethod("postDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postModifyColumnHandler", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("postDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("preDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("postDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("preEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", "long"); + + addMethod("postAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "boolean"); + + addMethod("postUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "boolean"); + + addMethod("preRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); + + addMethod("postSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); + + 
addMethod("preBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("postBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", + "boolean"); + + addMethod("preShutdown", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preStopMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postStartMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preMasterInitialization", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("postListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("preCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("postDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List"); + + addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List", "java.lang.String"); + + addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List", "java.lang.String"); + + addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.lang.String"); + + addMethod("postGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.lang.String"); + + addMethod("preCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("postCreateNamespace", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("postModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("postListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "java.lang.String", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "java.lang.String", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postDispatchMerge", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.ClusterStatus"); + + addMethod("preClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List"); + + addMethod("preMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("postMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("preMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("postMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("preMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.util.Set", "java.lang.String"); + + addMethod("postMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.util.Set", "java.lang.String"); + + addMethod("preAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set"); + + addMethod("postRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set"); + + addMethod("preBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "boolean"); /* RegionObserver */ - addMethod("preOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postLogReplay", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preFlushScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "long"); - - addMethod("preFlushScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - 
"org.apache.hadoop.hbase.regionserver.StoreFile"); - - addMethod("postFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List"); - - addMethod("preCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("postCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "com.google.common.collect.ImmutableList"); - - addMethod("postCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "com.google.common.collect.ImmutableList", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("preCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.ScanType"); - - addMethod("preCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.ScanType", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("preClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", - "long"); - - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("postCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile"); - - addMethod("postCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("preSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]"); - - addMethod("preSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); - - addMethod("preSplitBeforePONR", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - 
"byte[]", - "java.util.List"); - - addMethod("preSplitAfterPONR", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preRollBackSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postRollBackSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postCompleteSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("preGetClosestRowBefore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("postGetClosestRowBefore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("preGetOp", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "java.util.List"); - - addMethod("postGetOp", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "java.util.List"); - - addMethod("preExists", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "boolean"); - - addMethod("postExists", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "boolean"); - - addMethod("prePut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Put", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); - - addMethod("postPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Put", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); - - addMethod("preDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Delete", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); + addMethod("preOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postLogReplay", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.KeyValueScanner", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "long"); + + addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.KeyValueScanner", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile"); + + addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List"); + + 
addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList"); + + addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.ScanType"); + + addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.ScanType", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("preClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", "long"); + + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile"); + + addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]"); + + addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); + + addMethod("preSplitBeforePONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "java.util.List"); + + addMethod("preSplitAfterPONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postCompleteSplit", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("preGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); + + addMethod("postGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); + + addMethod("preGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "java.util.List"); + + addMethod("postGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "java.util.List"); + + addMethod("preExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "boolean"); + + addMethod("postExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "boolean"); + + addMethod("prePut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); + + addMethod("postPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); + + addMethod("preDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); addMethod("prePrepareTimeStampForDeleteVersion", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Mutation", - "org.apache.hadoop.hbase.Cell", - "byte[]", - "org.apache.hadoop.hbase.client.Get"); - - addMethod("postDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Delete", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); - - addMethod("preBatchMutate", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); - - addMethod("postBatchMutate", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); - - addMethod("postStartRegionOperation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region.Operation"); - - addMethod("postCloseRegionOperation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region.Operation"); - - addMethod("postBatchMutateIndispensably", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", - "boolean"); - - addMethod("preCheckAndPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); - - addMethod("preCheckAndPutAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - 
"org.apache.hadoop.hbase.client.Put", - "boolean"); - - addMethod("postCheckAndPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); - - addMethod("preCheckAndDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "byte[]", + "org.apache.hadoop.hbase.client.Get"); + + addMethod("postDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); + + addMethod("preBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); + + addMethod("postBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); + + addMethod("postStartRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region.Operation"); + + addMethod("postCloseRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region.Operation"); + + addMethod("postBatchMutateIndispensably", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", "boolean"); + + addMethod("preCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); + + addMethod("preCheckAndPutAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); + + addMethod("postCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); + + addMethod("preCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); addMethod("preCheckAndDeleteAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); - - addMethod("postCheckAndDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - 
"org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); - - addMethod("preIncrementColumnValue", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "long", - "boolean"); - - addMethod("postIncrementColumnValue", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "long", - "boolean", - "long"); - - addMethod("preAppend", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append"); - - addMethod("preAppendAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append"); - - addMethod("postAppend", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("preIncrement", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment"); - - addMethod("preIncrementAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment"); - - addMethod("postIncrement", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("preScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Scan", - "org.apache.hadoop.hbase.regionserver.RegionScanner"); - - addMethod("preStoreScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.client.Scan", - "java.util.NavigableSet", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner"); - - addMethod("postScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Scan", - "org.apache.hadoop.hbase.regionserver.RegionScanner"); - - addMethod("preScannerNext", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "java.util.List", - "int", - "boolean"); - - addMethod("postScannerNext", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "java.util.List", - "int", - "boolean"); - - addMethod("postScannerFilterRow", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "byte[]", - "int", - "short", - "boolean"); - - addMethod("preScannerClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("postScannerClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - 
"org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preBulkLoadHFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preCommitStoreFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "java.util.List"); - - addMethod("postCommitStoreFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); - - addMethod("postBulkLoadHFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "boolean"); - - addMethod("preStoreFileReaderOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.FileSystem", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", - "long", - "org.apache.hadoop.hbase.io.hfile.CacheConfig", - "org.apache.hadoop.hbase.io.Reference", - "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); - - addMethod("postStoreFileReaderOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.FileSystem", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", - "long", - "org.apache.hadoop.hbase.io.hfile.CacheConfig", - "org.apache.hadoop.hbase.io.Reference", - "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); - - addMethod("postMutationBeforeWAL", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", - "org.apache.hadoop.hbase.client.Mutation", - "org.apache.hadoop.hbase.Cell", - "org.apache.hadoop.hbase.Cell"); - - addMethod("postInstantiateDeleteTracker", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.DeleteTracker"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", + "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); + + addMethod("postCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); + + addMethod("preIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "long", "boolean"); + + addMethod("postIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "long", "boolean", "long"); + + addMethod("preAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append"); + + addMethod("preAppendAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append"); + + addMethod("postAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append", "org.apache.hadoop.hbase.client.Result"); + + addMethod("preIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment"); + + addMethod("preIncrementAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + 
"org.apache.hadoop.hbase.client.Increment"); + + addMethod("postIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment", "org.apache.hadoop.hbase.client.Result"); + + addMethod("preScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); + + addMethod("preStoreScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.client.Scan", + "java.util.NavigableSet", "org.apache.hadoop.hbase.regionserver.KeyValueScanner"); + + addMethod("postScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); + + addMethod("preScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); + + addMethod("postScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); + + addMethod("postScannerFilterRow", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "byte[]", "int", "short", "boolean"); + + addMethod("preScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("postScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "java.util.List"); + + addMethod("postCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); + + addMethod("postBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "boolean"); + + addMethod("preStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", + "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", + "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", + "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); + + addMethod("postStoreFileReaderOpen", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", + "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", + "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", + "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); + + addMethod("postMutationBeforeWAL", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", + "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", + "org.apache.hadoop.hbase.Cell"); + + addMethod("postInstantiateDeleteTracker", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.DeleteTracker"); /* RegionServerObserver */ - addMethod("preMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("preMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preStopRegionServer", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preStopRegionServer", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preMergeCommit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "java.util.List"); + addMethod("preMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "java.util.List"); - addMethod("postMergeCommit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preRollBackMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("preRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("postRollBackMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preRollWALWriterRequest", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preRollWALWriterRequest", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postRollWALWriterRequest", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postCreateReplicationEndPoint", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); - addMethod("preReplicateLogEntries", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "org.apache.hadoop.hbase.CellScanner"); + addMethod("preReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "org.apache.hadoop.hbase.CellScanner"); - addMethod("postReplicateLogEntries", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "org.apache.hadoop.hbase.CellScanner"); + addMethod("postReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "org.apache.hadoop.hbase.CellScanner"); /* WALObserver */ - addMethod("preWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preWALRoll", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); - - addMethod("postWALRoll", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); + addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); + + addMethod("postWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.Path", 
"org.apache.hadoop.fs.Path"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java index 60e384171352..36b98f5bcfb4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.util.ArrayList; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -36,7 +33,7 @@ public CoprocessorMethod(String name) { parameters = new ArrayList<>(); } - public CoprocessorMethod withParameters(String ... parameters) { + public CoprocessorMethod withParameters(String... parameters) { for (String parameter : parameters) { this.parameters.add(parameter); } @@ -44,7 +41,7 @@ public CoprocessorMethod withParameters(String ... parameters) { return this; } - public CoprocessorMethod withParameters(Class ... parameters) { + public CoprocessorMethod withParameters(Class... parameters) { for (Class parameter : parameters) { this.parameters.add(parameter.getCanonicalName()); } @@ -60,10 +57,9 @@ public boolean equals(Object obj) { return false; } - CoprocessorMethod other = (CoprocessorMethod)obj; + CoprocessorMethod other = (CoprocessorMethod) obj; - return Objects.equals(name, other.name) && - Objects.equals(parameters, other.parameters); + return Objects.equals(name, other.name) && Objects.equals(parameters, other.parameters); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java index 2e0c801b8aad..3766d901b03b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.lang.reflect.Method; import java.util.HashSet; import java.util.Set; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -32,35 +30,35 @@ public CoprocessorMethods() { methods = new HashSet<>(); } - public void addMethod(String name, String ... parameters) { + public void addMethod(String name, String... parameters) { CoprocessorMethod cpMethod = new CoprocessorMethod(name).withParameters(parameters); methods.add(cpMethod); } - public void addMethod(String name, Class ... parameters) { + public void addMethod(String name, Class... 
parameters) { CoprocessorMethod cpMethod = new CoprocessorMethod(name).withParameters(parameters); methods.add(cpMethod); } public void addMethod(Method method) { - CoprocessorMethod cpMethod = new CoprocessorMethod(method.getName()) - .withParameters(method.getParameterTypes()); + CoprocessorMethod cpMethod = + new CoprocessorMethod(method.getName()).withParameters(method.getParameterTypes()); methods.add(cpMethod); } - public boolean hasMethod(String name, String ... parameters) { + public boolean hasMethod(String name, String... parameters) { CoprocessorMethod method = new CoprocessorMethod(name).withParameters(parameters); return methods.contains(method); } - public boolean hasMethod(String name, Class ... parameters) { + public boolean hasMethod(String name, Class... parameters) { CoprocessorMethod method = new CoprocessorMethod(name).withParameters(parameters); return methods.contains(method); } public boolean hasMethod(Method method) { - CoprocessorMethod cpMethod = new CoprocessorMethod(method.getName()) - .withParameters(method.getParameterTypes()); + CoprocessorMethod cpMethod = + new CoprocessorMethod(method.getName()).withParameters(method.getParameterTypes()); return methods.contains(cpMethod); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java index 766224e5d381..45cbe8eab735 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.io.IOException; @@ -56,8 +54,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class CoprocessorValidator extends AbstractHBaseTool { - private static final Logger LOG = LoggerFactory - .getLogger(CoprocessorValidator.class); + private static final Logger LOG = LoggerFactory.getLogger(CoprocessorValidator.class); private CoprocessorMethods branch1; private CoprocessorMethods current; @@ -79,11 +76,10 @@ public CoprocessorValidator() { } /** - * This classloader implementation calls {@link #resolveClass(Class)} - * method for every loaded class. It means that some extra validation will - * take place - * according to JLS. + * This classloader implementation calls {@link #resolveClass(Class)} method for every loaded + * class. It means that some extra validation will take place + * according to + * JLS. 
*/ private static final class ResolverUrlClassLoader extends URLClassLoader { private ResolverUrlClassLoader(URL[] urls, ClassLoader parent) { @@ -110,7 +106,7 @@ public ResolverUrlClassLoader run() { } private ResolverUrlClassLoader createClassLoader(ClassLoader parent, - org.apache.hadoop.fs.Path path) throws IOException { + org.apache.hadoop.fs.Path path) throws IOException { Path tempPath = Files.createTempFile("hbase-coprocessor-", ".jar"); org.apache.hadoop.fs.Path destination = new org.apache.hadoop.fs.Path(tempPath.toString()); @@ -125,7 +121,7 @@ private ResolverUrlClassLoader createClassLoader(ClassLoader parent, } private void validate(ClassLoader classLoader, String className, - List violations) { + List violations) { LOG.debug("Validating class '{}'.", className); try { @@ -135,45 +131,45 @@ private void validate(ClassLoader classLoader, String className, LOG.trace("Validating method '{}'.", method); if (branch1.hasMethod(method) && !current.hasMethod(method)) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.WARNING, "method '" + method + - "' was removed from new coprocessor API, so it won't be called by HBase"); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.WARNING, "method '" + method + + "' was removed from new coprocessor API, so it won't be called by HBase"); violations.add(violation); } } } catch (ClassNotFoundException e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, "no such class", e); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.ERROR, "no such class", e); violations.add(violation); } catch (RuntimeException | Error e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, "could not validate class", e); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.ERROR, "could not validate class", e); violations.add(violation); } } public void validateClasses(ClassLoader classLoader, List classNames, - List violations) { + List violations) { for (String className : classNames) { validate(classLoader, className, violations); } } public void validateClasses(ClassLoader classLoader, String[] classNames, - List violations) { + List violations) { validateClasses(classLoader, Arrays.asList(classNames), violations); } @InterfaceAudience.Private - protected void validateTables(ClassLoader classLoader, Admin admin, - Pattern pattern, List violations) throws IOException { + protected void validateTables(ClassLoader classLoader, Admin admin, Pattern pattern, + List violations) throws IOException { List tableDescriptors = admin.listTableDescriptors(pattern); for (TableDescriptor tableDescriptor : tableDescriptors) { LOG.debug("Validating table {}", tableDescriptor.getTableName()); Collection coprocessorDescriptors = - tableDescriptor.getCoprocessorDescriptors(); + tableDescriptor.getCoprocessorDescriptors(); for (CoprocessorDescriptor coprocessorDescriptor : coprocessorDescriptors) { String className = coprocessorDescriptor.getClassName(); @@ -184,9 +180,8 @@ protected void validateTables(ClassLoader classLoader, Admin admin, try (ResolverUrlClassLoader cpClassLoader = createClassLoader(classLoader, path)) { validate(cpClassLoader, className, violations); } catch (IOException e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, - "could not validate jar file '" + path + "'", e); + CoprocessorViolation violation = new 
CoprocessorViolation(className, Severity.ERROR, + "could not validate jar file '" + path + "'", e); violations.add(violation); } } else { @@ -197,18 +192,17 @@ protected void validateTables(ClassLoader classLoader, Admin admin, } private void validateTables(ClassLoader classLoader, Pattern pattern, - List violations) throws IOException { + List violations) throws IOException { try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { validateTables(classLoader, admin, pattern, violations); } } @Override protected void printUsage() { - String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + - PreUpgradeValidator.VALIDATE_CP_NAME + - " [-jar ...] [-class ... | -table ... | -config]"; + String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + + PreUpgradeValidator.VALIDATE_CP_NAME + " [-jar ...] [-class ... | -table ... | -config]"; printUsage(header, "Options:", ""); } @@ -249,9 +243,8 @@ private List buildClasspath(List jars) throws IOException { Path jarPath = Paths.get(jar); if (Files.isDirectory(jarPath)) { try (Stream stream = Files.list(jarPath)) { - List files = stream - .filter((path) -> Files.isRegularFile(path)) - .collect(Collectors.toList()); + List files = + stream.filter((path) -> Files.isRegularFile(path)).collect(Collectors.toList()); for (Path file : files) { URL url = file.toUri().toURL(); @@ -291,13 +284,13 @@ protected int doWork() throws Exception { if (config) { String[] masterCoprocessors = - getConf().getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + getConf().getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); if (masterCoprocessors != null) { validateClasses(classLoader, masterCoprocessors, violations); } String[] regionCoprocessors = - getConf().getStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); + getConf().getStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); if (regionCoprocessors != null) { validateClasses(classLoader, regionCoprocessors, violations); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java index d00398ecc270..a57fed964c16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.tool.coprocessor; import org.apache.yetus.audience.InterfaceAudience; @@ -26,7 +24,8 @@ @InterfaceAudience.Private public class CoprocessorViolation { public enum Severity { - WARNING, ERROR + WARNING, + ERROR } private final String className; @@ -38,8 +37,7 @@ public CoprocessorViolation(String className, Severity severity, String message) this(className, severity, message, null); } - public CoprocessorViolation(String className, Severity severity, String message, - Throwable t) { + public CoprocessorViolation(String className, Severity severity, String message, Throwable t) { this.className = className; this.severity = severity; this.message = message; @@ -64,11 +62,7 @@ public Throwable getThrowable() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("className", className) - .add("severity", severity) - .add("message", message) - .add("throwable", throwable) - .toString(); + return MoreObjects.toStringHelper(this).add("className", className).add("severity", severity) + .add("message", message).add("throwable", throwable).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java index 265cf5158ee4..8dca18aa7eda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.lang.reflect.Method; - import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.EndpointObserver; import org.apache.hadoop.hbase.coprocessor.MasterObserver; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java index 6825e426c7dc..e43aef667ff3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,28 @@ */ package org.apache.hadoop.hbase.util; +import edu.umd.cs.findbugs.annotations.CheckForNull; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; - -import edu.umd.cs.findbugs.annotations.CheckForNull; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Typical base class for file status filter. 
Works more efficiently when - * filtering file statuses, otherwise implementation will need to lookup filestatus - * for the path which will be expensive. + * Typical base class for file status filter. Works more efficiently when filtering file statuses, + * otherwise implementation will need to lookup filestatus for the path which will be expensive. */ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class AbstractFileStatusFilter implements PathFilter, FileStatusFilter { /** - * Filters out a path. Can be given an optional directory hint to avoid - * filestatus lookup. - * - * @param p A filesystem path - * @param isDir An optional boolean indicating whether the path is a directory or not - * @return true if the path is accepted, false if the path is filtered out + * Filters out a path. Can be given an optional directory hint to avoid filestatus lookup. + * @param p A filesystem path + * @param isDir An optional boolean indicating whether the path is a directory or not + * @return true if the path is accepted, false if the path is filtered out */ protected abstract boolean accept(Path p, @CheckForNull Boolean isDir); @@ -61,7 +56,8 @@ protected boolean isFile(FileSystem fs, @CheckForNull Boolean isDir, Path p) thr return !isDirectory(fs, isDir, p); } - protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) throws IOException { + protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) + throws IOException { return isDir != null ? isDir : fs.isDirectory(p); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java index 31394e8a97b5..f0453b011123 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,13 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.yetus.audience.InterfaceAudience; /** - * The bloom context that is used by the StorefileWriter to add the bloom details - * per cell + * The bloom context that is used by the StorefileWriter to add the bloom details per cell */ @InterfaceAudience.Private public abstract class BloomContext { @@ -44,9 +42,7 @@ public Cell getLastCell() { } /** - * Bloom information from the cell is retrieved - * @param cell - * @throws IOException + * Bloom information from the cell is retrieved nn */ public void writeBloom(Cell cell) throws IOException { // only add to the bloom filter on a new, unique key @@ -60,15 +56,13 @@ private void sanityCheck(Cell cell) throws IOException { if (this.getLastCell() != null) { if (comparator.compare(cell, this.getLastCell()) <= 0) { throw new IOException("Added a key not lexically larger than" + " previous. Current cell = " - + cell + ", prevCell = " + this.getLastCell()); + + cell + ", prevCell = " + this.getLastCell()); } } } /** - * Adds the last bloom key to the HFile Writer as part of StorefileWriter close. 
- * @param writer - * @throws IOException + * Adds the last bloom key to the HFile Writer as part of StorefileWriter close. nn */ public abstract void addLastBloomKey(HFile.Writer writer) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java index 0d99d30da454..9478a99c9b71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,59 +17,40 @@ */ package org.apache.hadoop.hbase.util; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; /** - * * Implements a Bloom filter, as defined by Bloom in 1970. *
    - * The Bloom filter is a data structure that was introduced in 1970 and that has - * been adopted by the networking research community in the past decade thanks - * to the bandwidth efficiencies that it offers for the transmission of set - * membership information between networked hosts. A sender encodes the - * information into a bit vector, the Bloom filter, that is more compact than a - * conventional representation. Computation and space costs for construction are - * linear in the number of elements. The receiver uses the filter to test - * whether various elements are members of the set. Though the filter will - * occasionally return a false positive, it will never return a false negative. - * When creating the filter, the sender can choose its desired point in a - * trade-off between the false positive rate and the size. - * + * The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by the + * networking research community in the past decade thanks to the bandwidth efficiencies that it + * offers for the transmission of set membership information between networked hosts. A sender + * encodes the information into a bit vector, the Bloom filter, that is more compact than a + * conventional representation. Computation and space costs for construction are linear in the + * number of elements. The receiver uses the filter to test whether various elements are members of + * the set. Though the filter will occasionally return a false positive, it will never return a + * false negative. When creating the filter, the sender can choose its desired point in a trade-off + * between the false positive rate and the size. *
    - * Originally inspired by European Commission - * One-Lab Project 034819. - * - * Bloom filters are very sensitive to the number of elements inserted into - * them. For HBase, the number of entries depends on the size of the data stored - * in the column. Currently the default region size is 256MB, so entry count ~= - * 256MB / (average value size for column). Despite this rule of thumb, there is - * no efficient way to calculate the entry count after compactions. Therefore, - * it is often easier to use a dynamic bloom filter that will add extra space - * instead of allowing the error rate to grow. - * - * ( http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey - * .pdf ) - * - * m denotes the number of bits in the Bloom filter (bitSize) n denotes the - * number of elements inserted into the Bloom filter (maxKeys) k represents the - * number of hash functions used (nbHash) e represents the desired false - * positive rate for the bloom (err) - * - * If we fix the error rate (e) and know the number of entries, then the optimal - * bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185) - * + * Originally inspired by European Commission One-Lab Project + * 034819. Bloom filters are very sensitive to the number of elements inserted into them. For + * HBase, the number of entries depends on the size of the data stored in the column. Currently the + * default region size is 256MB, so entry count ~= 256MB / (average value size for column). Despite + * this rule of thumb, there is no efficient way to calculate the entry count after compactions. + * Therefore, it is often easier to use a dynamic bloom filter that will add extra space instead of + * allowing the error rate to grow. ( + * http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey .pdf ) m denotes the + * number of bits in the Bloom filter (bitSize) n denotes the number of elements inserted into the + * Bloom filter (maxKeys) k represents the number of hash functions used (nbHash) e represents the + * desired false positive rate for the bloom (err) If we fix the error rate (e) and know the number + * of entries, then the optimal bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185) * The probability of false positives is minimized when k = m/n ln(2). - * * @see BloomFilter The general behavior of a filter - * - * @see - * Space/Time Trade-Offs in Hash Coding with Allowable Errors - * + * @see Space/Time + * Trade-Offs in Hash Coding with Allowable Errors * @see BloomFilterWriter for the ability to add elements to a Bloom filter */ @InterfaceAudience.Private @@ -79,27 +59,25 @@ public interface BloomFilter extends BloomFilterBase { /** * Check if the specified key is contained in the bloom filter. * @param keyCell the key to check for the existence of - * @param bloom bloom filter data to search. This can be null if auto-loading - * is supported. - * @param type The type of Bloom ROW/ ROW_COL + * @param bloom bloom filter data to search. This can be null if auto-loading is supported. + * @param type The type of Bloom ROW/ ROW_COL * @return true if matched by bloom, false if not */ boolean contains(Cell keyCell, ByteBuff bloom, BloomType type); /** * Check if the specified key is contained in the bloom filter. - * @param buf data to check for existence of + * @param buf data to check for existence of * @param offset offset into the data * @param length length of the data - * @param bloom bloom filter data to search. 
This can be null if auto-loading - * is supported. + * @param bloom bloom filter data to search. This can be null if auto-loading is supported. * @return true if matched by bloom, false if not */ boolean contains(byte[] buf, int offset, int length, ByteBuff bloom); /** - * @return true if this Bloom filter can automatically load its data - * and thus allows a null byte buffer to be passed to contains() + * @return true if this Bloom filter can automatically load its data and thus allows a null byte + * buffer to be passed to contains() */ boolean supportsAutoLoading(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java index 131552560e59..142a36c35f8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,8 +31,7 @@ public interface BloomFilterBase { long getKeyCount(); /** - * @return The max number of keys that can be inserted - * to maintain the desired error rate + * @return The max number of keys that can be inserted to maintain the desired error rate */ long getMaxKeys(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java index 06cf699e34fe..e09420cf8052 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; /** * The basic building block for the {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter} @@ -56,8 +53,7 @@ public class BloomFilterChunk implements BloomFilterBase { * @param meta stored bloom meta data * @throws IllegalArgumentException meta data is invalid */ - public BloomFilterChunk(DataInput meta) - throws IOException, IllegalArgumentException { + public BloomFilterChunk(DataInput meta) throws IOException, IllegalArgumentException { this.byteSize = meta.readInt(); this.hashCount = meta.readInt(); this.hashType = meta.readInt(); @@ -72,12 +68,10 @@ public BloomFilterChunk(DataInput meta) } /** - * Computes the error rate for this Bloom filter, taking into account the - * actual number of hash functions and keys inserted. The return value of - * this function changes as a Bloom filter is being populated. Used for - * reporting the actual error rate of compound Bloom filters when writing - * them out. 
- * + * Computes the error rate for this Bloom filter, taking into account the actual number of hash + * functions and keys inserted. The return value of this function changes as a Bloom filter is + * being populated. Used for reporting the actual error rate of compound Bloom filters when + * writing them out. * @return error rate for this particular Bloom filter */ public double actualErrorRate() { @@ -93,21 +87,16 @@ public BloomFilterChunk(int hashType, BloomType bloomType) { /** * Determines & initializes bloom filter meta data from user config. Call * {@link #allocBloom()} to allocate bloom filter data. - * - * @param maxKeys Maximum expected number of keys that will be stored in this - * bloom - * @param errorRate Desired false positive error rate. Lower rate = more - * storage required - * @param hashType Type of hash function to use - * @param foldFactor When finished adding entries, you may be able to 'fold' - * this bloom to save space. Tradeoff potentially excess bytes in - * bloom for ability to fold if keyCount is exponentially greater - * than maxKeys. - * @throws IllegalArgumentException + * @param maxKeys Maximum expected number of keys that will be stored in this bloom + * @param errorRate Desired false positive error rate. Lower rate = more storage required + * @param hashType Type of hash function to use + * @param foldFactor When finished adding entries, you may be able to 'fold' this bloom to save + * space. Tradeoff potentially excess bytes in bloom for ability to fold if + * keyCount is exponentially greater than maxKeys. n */ // Used only in testcases - public BloomFilterChunk(int maxKeys, double errorRate, int hashType, - int foldFactor) throws IllegalArgumentException { + public BloomFilterChunk(int maxKeys, double errorRate, int hashType, int foldFactor) + throws IllegalArgumentException { this(hashType, BloomType.ROW); long bitSize = BloomFilterUtil.computeBitSize(maxKeys, errorRate); @@ -121,9 +110,8 @@ public BloomFilterChunk(int maxKeys, double errorRate, int hashType, } /** - * Creates another similar Bloom filter. Does not copy the actual bits, and - * sets the new filter's key count to zero. - * + * Creates another similar Bloom filter. Does not copy the actual bits, and sets the new filter's + * key count to zero. 
* @return a Bloom filter with the same configuration as this */ public BloomFilterChunk createAnother() { @@ -138,16 +126,16 @@ public void allocBloom() { if (this.bloom != null) { throw new IllegalArgumentException("can only create bloom once."); } - this.bloom = ByteBuffer.allocate((int)this.byteSize); + this.bloom = ByteBuffer.allocate((int) this.byteSize); assert this.bloom.hasArray(); } void sanityCheck() throws IllegalArgumentException { - if(0 >= this.byteSize || this.byteSize > Integer.MAX_VALUE) { + if (0 >= this.byteSize || this.byteSize > Integer.MAX_VALUE) { throw new IllegalArgumentException("Invalid byteSize: " + this.byteSize); } - if(this.hashCount <= 0) { + if (this.hashCount <= 0) { throw new IllegalArgumentException("Hash function count must be > 0"); } @@ -160,15 +148,14 @@ void sanityCheck() throws IllegalArgumentException { } } - void bloomCheck(ByteBuffer bloom) throws IllegalArgumentException { + void bloomCheck(ByteBuffer bloom) throws IllegalArgumentException { if (this.byteSize != bloom.limit()) { - throw new IllegalArgumentException( - "Configured bloom length should match actual length"); + throw new IllegalArgumentException("Configured bloom length should match actual length"); } } // Used only by tests - void add(byte [] buf, int offset, int len) { + void add(byte[] buf, int offset, int len) { /* * For faster hashing, use combinatorial generation * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf @@ -208,17 +195,16 @@ private void setHashLoc(int hash1, int hash2) { ++this.keyCount; } - //--------------------------------------------------------------------------- + // --------------------------------------------------------------------------- /** Private helpers */ /** * Set the bit at the specified index to 1. - * * @param pos index of bit */ void set(long pos) { - int bytePos = (int)(pos / 8); - int bitPos = (int)(pos % 8); + int bytePos = (int) (pos / 8); + int bitPos = (int) (pos % 8); byte curByte = bloom.get(bytePos); curByte |= BloomFilterUtil.bitvals[bitPos]; bloom.put(bytePos, curByte); @@ -226,13 +212,12 @@ void set(long pos) { /** * Check if bit at specified index is 1. - * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) { - int bytePos = pos >> 3; //pos / 8 - int bitPos = pos & 0x7; //pos % 8 + int bytePos = pos >> 3; // pos / 8 + int bitPos = pos & 0x7; // pos % 8 // TODO access this via Util API which can do Unsafe access if possible(?) byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= BloomFilterUtil.bitvals[bitPos]; @@ -262,11 +247,11 @@ public void compactBloom() { // see if the actual size is exponentially smaller than expected. 
if (this.keyCount > 0 && this.bloom.hasArray()) { int pieces = 1; - int newByteSize = (int)this.byteSize; + int newByteSize = (int) this.byteSize; int newMaxKeys = this.maxKeys; // while exponentially smaller & folding is lossless - while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount<<1)) { + while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount << 1)) { pieces <<= 1; newByteSize >>= 1; newMaxKeys >>= 1; @@ -278,8 +263,8 @@ public void compactBloom() { int start = this.bloom.arrayOffset(); int end = start + newByteSize; int off = end; - for(int p = 1; p < pieces; ++p) { - for(int pos = start; pos < end; ++pos) { + for (int p = 1; p < pieces; ++p) { + for (int pos = start; pos < end; ++pos) { array[pos] |= array[off++]; } } @@ -298,8 +283,7 @@ public void compactBloom() { * @param out OutputStream to place bloom * @throws IOException Error writing bloom array */ - public void writeBloom(final DataOutput out) - throws IOException { + public void writeBloom(final DataOutput out) throws IOException { if (!this.bloom.hasArray()) { throw new IOException("Only writes ByteBuffer with underlying array."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java index 506aa210f914..7bcbf98dad49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.DataInput; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -39,80 +38,65 @@ @InterfaceAudience.Private public final class BloomFilterFactory { - private static final Logger LOG = - LoggerFactory.getLogger(BloomFilterFactory.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(BloomFilterFactory.class.getName()); /** This class should not be instantiated. */ - private BloomFilterFactory() {} + private BloomFilterFactory() { + } /** - * Specifies the target error rate to use when selecting the number of keys - * per Bloom filter. + * Specifies the target error rate to use when selecting the number of keys per Bloom filter. */ - public static final String IO_STOREFILE_BLOOM_ERROR_RATE = - "io.storefile.bloom.error.rate"; + public static final String IO_STOREFILE_BLOOM_ERROR_RATE = "io.storefile.bloom.error.rate"; /** - * Maximum folding factor allowed. The Bloom filter will be shrunk by - * the factor of up to 2 ** this times if we oversize it initially. + * Maximum folding factor allowed. The Bloom filter will be shrunk by the factor of up to 2 ** + * this times if we oversize it initially. */ - public static final String IO_STOREFILE_BLOOM_MAX_FOLD = - "io.storefile.bloom.max.fold"; + public static final String IO_STOREFILE_BLOOM_MAX_FOLD = "io.storefile.bloom.max.fold"; /** - * For default (single-block) Bloom filters this specifies the maximum number - * of keys. + * For default (single-block) Bloom filters this specifies the maximum number of keys. 
*/ - public static final String IO_STOREFILE_BLOOM_MAX_KEYS = - "io.storefile.bloom.max.keys"; + public static final String IO_STOREFILE_BLOOM_MAX_KEYS = "io.storefile.bloom.max.keys"; /** Master switch to enable Bloom filters */ - public static final String IO_STOREFILE_BLOOM_ENABLED = - "io.storefile.bloom.enabled"; + public static final String IO_STOREFILE_BLOOM_ENABLED = "io.storefile.bloom.enabled"; /** Master switch to enable Delete Family Bloom filters */ public static final String IO_STOREFILE_DELETEFAMILY_BLOOM_ENABLED = - "io.storefile.delete.family.bloom.enabled"; + "io.storefile.delete.family.bloom.enabled"; /** - * Target Bloom block size. Bloom filter blocks of approximately this size - * are interleaved with data blocks. + * Target Bloom block size. Bloom filter blocks of approximately this size are interleaved with + * data blocks. */ - public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE = - "io.storefile.bloom.block.size"; + public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE = "io.storefile.bloom.block.size"; /** Maximum number of times a Bloom filter can be "folded" if oversized */ private static final int MAX_ALLOWED_FOLD_FACTOR = 7; /** - * Instantiates the correct Bloom filter class based on the version provided - * in the meta block data. - * - * @param meta the byte array holding the Bloom filter's metadata, including - * version information - * @param reader the {@link HFile} reader to use to lazily load Bloom filter - * blocks - * @return an instance of the correct type of Bloom filter - * @throws IllegalArgumentException + * Instantiates the correct Bloom filter class based on the version provided in the meta block + * data. + * @param meta the byte array holding the Bloom filter's metadata, including version information + * @param reader the {@link HFile} reader to use to lazily load Bloom filter blocks + * @return an instance of the correct type of Bloom filter n */ - public static BloomFilter - createFromMeta(DataInput meta, HFile.Reader reader) - throws IllegalArgumentException, IOException { + public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader) + throws IllegalArgumentException, IOException { int version = meta.readInt(); switch (version) { case CompoundBloomFilterBase.VERSION: return new CompoundBloomFilter(meta, reader); default: - throw new IllegalArgumentException( - "Bad bloom filter format version " + version - ); + throw new IllegalArgumentException("Bad bloom filter format version " + version); } } /** - * @return true if general Bloom (Row or RowCol) filters are enabled in the - * given configuration + * @return true if general Bloom (Row or RowCol) filters are enabled in the given configuration */ public static boolean isGeneralBloomEnabled(Configuration conf) { return conf.getBoolean(IO_STOREFILE_BLOOM_ENABLED, true); @@ -145,32 +129,26 @@ public static int getBloomBlockSize(Configuration conf) { } /** - * @return max key for the Bloom filter from the configuration - */ + * @return max key for the Bloom filter from the configuration + */ public static int getMaxKeys(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS, 128 * 1000 * 1000); } /** * Creates a new general (Row or RowCol) Bloom filter at the time of - * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. - * - * @param conf - * @param cacheConf - * @param bloomType - * @param maxKeys an estimate of the number of keys we expect to insert. - * Irrelevant if compound Bloom filters are enabled. 
+ * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. nnn * @param maxKeys an + * estimate of the number of keys we expect to insert. Irrelevant if compound Bloom filters are + * enabled. * @param writer the HFile writer - * @return the new Bloom filter, or null in case Bloom filters are disabled - * or when failed to create one. + * @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to + * create one. */ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, - CacheConfig cacheConf, BloomType bloomType, int maxKeys, - HFile.Writer writer) { + CacheConfig cacheConf, BloomType bloomType, int maxKeys, HFile.Writer writer) { if (!isGeneralBloomEnabled(conf)) { - LOG.trace("Bloom filters are disabled by configuration for " - + writer.getPath() - + (conf == null ? " (configuration is null)" : "")); + LOG.trace("Bloom filters are disabled by configuration for " + writer.getPath() + + (conf == null ? " (configuration is null)" : "")); return null; } else if (bloomType == BloomType.NONE) { LOG.trace("Bloom filter is turned off for the column family"); @@ -187,35 +165,31 @@ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, err = (float) (1 - Math.sqrt(1 - err)); } - int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, - MAX_ALLOWED_FOLD_FACTOR); + int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR); // Do we support compound bloom filters? // In case of compound Bloom filters we ignore the maxKeys hint. CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf), - err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), - bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType); + err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), + bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType); writer.addInlineBlockWriter(bloomWriter); return bloomWriter; } /** * Creates a new Delete Family Bloom filter at the time of - * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. - * @param conf - * @param cacheConf - * @param maxKeys an estimate of the number of keys we expect to insert. - * Irrelevant if compound Bloom filters are enabled. + * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. nn * @param maxKeys an + * estimate of the number of keys we expect to insert. Irrelevant if compound Bloom filters are + * enabled. * @param writer the HFile writer - * @return the new Bloom filter, or null in case Bloom filters are disabled - * or when failed to create one. + * @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to + * create one. */ public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf, - CacheConfig cacheConf, int maxKeys, HFile.Writer writer) { + CacheConfig cacheConf, int maxKeys, HFile.Writer writer) { if (!isDeleteFamilyBloomEnabled(conf)) { - LOG.info("Delete Bloom filters are disabled by configuration for " - + writer.getPath() - + (conf == null ? " (configuration is null)" : "")); + LOG.info("Delete Bloom filters are disabled by configuration for " + writer.getPath() + + (conf == null ? " (configuration is null)" : "")); return null; } @@ -223,9 +197,9 @@ public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf, int maxFold = getMaxFold(conf); // In case of compound Bloom filters we ignore the maxKeys hint. 
- CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf), - err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), - null, BloomType.ROW); + CompoundBloomFilterWriter bloomWriter = + new CompoundBloomFilterWriter(getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold, + cacheConf.shouldCacheBloomsOnWrite(), null, BloomType.ROW); writer.addInlineBlockWriter(bloomWriter); return bloomWriter; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java index c7afb0e5f915..b35e8258ddfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java @@ -21,7 +21,6 @@ import java.text.NumberFormat; import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -38,30 +37,21 @@ public final class BloomFilterUtil { /** Record separator for the Bloom filter statistics human-readable string */ public static final String STATS_RECORD_SEP = "; "; /** - * Used in computing the optimal Bloom filter size. This approximately equals - * 0.480453. + * Used in computing the optimal Bloom filter size. This approximately equals 0.480453. */ public static final double LOG2_SQUARED = Math.log(2) * Math.log(2); - + /** - * A random number generator to use for "fake lookups" when testing to - * estimate the ideal false positive rate. + * A random number generator to use for "fake lookups" when testing to estimate the ideal false + * positive rate. */ private static Random randomGeneratorForTest; public static final String PREFIX_LENGTH_KEY = "RowPrefixBloomFilter.prefix_length"; - + /** Bit-value lookup array to prevent doing the same work over and over */ - public static final byte [] bitvals = { - (byte) 0x01, - (byte) 0x02, - (byte) 0x04, - (byte) 0x08, - (byte) 0x10, - (byte) 0x20, - (byte) 0x40, - (byte) 0x80 - }; + public static final byte[] bitvals = { (byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08, + (byte) 0x10, (byte) 0x20, (byte) 0x40, (byte) 0x80 }; /** * Private constructor to keep this class from being instantiated. @@ -70,12 +60,9 @@ private BloomFilterUtil() { } /** - * @param maxKeys - * @param errorRate - * @return the number of bits for a Bloom filter than can hold the given - * number of keys and provide the given error rate, assuming that the - * optimal number of hash functions is used and it does not have to - * be an integer. + * nn * @return the number of bits for a Bloom filter than can hold the given number of keys and + * provide the given error rate, assuming that the optimal number of hash functions is used and it + * does not have to be an integer. */ public static long computeBitSize(long maxKeys, double errorRate) { return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED)); @@ -86,7 +73,7 @@ public static long computeBitSize(long maxKeys, double errorRate) { * simulate uniformity of accesses better in a test environment. Should not be set in a real * environment where correctness matters! *

    - * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} + * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} * @param random The random number source to use, or null to compute actual hashes */ public static void setRandomGeneratorForTest(Random random) { @@ -94,14 +81,10 @@ public static void setRandomGeneratorForTest(Random random) { } /** - * The maximum number of keys we can put into a Bloom filter of a certain - * size to maintain the given error rate, assuming the number of hash - * functions is chosen optimally and does not even have to be an integer - * (hence the "ideal" in the function name). - * - * @param bitSize - * @param errorRate - * @return maximum number of keys that can be inserted into the Bloom filter + * The maximum number of keys we can put into a Bloom filter of a certain size to maintain the + * given error rate, assuming the number of hash functions is chosen optimally and does not even + * have to be an integer (hence the "ideal" in the function name). nn * @return maximum number of + * keys that can be inserted into the Bloom filter * @see #computeMaxKeys(long, double, int) for a more precise estimate */ public static long idealMaxKeys(long bitSize, double errorRate) { @@ -111,47 +94,30 @@ public static long idealMaxKeys(long bitSize, double errorRate) { } /** - * The maximum number of keys we can put into a Bloom filter of a certain - * size to get the given error rate, with the given number of hash functions. - * - * @param bitSize - * @param errorRate - * @param hashCount - * @return the maximum number of keys that can be inserted in a Bloom filter - * to maintain the target error rate, if the number of hash functions - * is provided. + * The maximum number of keys we can put into a Bloom filter of a certain size to get the given + * error rate, with the given number of hash functions. nnn * @return the maximum number of keys + * that can be inserted in a Bloom filter to maintain the target error rate, if the number of hash + * functions is provided. */ - public static long computeMaxKeys(long bitSize, double errorRate, - int hashCount) { - return (long) (-bitSize * 1.0 / hashCount * - Math.log(1 - Math.exp(Math.log(errorRate) / hashCount))); + public static long computeMaxKeys(long bitSize, double errorRate, int hashCount) { + return (long) (-bitSize * 1.0 / hashCount + * Math.log(1 - Math.exp(Math.log(errorRate) / hashCount))); } /** - * Computes the actual error rate for the given number of elements, number - * of bits, and number of hash functions. Taken directly from the - * Wikipedia Bloom filter article. - * - * @param maxKeys - * @param bitSize - * @param functionCount - * @return the actual error rate + * Computes the actual error rate for the given number of elements, number of bits, and number of + * hash functions. Taken directly from the + * Wikipedia + * Bloom filter article. nnn * @return the actual error rate */ - public static double actualErrorRate(long maxKeys, long bitSize, - int functionCount) { - return Math.exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 - / bitSize)) * functionCount); + public static double actualErrorRate(long maxKeys, long bitSize, int functionCount) { + return Math + .exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 / bitSize)) * functionCount); } /** - * Increases the given byte size of a Bloom filter until it can be folded by - * the given factor. 
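The sizing methods above are the standard Bloom-filter identities: m = ceil(n * (-ln p) / (ln 2)^2) bits for n keys at false-positive rate p, and the achieved rate for k hash functions is (1 - e^(-kn/m))^k. A standalone check of the same math, using the formulas shown in computeBitSize() and actualErrorRate() (the chosen key count and target rate are illustrative):

// Illustrative only: reproduce the Bloom sizing math from BloomFilterUtil.
public class BloomSizingSketch {
  static final double LOG2_SQUARED = Math.log(2) * Math.log(2); // ~0.480453

  static long computeBitSize(long maxKeys, double errorRate) {
    return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED));
  }

  static double actualErrorRate(long maxKeys, long bitSize, int functionCount) {
    return Math.exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 / bitSize)) * functionCount);
  }

  public static void main(String[] args) {
    long keys = 1_000_000L;
    double target = 0.01;
    long bits = computeBitSize(keys, target);                        // about 9.59 bits per key
    int hashes = (int) Math.ceil(Math.log(2) * (bits / (double) keys)); // optimal k ~= ln2 * m/n
    System.out.printf("bits=%d (%.2f/key), k=%d, achieved p=%.5f%n",
        bits, bits / (double) keys, hashes, actualErrorRate(keys, bits, hashes));
  }
}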
- * - * @param bitSize - * @param foldFactor - * @return Foldable byte size + * Increases the given byte size of a Bloom filter until it can be folded by the given factor. nn + * * @return Foldable byte size */ public static int computeFoldableByteSize(long bitSize, int foldFactor) { long byteSizeLong = (bitSize + 7) / 8; @@ -162,8 +128,8 @@ public static int computeFoldableByteSize(long bitSize, int foldFactor) { byteSizeLong <<= foldFactor; } if (byteSizeLong > Integer.MAX_VALUE) { - throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too " - + "large for bitSize=" + bitSize + ", foldFactor=" + foldFactor); + throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too " + "large for bitSize=" + + bitSize + ", foldFactor=" + foldFactor); } return (int) byteSizeLong; } @@ -171,25 +137,22 @@ public static int computeFoldableByteSize(long bitSize, int foldFactor) { public static int optimalFunctionCount(int maxKeys, long bitSize) { long i = bitSize / maxKeys; double result = Math.ceil(Math.log(2) * i); - if (result > Integer.MAX_VALUE){ + if (result > Integer.MAX_VALUE) { throw new IllegalArgumentException("result too large for integer value."); } - return (int)result; + return (int) result; } - + /** * Creates a Bloom filter chunk of the given size. - * - * @param byteSizeHint the desired number of bytes for the Bloom filter bit - * array. Will be increased so that folding is possible. - * @param errorRate target false positive rate of the Bloom filter - * @param hashType Bloom filter hash function type - * @param foldFactor - * @param bloomType - * @return the new Bloom filter of the desired size + * @param byteSizeHint the desired number of bytes for the Bloom filter bit array. Will be + * increased so that folding is possible. + * @param errorRate target false positive rate of the Bloom filter + * @param hashType Bloom filter hash function type nn * @return the new Bloom filter of the + * desired size */ - public static BloomFilterChunk createBySize(int byteSizeHint, - double errorRate, int hashType, int foldFactor, BloomType bloomType) { + public static BloomFilterChunk createBySize(int byteSizeHint, double errorRate, int hashType, + int foldFactor, BloomType bloomType) { BloomFilterChunk bbf = new BloomFilterChunk(hashType, bloomType); bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor); @@ -205,15 +168,14 @@ public static BloomFilterChunk createBySize(int byteSizeHint, return bbf; } - public static boolean contains(byte[] buf, int offset, int length, - ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash, - int hashCount) { + public static boolean contains(byte[] buf, int offset, int length, ByteBuff bloomBuf, + int bloomOffset, int bloomSize, Hash hash, int hashCount) { HashKey hashKey = new ByteArrayHashKey(buf, offset, length); return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey); } private static boolean contains(ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash, - int hashCount, HashKey hashKey) { + int hashCount, HashKey hashKey) { int hash1 = hash.hash(hashKey, 0); int bloomBitSize = bloomSize << 3; @@ -228,10 +190,10 @@ private static boolean contains(ByteBuff bloomBuf, int bloomOffset, int bloo for (int i = 0; i < hashCount; i++) { int hashLoc = (randomGeneratorForTest == null - // Production mode - ? Math.abs(compositeHash % bloomBitSize) - // Test mode with "fake look-ups" to estimate "ideal false positive rate" - : randomGeneratorForTest.nextInt(bloomBitSize)); + // Production mode + ? 
Math.abs(compositeHash % bloomBitSize) + // Test mode with "fake look-ups" to estimate "ideal false positive rate" + : randomGeneratorForTest.nextInt(bloomBitSize)); compositeHash += hash2; if (!checkBit(hashLoc, bloomBuf, bloomOffset)) { return false; @@ -241,21 +203,20 @@ private static boolean contains(ByteBuff bloomBuf, int bloomOffset, int bloo } public static boolean contains(Cell cell, ByteBuff bloomBuf, int bloomOffset, int bloomSize, - Hash hash, int hashCount, BloomType type) { - HashKey hashKey = type == BloomType.ROWCOL ? new RowColBloomHashKey(cell) - : new RowBloomHashKey(cell); + Hash hash, int hashCount, BloomType type) { + HashKey hashKey = + type == BloomType.ROWCOL ? new RowColBloomHashKey(cell) : new RowBloomHashKey(cell); return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey); } /** * Check if bit at specified index is 1. - * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ - static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { - int bytePos = pos >> 3; //pos / 8 - int bitPos = pos & 0x7; //pos % 8 + static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { + int bytePos = pos >> 3; // pos / 8 + int bitPos = pos & 0x7; // pos % 8 byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= bitvals[bitPos]; return (curByte != 0); @@ -263,10 +224,9 @@ static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { /** * A human-readable string with statistics for the given Bloom filter. - * * @param bloomFilter the Bloom filter to output statistics for; - * @return a string consisting of "<key>: <value>" parts - * separated by {@link #STATS_RECORD_SEP}. + * @return a string consisting of "<key>: <value>" parts separated by + * {@link #STATS_RECORD_SEP}. 
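checkBit() above turns a bit index into a byte offset and a bit mask with pos >> 3 and pos & 0x7, then tests the byte against the bitvals lookup table. A tiny standalone version over a plain byte[] (illustrative; the real method reads from a ByteBuff at a Bloom block offset):

// Illustrative only: the byte/bit addressing used by checkBit().
public class CheckBitSketch {
  static final byte[] BITVALS = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, (byte) 0x80 };

  static boolean checkBit(int pos, byte[] bloom) {
    int bytePos = pos >> 3;   // pos / 8
    int bitPos = pos & 0x7;   // pos % 8
    return (bloom[bytePos] & BITVALS[bitPos]) != 0;
  }

  public static void main(String[] args) {
    byte[] bloom = new byte[2];
    bloom[1] |= BITVALS[2];                    // set bit 10 (byte 1, bit 2)
    System.out.println(checkBit(10, bloom));   // true
    System.out.println(checkBit(11, bloom));   // false
  }
}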
*/ public static String formatStats(BloomFilterBase bloomFilter) { StringBuilder sb = new StringBuilder(); @@ -278,18 +238,18 @@ public static String formatStats(BloomFilterBase bloomFilter) { sb.append("Max Keys for bloom: " + m); if (m > 0) { sb.append(STATS_RECORD_SEP + "Percentage filled: " - + NumberFormat.getPercentInstance().format(k * 1.0 / m)); + + NumberFormat.getPercentInstance().format(k * 1.0 / m)); } return sb.toString(); } public static String toString(BloomFilterChunk bloomFilter) { return formatStats(bloomFilter) + STATS_RECORD_SEP + "Actual error rate: " - + String.format("%.8f", bloomFilter.actualErrorRate()); + + String.format("%.8f", bloomFilter.actualErrorRate()); } public static byte[] getBloomFilterParam(BloomType bloomFilterType, Configuration conf) - throws IllegalArgumentException { + throws IllegalArgumentException { byte[] bloomParam = null; String message = "Bloom filter type is " + bloomFilterType + ", "; if (bloomFilterType.equals(ROWPREFIX_FIXED_LENGTH)) { @@ -303,12 +263,12 @@ public static byte[] getBloomFilterParam(BloomType bloomFilterType, Configuratio prefixLength = Integer.parseInt(prefixLengthString); if (prefixLength <= 0 || prefixLength > HConstants.MAX_ROW_LENGTH) { message += - "the value of " + PREFIX_LENGTH_KEY + " must >=0 and < " + HConstants.MAX_ROW_LENGTH; + "the value of " + PREFIX_LENGTH_KEY + " must >=0 and < " + HConstants.MAX_ROW_LENGTH; throw new IllegalArgumentException(message); } } catch (NumberFormatException nfe) { - message = "Number format exception when parsing " + PREFIX_LENGTH_KEY + " for BloomType " + - bloomFilterType.toString() + ":" + prefixLengthString; + message = "Number format exception when parsing " + PREFIX_LENGTH_KEY + " for BloomType " + + bloomFilterType.toString() + ":" + prefixLengthString; throw new IllegalArgumentException(message, nfe); } bloomParam = Bytes.toBytes(prefixLength); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java index a68897801580..ec8390697aec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,35 +15,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies methods needed to add elements to a Bloom filter and serialize the - * resulting Bloom filter as a sequence of bytes. + * Specifies methods needed to add elements to a Bloom filter and serialize the resulting Bloom + * filter as a sequence of bytes. */ @InterfaceAudience.Private public interface BloomFilterWriter extends BloomFilterBase, CellSink, ShipperListener { /** Compact the Bloom filter before writing metadata & data to disk. */ void compactBloom(); + /** * Get a writable interface into bloom filter meta data. 
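getBloomFilterParam() above only produces a parameter for ROWPREFIX_FIXED_LENGTH Bloom filters: it reads RowPrefixBloomFilter.prefix_length, validates the value, and serializes it with Bytes.toBytes. A minimal configuration sketch (the prefix length chosen here is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: configure the prefix length consumed by getBloomFilterParam().
public class RowPrefixBloomConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("RowPrefixBloomFilter.prefix_length", 10); // leading row bytes used as the bloom key
    // getBloomFilterParam(ROWPREFIX_FIXED_LENGTH, conf) would now return Bytes.toBytes(10);
    // values <= 0 or larger than HConstants.MAX_ROW_LENGTH are rejected, as shown above.
    System.out.println(conf.getInt("RowPrefixBloomFilter.prefix_length", -1));
  }
}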
- * * @return a writable instance that can be later written to a stream */ Writable getMetaWriter(); /** - * Get a writable interface into bloom filter data (the actual Bloom bits). - * Not used for compound Bloom filters. - * + * Get a writable interface into bloom filter data (the actual Bloom bits). Not used for compound + * Bloom filters. * @return a writable instance that can be later written to a stream */ Writable getDataWriter(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java index efff41e11c86..7bcfa3f401d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java @@ -15,36 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.TimeUnit; +import java.util.AbstractQueue; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; import java.util.Objects; -import java.util.AbstractQueue; - +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - /** - * A generic bounded blocking Priority-Queue. - * - * The elements of the priority queue are ordered according to the Comparator - * provided at queue construction time. - * - * If multiple elements have the same priority this queue orders them in - * FIFO (first-in-first-out) manner. - * The head of this queue is the least element with respect to the specified - * ordering. If multiple elements are tied for least value, the head is the - * first one inserted. - * The queue retrieval operations poll, remove, peek, and element access the - * element at the head of the queue. + * A generic bounded blocking Priority-Queue. The elements of the priority queue are ordered + * according to the Comparator provided at queue construction time. If multiple elements have the + * same priority this queue orders them in FIFO (first-in-first-out) manner. The head of this queue + * is the least element with respect to the specified ordering. If multiple elements are tied for + * least value, the head is the first one inserted. The queue retrieval operations poll, remove, + * peek, and element access the element at the head of the queue. */ @InterfaceAudience.Private @InterfaceStability.Stable @@ -58,7 +49,7 @@ private static class PriorityQueue { @SuppressWarnings("unchecked") public PriorityQueue(int capacity, Comparator comparator) { - this.objects = (E[])new Object[capacity]; + this.objects = (E[]) new Object[capacity]; this.comparator = comparator; } @@ -133,7 +124,6 @@ private int upperBound(int start, int end, E key) { } } - // Lock used for all operations private final ReentrantLock lock = new ReentrantLock(); @@ -146,13 +136,12 @@ private int upperBound(int start, int end, E key) { private final PriorityQueue queue; /** - * Creates a PriorityQueue with the specified capacity that orders its - * elements according to the specified comparator. 
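As the class javadoc above says, the queue orders elements by the supplied Comparator, breaks ties FIFO, and serves the least element at the head. A minimal usage sketch, assuming the class carries the generic signature BoundedPriorityBlockingQueue<E>(int capacity, Comparator<? super E> comparator), which the flattened diff does not show:

import java.util.Comparator;
import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue;

// Illustrative only: a small bounded queue ordered by natural integer order.
public class BoundedQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    BoundedPriorityBlockingQueue<Integer> q =
        new BoundedPriorityBlockingQueue<>(4, Comparator.naturalOrder());
    q.put(3);
    q.put(1);
    q.put(2);
    System.out.println(q.poll()); // 1, the least element is the head
    System.out.println(q.poll()); // 2
    // offer(e, timeout, unit) returns false instead of blocking forever when the queue is full.
  }
}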
- * @param capacity the capacity of this queue + * Creates a PriorityQueue with the specified capacity that orders its elements according to the + * specified comparator. + * @param capacity the capacity of this queue * @param comparator the comparator that will be used to order this priority queue */ - public BoundedPriorityBlockingQueue(int capacity, - Comparator comparator) { + public BoundedPriorityBlockingQueue(int capacity, Comparator comparator) { this.queue = new PriorityQueue<>(capacity, comparator); } @@ -190,16 +179,14 @@ public void put(E e) throws InterruptedException { } @Override - public boolean offer(E e, long timeout, TimeUnit unit) - throws InterruptedException { + public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { Objects.requireNonNull(e); long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); try { while (queue.remainingCapacity() == 0) { - if (nanos <= 0) - return false; + if (nanos <= 0) return false; nanos = notFull.awaitNanos(nanos); } this.queue.add(e); @@ -242,8 +229,7 @@ public E poll() { } @Override - public E poll(long timeout, TimeUnit unit) - throws InterruptedException { + public E poll(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); E result = null; @@ -323,10 +309,8 @@ public int drainTo(Collection c) { @Override public int drainTo(Collection c, int maxElements) { Objects.requireNonNull(c); - if (c == this) - throw new IllegalArgumentException(); - if (maxElements <= 0) - return 0; + if (c == this) throw new IllegalArgumentException(); + if (maxElements <= 0) return 0; lock.lock(); try { int n = Math.min(queue.size(), maxElements); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java index d69a8c7483c7..5465c24540a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,11 +22,10 @@ import java.util.Iterator; import java.util.List; import java.util.SortedSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility scanner that wraps a sortable collection and serves as a KeyValueScanner. @@ -43,8 +41,7 @@ public CollectionBackedScanner(SortedSet set) { this(set, CellComparator.getInstance()); } - public CollectionBackedScanner(SortedSet set, - CellComparator comparator) { + public CollectionBackedScanner(SortedSet set, CellComparator comparator) { this.comparator = comparator; data = set; init(); @@ -54,16 +51,14 @@ public CollectionBackedScanner(List list) { this(list, CellComparator.getInstance()); } - public CollectionBackedScanner(List list, - CellComparator comparator) { + public CollectionBackedScanner(List list, CellComparator comparator) { Collections.sort(list, comparator); this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(CellComparator comparator, - Cell... 
array) { + public CollectionBackedScanner(CellComparator comparator, Cell... array) { this.comparator = comparator; List tmp = new ArrayList<>(array.length); @@ -75,7 +70,7 @@ public CollectionBackedScanner(CellComparator comparator, private void init() { iter = data.iterator(); - if(iter.hasNext()){ + if (iter.hasNext()) { current = iter.next(); } } @@ -88,7 +83,7 @@ public Cell peek() { @Override public Cell next() { Cell oldCurrent = current; - if(iter.hasNext()){ + if (iter.hasNext()) { current = iter.next(); } else { current = null; @@ -105,10 +100,10 @@ public boolean seek(Cell seekCell) { @Override public boolean reseek(Cell seekCell) { - while(iter.hasNext()){ + while (iter.hasNext()) { Cell next = iter.next(); int ret = comparator.compare(next, seekCell); - if(ret >= 0){ + if (ret >= 0) { current = next; return true; } @@ -116,7 +111,6 @@ public boolean reseek(Cell seekCell) { return false; } - @Override public void close() { // do nothing diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index f0549c3d633c..0870dbe6f9bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,36 +19,35 @@ import java.io.IOException; import java.util.Locale; - import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.io.compress.Compressor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Compression validation test. Checks compression is working. Be sure to run - * on every node in your cluster. + * Compression validation test. Checks compression is working. Be sure to run on every node in your + * cluster. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -77,23 +75,22 @@ public static boolean testCompression(String codec) { } } - private final static Boolean[] compressionTestResults - = new Boolean[Compression.Algorithm.values().length]; + private final static Boolean[] compressionTestResults = + new Boolean[Compression.Algorithm.values().length]; static { - for (int i = 0 ; i < compressionTestResults.length ; ++i) { + for (int i = 0; i < compressionTestResults.length; ++i) { compressionTestResults[i] = null; } } - public static void testCompression(Compression.Algorithm algo) - throws IOException { + public static void testCompression(Compression.Algorithm algo) throws IOException { if (compressionTestResults[algo.ordinal()] != null) { if (compressionTestResults[algo.ordinal()]) { - return ; // already passed test, dont do it again. + return; // already passed test, dont do it again. } else { // failed. - throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" + - " previously failed test."); + throw new DoNotRetryIOException( + "Compression algorithm '" + algo.getName() + "'" + " previously failed test."); } } @@ -111,34 +108,24 @@ public static void testCompression(Compression.Algorithm algo) public static void usage() { - System.err.println( - "Usage: CompressionTest " + - StringUtils.join( Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + - "\n" + - "For example:\n" + - " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); + System.err.println("Usage: CompressionTest " + + StringUtils.join(Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + "\n" + + "For example:\n" + " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); System.exit(1); } - public static void doSmokeTest(FileSystem fs, Path path, String codec) - throws Exception { + public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Exception { Configuration conf = HBaseConfiguration.create(); - HFileContext context = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(codec)).build(); - HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, path) - .withFileContext(context) - .create(); + HFileContext context = + new HFileContextBuilder().withCompression(HFileWriterImpl.compressionByName(codec)).build(); + HFile.Writer writer = + HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create(); // Write any-old Cell... 
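doSmokeTest() below writes a single cell to an HFile at the given path with the requested codec and reads it back, which is what the command-line entry point in usage() drives. A programmatic sketch of the same check (the local path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CompressionTest;

// Illustrative only: exercise a codec on this node the same way the CLI does.
public class CompressionSmokeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path path = new Path("file:///tmp/compression-smoke-test"); // throwaway test file
    FileSystem fs = path.getFileSystem(conf);
    CompressionTest.doSmokeTest(fs, path, "gz"); // throws if the codec cannot round-trip
    System.out.println("gz codec OK on this node");
  }
}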
- final byte [] rowKey = Bytes.toBytes("compressiontestkey"); - Cell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(rowKey) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(Bytes.toBytes("compressiontestval")) - .build(); + final byte[] rowKey = Bytes.toBytes("compressiontestkey"); + Cell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(Bytes.toBytes("compressiontestval")).build(); writer.append(c); writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval")); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java index cdc926fa709f..555d1fa5a8a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.util.StringUtils; - import java.util.AbstractMap; import java.util.Collection; import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Utilities for storing more complex collection types in @@ -34,7 +33,7 @@ */ @InterfaceAudience.Public public final class ConfigurationUtil { - // TODO: hopefully this is a good delimiter; it's not in the base64 alphabet, + // TODO: hopefully this is a good delimiter; it's not in the base64 alphabet, // nor is it valid for paths public static final char KVP_DELIMITER = '^'; @@ -44,29 +43,27 @@ private ConfigurationUtil() { } /** - * Store a collection of Map.Entry's in conf, with each entry separated by ',' - * and key values delimited by {@link #KVP_DELIMITER} - * + * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values + * delimited by {@link #KVP_DELIMITER} * @param conf configuration to store the collection in * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf */ public static void setKeyValues(Configuration conf, String key, - Collection> keyValues) { + Collection> keyValues) { setKeyValues(conf, key, keyValues, KVP_DELIMITER); } /** - * Store a collection of Map.Entry's in conf, with each entry separated by ',' - * and key values delimited by delimiter. 
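setKeyValues()/getKeyValues() above serialize a collection of key/value pairs into a single Configuration entry, joining each pair with the delimiter (default '^') and the pairs with ','. A round-trip sketch; the configuration key name here is made up:

import java.util.AbstractMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.ConfigurationUtil;

// Illustrative only: store and read back a list of key/value pairs under one config key.
public class ConfigurationUtilSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    List<Map.Entry<String, String>> kvps = Arrays.asList(
        new AbstractMap.SimpleImmutableEntry<String, String>("cf", "a"),
        new AbstractMap.SimpleImmutableEntry<String, String>("ttl", "600"));
    ConfigurationUtil.setKeyValues(conf, "example.key", kvps); // stored roughly as "cf^a,ttl^600"
    for (Map.Entry<String, String> kvp : ConfigurationUtil.getKeyValues(conf, "example.key")) {
      System.out.println(kvp.getKey() + " = " + kvp.getValue());
    }
  }
}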
- * + * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values + * delimited by delimiter. * @param conf configuration to store the collection in * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf * @param delimiter character used to separate each kvp */ public static void setKeyValues(Configuration conf, String key, - Collection> keyValues, char delimiter) { + Collection> keyValues, char delimiter) { List serializedKvps = Lists.newArrayList(); for (Map.Entry kvp : keyValues) { @@ -78,7 +75,6 @@ public static void setKeyValues(Configuration conf, String key, /** * Retrieve a list of key value pairs from configuration, stored under the provided key - * * @param conf configuration to retrieve kvps from * @param key key under which the key values are stored * @return the list of kvps stored under key in conf, or null if the key isn't present. @@ -90,7 +86,6 @@ public static List> getKeyValues(Configuration conf, S /** * Retrieve a list of key value pairs from configuration, stored under the provided key - * * @param conf configuration to retrieve kvps from * @param key key under which the key values are stored * @param delimiter character used to separate each kvp @@ -98,7 +93,7 @@ public static List> getKeyValues(Configuration conf, S * @see #setKeyValues(Configuration, String, Collection, char) */ public static List> getKeyValues(Configuration conf, String key, - char delimiter) { + char delimiter) { String[] kvps = conf.getStrings(key); if (kvps == null) { @@ -111,9 +106,8 @@ public static List> getKeyValues(Configuration conf, S String[] splitKvp = StringUtils.split(kvp, delimiter); if (splitKvp.length != 2) { - throw new IllegalArgumentException( - "Expected key value pair for configuration key '" + key + "'" + " to be of form '" - + delimiter + "; was " + kvp + " instead"); + throw new IllegalArgumentException("Expected key value pair for configuration key '" + key + + "'" + " to be of form '" + delimiter + "; was " + kvp + " instead"); } rtn.add(new AbstractMap.SimpleImmutableEntry<>(splitKvp[0], splitKvp[1])); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 4559d783729c..4a0111b0ac9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,15 +21,11 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -39,12 +34,14 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A utility to store user specific HConnections in memory. - * There is a chore to clean up connections idle for too long. - * This class is used by REST server and Thrift server to - * support authentication and impersonation. + * A utility to store user specific HConnections in memory. There is a chore to clean up connections + * idle for too long. This class is used by REST server and Thrift server to support authentication + * and impersonation. */ @InterfaceAudience.Private public class ConnectionCache { @@ -58,27 +55,33 @@ public class ConnectionCache { private final Configuration conf; private final ChoreService choreService; - private final ThreadLocal effectiveUserNames = - new ThreadLocal() { + private final ThreadLocal effectiveUserNames = new ThreadLocal() { @Override protected String initialValue() { return realUserName; } }; - public ConnectionCache(final Configuration conf, - final UserProvider userProvider, - final int cleanInterval, final int maxIdleTime) throws IOException { + public ConnectionCache(final Configuration conf, final UserProvider userProvider, + final int cleanInterval, final int maxIdleTime) throws IOException { Stoppable stoppable = new Stoppable() { private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} + + @Override + public void stop(String why) { + isStopped = true; + } + + @Override + public boolean isStopped() { + return isStopped; + } }; this.choreService = new ChoreService("ConnectionCache"); ScheduledChore cleaner = new ScheduledChore("ConnectionCleaner", stoppable, cleanInterval) { @Override protected void chore() { - for (Map.Entry entry: connections.entrySet()) { + for (Map.Entry entry : connections.entrySet()) { ConnectionInfo connInfo = entry.getValue(); if (connInfo.timedOut(maxIdleTime)) { if (connInfo.admin != null) { @@ -127,8 +130,7 @@ public void shutdown() { } /** - * Caller doesn't close the admin afterwards. - * We need to manage it and close it properly. + * Caller doesn't close the admin afterwards. We need to manage it and close it properly. */ public Admin getAdmin() throws IOException { ConnectionInfo connInfo = getCurrentConnection(); @@ -161,8 +163,7 @@ public RegionLocator getRegionLocator(byte[] tableName) throws IOException { } /** - * Get the cached connection for the current user. - * If none or timed out, create a new one. + * Get the cached connection for the current user. If none or timed out, create a new one. 
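ConnectionCache keeps one connection per effective user and a cleaner chore evicts entries idle longer than maxIdleTime; REST and Thrift use it for authentication and impersonation. A construction sketch with arbitrary interval values (the cache owns and closes the Admin it hands out):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.ConnectionCache;

// Illustrative only: cache per-user connections and let the cleaner chore evict idle ones.
public class ConnectionCacheSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    UserProvider userProvider = UserProvider.instantiate(conf);
    // clean every 10s, evict connections idle for more than 10 minutes (values are arbitrary)
    ConnectionCache cache = new ConnectionCache(conf, userProvider, 10_000, 600_000);
    Admin admin = cache.getAdmin(); // managed by the cache; do not close it yourself
    System.out.println("tables: " + admin.listTableNames().length);
    cache.shutdown();
  }
}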
*/ ConnectionInfo getCurrentConnection() throws IOException { String userName = getEffectiveUser(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java index 6c0415462507..93c88a897717 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java index aba421d8078d..66e3c3326579 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.lang.management.ManagementFactory; @@ -26,22 +24,20 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Locale; - import javax.management.JMException; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; import javax.management.ObjectName; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocatorMetric; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocatorMetricProvider; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; - /** * Utilities for interacting with and monitoring DirectByteBuffer allocations. */ @@ -82,17 +78,16 @@ public class DirectMemoryUtils { } /** - * @return the setting of -XX:MaxDirectMemorySize as a long. Returns 0 if - * -XX:MaxDirectMemorySize is not set. + * @return the setting of -XX:MaxDirectMemorySize as a long. Returns 0 if -XX:MaxDirectMemorySize + * is not set. */ public static long getDirectMemorySize() { RuntimeMXBean runtimemxBean = ManagementFactory.getRuntimeMXBean(); List arguments = runtimemxBean.getInputArguments(); - long multiplier = 1; //for the byte case. + long multiplier = 1; // for the byte case. 
for (String s : arguments) { if (s.contains("-XX:MaxDirectMemorySize=")) { - String memSize = s.toLowerCase(Locale.ROOT) - .replace("-xx:maxdirectmemorysize=", "").trim(); + String memSize = s.toLowerCase(Locale.ROOT).replace("-xx:maxdirectmemorysize=", "").trim(); if (memSize.contains("k")) { multiplier = 1024; @@ -133,29 +128,24 @@ public static long getDirectMemoryUsage() { */ public static long getNettyDirectMemoryUsage() { - ByteBufAllocatorMetric metric = ((ByteBufAllocatorMetricProvider) - PooledByteBufAllocator.DEFAULT).metric(); + ByteBufAllocatorMetric metric = + ((ByteBufAllocatorMetricProvider) PooledByteBufAllocator.DEFAULT).metric(); return metric.usedDirectMemory(); } /** - * DirectByteBuffers are garbage collected by using a phantom reference and a - * reference queue. Every once a while, the JVM checks the reference queue and - * cleans the DirectByteBuffers. However, as this doesn't happen - * immediately after discarding all references to a DirectByteBuffer, it's - * easy to OutOfMemoryError yourself using DirectByteBuffers. This function - * explicitly calls the Cleaner method of a DirectByteBuffer. - * - * @param toBeDestroyed - * The DirectByteBuffer that will be "cleaned". Utilizes reflection. - * + * DirectByteBuffers are garbage collected by using a phantom reference and a reference queue. + * Every once in a while, the JVM checks the reference queue and cleans the DirectByteBuffers. + * However, as this doesn't happen immediately after discarding all references to a + * DirectByteBuffer, it's easy to OutOfMemoryError yourself using DirectByteBuffers. This function + * explicitly calls the Cleaner method of a DirectByteBuffer. + * @param toBeDestroyed the DirectByteBuffer that will be "cleaned". Utilizes reflection. */ public static void destroyDirectByteBuffer(ByteBuffer toBeDestroyed) - throws IllegalArgumentException, IllegalAccessException, - InvocationTargetException, SecurityException, NoSuchMethodException { + throws IllegalArgumentException, IllegalAccessException, InvocationTargetException, + SecurityException, NoSuchMethodException { - Preconditions.checkArgument(toBeDestroyed.isDirect(), - "toBeDestroyed isn't direct!"); + Preconditions.checkArgument(toBeDestroyed.isDirect(), "toBeDestroyed isn't direct!"); Method cleanerMethod = toBeDestroyed.getClass().getMethod("cleaner"); cleanerMethod.setAccessible(true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java index cee3b56d6f6f..faacab9cb924 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -23,17 +22,16 @@ import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider; import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class EncryptionTest { @@ -47,15 +45,11 @@ private EncryptionTest() { } /** - * Check that the configured key provider can be loaded and initialized, or - * throw an exception. - * - * @param conf - * @throws IOException + * Check that the configured key provider can be loaded and initialized, or throw an exception. nn */ public static void testKeyProvider(final Configuration conf) throws IOException { - String providerClassName = conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, - KeyStoreKeyProvider.class.getName()); + String providerClassName = + conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName()); Boolean result = keyProviderResults.get(providerClassName); if (result == null) { try { @@ -63,8 +57,8 @@ public static void testKeyProvider(final Configuration conf) throws IOException keyProviderResults.put(providerClassName, true); } catch (Exception e) { // most likely a RuntimeException keyProviderResults.put(providerClassName, false); - throw new IOException("Key provider " + providerClassName + " failed test: " + - e.getMessage(), e); + throw new IOException( + "Key provider " + providerClassName + " failed test: " + e.getMessage(), e); } } else if (!result) { throw new IOException("Key provider " + providerClassName + " previously failed test"); @@ -72,15 +66,12 @@ public static void testKeyProvider(final Configuration conf) throws IOException } /** - * Check that the configured cipher provider can be loaded and initialized, or - * throw an exception. - * - * @param conf - * @throws IOException + * Check that the configured cipher provider can be loaded and initialized, or throw an exception. 
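testEncryption() further down validates the configured key and cipher providers and then performs an encrypt/decrypt round trip with the given cipher. A sketch of calling it; "AES" and the assumption that the hbase.crypto.* provider settings are already in the site configuration are illustrative, not dictated by this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.EncryptionTest;

// Illustrative only: verify the configured providers and do an encrypt/decrypt round trip.
// Assumes the key provider and cipher provider are already set up for this cluster.
public class EncryptionSmokeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // null key: exercise the non-schema-key path; a non-null key would be unwrapped with the
    // cluster master key, as in the else branch of testEncryption().
    EncryptionTest.testEncryption(conf, "AES", null);
    System.out.println("AES cipher passed the encrypt/decrypt test");
  }
}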
+ */ public static void testCipherProvider(final Configuration conf) throws IOException { - String providerClassName = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, - DefaultCipherProvider.class.getName()); + String providerClassName = + conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, DefaultCipherProvider.class.getName()); Boolean result = cipherProviderResults.get(providerClassName); if (result == null) { try { @@ -88,8 +79,8 @@ public static void testCipherProvider(final Configuration conf) throws IOExcepti cipherProviderResults.put(providerClassName, true); } catch (Exception e) { // most likely a RuntimeException cipherProviderResults.put(providerClassName, false); - throw new IOException("Cipher provider " + providerClassName + " failed test: " + - e.getMessage(), e); + throw new IOException( + "Cipher provider " + providerClassName + " failed test: " + e.getMessage(), e); } } else if (!result) { throw new IOException("Cipher provider " + providerClassName + " previously failed test"); @@ -97,23 +88,22 @@ public static void testCipherProvider(final Configuration conf) throws IOExcepti } /** - * Check that the specified cipher can be loaded and initialized, or throw - * an exception. Verifies key and cipher provider configuration as a - * prerequisite for cipher verification. Also verifies if encryption is enabled globally. - * - * @param conf HBase configuration + * Check that the specified cipher can be loaded and initialized, or throw an exception. Verifies + * key and cipher provider configuration as a prerequisite for cipher verification. Also verifies + * if encryption is enabled globally. + * @param conf HBase configuration * @param cipher cipher algorithm to use for the column family - * @param key encryption key + * @param key encryption key * @throws IOException in case of encryption configuration error */ - public static void testEncryption(final Configuration conf, final String cipher, - byte[] key) throws IOException { + public static void testEncryption(final Configuration conf, final String cipher, byte[] key) + throws IOException { if (cipher == null) { return; } - if(!Encryption.isEncryptionEnabled(conf)) { - String message = String.format("Cipher %s failed test: encryption is disabled on the cluster", - cipher); + if (!Encryption.isEncryptionEnabled(conf)) { + String message = + String.format("Cipher %s failed test: encryption is disabled on the cluster", cipher); throw new IOException(message); } testKeyProvider(conf); @@ -129,8 +119,7 @@ public static void testEncryption(final Configuration conf, final String cipher, } else { // This will be a wrapped key from schema context.setKey(EncryptionUtil.unwrapKey(conf, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), - key)); + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), key)); } byte[] iv = null; if (context.getCipher().getIvLength() > 0) { @@ -143,8 +132,8 @@ public static void testEncryption(final Configuration conf, final String cipher, Encryption.encrypt(out, new ByteArrayInputStream(plaintext), context, iv); byte[] ciphertext = out.toByteArray(); out.reset(); - Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, - context, iv); + Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, context, + iv); byte[] test = out.toByteArray(); if (!Bytes.equals(plaintext, test)) { throw new IOException("Did not pass encrypt/decrypt test"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java index 04a33846871b..0cbb33e8c365 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,22 +20,18 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Thread that walks over the filesystem, and computes the mappings - * Region -> BestHost and Region -> {@code Map} - * + * Thread that walks over the filesystem, and computes the mappings Region -> BestHost and Region -> + * {@code Map} */ @InterfaceAudience.Private class FSRegionScanner implements Runnable { @@ -52,17 +47,16 @@ class FSRegionScanner implements Runnable { /** * Maps each region to the RS with highest locality for that region. */ - private final Map regionToBestLocalityRSMapping; + private final Map regionToBestLocalityRSMapping; /** - * Maps region encoded names to maps of hostnames to fractional locality of - * that region on that host. + * Maps region encoded names to maps of hostnames to fractional locality of that region on that + * host. 
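The per-host locality recorded here is simply the fraction of the region's HDFS blocks with a replica on that host, computed from getFileBlockLocations() over all store files (locality = hostBlockCount / totalBlkCount further down). A toy computation with made-up block counts:

import java.util.HashMap;
import java.util.Map;

// Illustrative only: the same locality fraction FSRegionScanner derives per host.
public class LocalityFractionSketch {
  public static void main(String[] args) {
    Map<String, Integer> blockCountByHost = new HashMap<>();
    blockCountByHost.put("rs1.example.com", 12);   // made-up block counts
    blockCountByHost.put("rs2.example.com", 3);
    int totalBlkCount = 15;
    for (Map.Entry<String, Integer> e : blockCountByHost.entrySet()) {
      float locality = ((float) e.getValue()) / totalBlkCount;
      System.out.printf("%s -> %.2f%n", e.getKey(), locality); // rs1 -> 0.80, rs2 -> 0.20
    }
  }
}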
*/ private Map> regionDegreeLocalityMapping; - FSRegionScanner(FileSystem fs, Path regionPath, - Map regionToBestLocalityRSMapping, - Map> regionDegreeLocalityMapping) { + FSRegionScanner(FileSystem fs, Path regionPath, Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) { this.fs = fs; this.regionPath = regionPath; this.regionToBestLocalityRSMapping = regionToBestLocalityRSMapping; @@ -75,7 +69,7 @@ public void run() { // empty the map for each region Map blockCountMap = new HashMap<>(); - //get table name + // get table name String tableName = regionPath.getParent().getName(); int totalBlkCount = 0; @@ -98,15 +92,14 @@ public void run() { } for (FileStatus storeFile : storeFileLists) { - BlockLocation[] blkLocations = - fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); + BlockLocation[] blkLocations = fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); if (null == blkLocations) { continue; } totalBlkCount += blkLocations.length; - for(BlockLocation blk: blkLocations) { - for (String host: blk.getHosts()) { + for (BlockLocation blk : blkLocations) { + for (String host : blk.getHosts()) { AtomicInteger count = blockCountMap.get(host); if (count == null) { count = new AtomicInteger(0); @@ -137,11 +130,11 @@ public void run() { } if (hostToRun.endsWith(".")) { - hostToRun = hostToRun.substring(0, hostToRun.length()-1); + hostToRun = hostToRun.substring(0, hostToRun.length() - 1); } String name = tableName + ":" + regionPath.getName(); synchronized (regionToBestLocalityRSMapping) { - regionToBestLocalityRSMapping.put(name, hostToRun); + regionToBestLocalityRSMapping.put(name, hostToRun); } } @@ -153,7 +146,7 @@ public void run() { host = host.substring(0, host.length() - 1); } // Locality is fraction of blocks local to this host. - float locality = ((float)entry.getValue().get()) / totalBlkCount; + float locality = ((float) entry.getValue().get()) / totalBlkCount; hostLocalityMap.put(host, locality); } // Put the locality map into the result map, keyed by the encoded name diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 7ac09a97adfb..8f9dd4426dc7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -57,22 +57,19 @@ import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; /** - * Implementation of {@link TableDescriptors} that reads descriptors from the - * passed filesystem. It expects descriptors to be in a file in the - * {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only - * -- i.e. does not modify the filesystem or can be read and write. - * - *

    Also has utility for keeping up the table descriptors tableinfo file. - * The table schema file is kept in the {@link #TABLEINFO_DIR} subdir - * of the table directory in the filesystem. - * It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the - * edit sequenceid: e.g. .tableinfo.0000000003. This sequenceid - * is always increasing. It starts at zero. The table schema file with the - * highest sequenceid has the most recent schema edit. Usually there is one file - * only, the most recent but there may be short periods where there are more - * than one file. Old files are eventually cleaned. Presumption is that there - * will not be lots of concurrent clients making table schema edits. If so, - * the below needs a bit of a reworking and perhaps some supporting api in hdfs. + * Implementation of {@link TableDescriptors} that reads descriptors from the passed filesystem. It + * expects descriptors to be in a file in the {@link #TABLEINFO_DIR} subdir of the table's directory + * in FS. Can be read-only -- i.e. does not modify the filesystem or can be read and write. + *

    + * Also has utility for keeping up the table descriptors tableinfo file. The table schema file is + * kept in the {@link #TABLEINFO_DIR} subdir of the table directory in the filesystem. It has a + * {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the edit sequenceid: e.g. + * .tableinfo.0000000003. This sequenceid is always increasing. It starts at zero. The + * table schema file with the highest sequenceid has the most recent schema edit. Usually there is + * one file only, the most recent but there may be short periods where there are more than one file. + * Old files are eventually cleaned. Presumption is that there will not be lots of concurrent + * clients making table schema edits. If so, the below needs a bit of a reworking and perhaps some + * supporting api in hdfs. */ @InterfaceAudience.Private public class FSTableDescriptors implements TableDescriptors { @@ -92,7 +89,7 @@ public class FSTableDescriptors implements TableDescriptors { static final String TABLEINFO_FILE_PREFIX = ".tableinfo"; public static final String TABLEINFO_DIR = ".tabledesc"; - // This cache does not age out the old stuff. Thinking is that the amount + // This cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. private final Map cache = new ConcurrentHashMap<>(); @@ -110,7 +107,7 @@ public FSTableDescriptors(final FileSystem fs, final Path rootdir) { } public FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean fsreadonly, - final boolean usecache) { + final boolean usecache) { this.fs = fs; this.rootdir = rootdir; this.fsreadonly = fsreadonly; @@ -143,29 +140,21 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c return td; } - public static ColumnFamilyDescriptor getTableFamilyDescForMeta( - final Configuration conf) { - return ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.TABLE_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + public static ColumnFamilyDescriptor getTableFamilyDescForMeta(final Configuration conf) { + return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setInMemory(true).setBlocksize(8 * 1024).setScope(HConstants.REPLICATION_SCOPE_LOCAL) .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build(); + .setBloomFilterType(BloomType.ROWCOL).build(); } public static ColumnFamilyDescriptor getReplBarrierFamilyDescForMeta() { - return ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) - .setMaxVersions(HConstants.ALL_VERSIONS) - .setInMemory(true) + return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) + .setMaxVersions(HConstants.ALL_VERSIONS).setInMemory(true) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build(); + .setBloomFilterType(BloomType.ROWCOL).build(); } private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) @@ -175,31 +164,28 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con // we 
have to rethink about adding back the setCacheDataInL1 for META table CFs. return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) - .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, - HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.ROWCOL) + .setBlocksize( + conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.ROWCOL) .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) .build()) .setColumnFamily(getTableFamilyDescForMeta(conf)) .setColumnFamily(getReplBarrierFamilyDescForMeta()) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.NAMESPACE_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.NAMESPACE_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) - .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, - HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) + .setBlocksize( + conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build()) - .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder( - MultiRowMutationEndpoint.class.getName()) - .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); + .setBloomFilterType(BloomType.ROWCOL).build()) + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) + .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); } protected boolean isUsecache() { @@ -251,7 +237,7 @@ public TableDescriptor get(TableName tableName) { public Map getAll() throws IOException { Map tds = new TreeMap<>(); if (fsvisited) { - for (Map.Entry entry: this.cache.entrySet()) { + for (Map.Entry entry : this.cache.entrySet()) { tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue()); } } else { @@ -271,9 +257,9 @@ public Map getAll() throws IOException { } /** - * Find descriptors by namespace. - * @see #get(org.apache.hadoop.hbase.TableName) - */ + * Find descriptors by namespace. 
+ * @see #get(org.apache.hadoop.hbase.TableName) + */ @Override public Map getByNamespace(String name) throws IOException { Map htds = new TreeMap<>(); @@ -308,7 +294,7 @@ public void update(TableDescriptor td, boolean cacheOnly) throws IOException { } @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") Path updateTableDescriptor(TableDescriptor td) throws IOException { TableName tableName = td.getTableName(); Path tableDir = getTableDir(tableName); @@ -322,9 +308,8 @@ Path updateTableDescriptor(TableDescriptor td) throws IOException { } /** - * Removes the table descriptor from the local cache and returns it. - * If not in read only mode, it also deletes the entire table directory(!) - * from the FileSystem. + * Removes the table descriptor from the local cache and returns it. If not in read only mode, it + * also deletes the entire table directory(!) from the FileSystem. */ @Override public TableDescriptor remove(final TableName tablename) throws IOException { @@ -371,7 +356,8 @@ private Path getTableDir(TableName tableName) { public boolean accept(Path p) { // Accept any file that starts with TABLEINFO_NAME return p.getName().startsWith(TABLEINFO_FILE_PREFIX); - }}; + } + }; /** * Width of the sequenceid that is a suffix on a tableinfo file. @@ -384,10 +370,10 @@ public boolean accept(Path p) { * negative). */ private static String formatTableInfoSequenceId(final int number) { - byte [] b = new byte[WIDTH_OF_SEQUENCE_ID]; + byte[] b = new byte[WIDTH_OF_SEQUENCE_ID]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return Bytes.toString(b); @@ -410,7 +396,7 @@ static final class SequenceIdAndFileLength { * @param p Path to a .tableinfo file. */ @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") static SequenceIdAndFileLength getTableInfoSequenceIdAndFileLength(Path p) { String name = p.getName(); if (!name.startsWith(TABLEINFO_FILE_PREFIX)) { @@ -436,19 +422,18 @@ static SequenceIdAndFileLength getTableInfoSequenceIdAndFileLength(Path p) { * Returns Name of tableinfo file. */ @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") static String getTableInfoFileName(int sequenceId, byte[] content) { - return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceId) + "." + - content.length; + return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceId) + "." + + content.length; } /** - * Returns the latest table descriptor for the given table directly from the file system - * if it exists, bypassing the local cache. - * Returns null if it's not found. + * Returns the latest table descriptor for the given table directly from the file system if it + * exists, bypassing the local cache. Returns null if it's not found. 
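To make the tableinfo naming scheme described above concrete, here is a small standalone sketch (not part of the patch) that builds a file name the way the class comment and getTableInfoFileName describe it: the prefix, a zero-padded edit sequence id, then the serialized descriptor's length. The 10-digit width and the TableInfoNameSketch/formatSequenceId names are illustrative assumptions; the width is inferred from the ".tableinfo.0000000003" example.

    import java.util.Locale;

    public class TableInfoNameSketch {
      static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
      static final int WIDTH_OF_SEQUENCE_ID = 10; // assumed from the ".tableinfo.0000000003" example

      // Zero-pad so a lexicographic sort of file names orders them by sequence id,
      // which is what "the file with the highest sequenceid has the most recent schema" relies on.
      static String formatSequenceId(int id) {
        return String.format(Locale.ROOT, "%0" + WIDTH_OF_SEQUENCE_ID + "d", Math.abs(id));
      }

      static String tableInfoFileName(int sequenceId, byte[] serializedDescriptor) {
        return TABLEINFO_FILE_PREFIX + "." + formatSequenceId(sequenceId) + "."
            + serializedDescriptor.length;
      }

      public static void main(String[] args) {
        byte[] content = new byte[417]; // stand-in for TableDescriptorBuilder.toByteArray(td)
        System.out.println(tableInfoFileName(3, content)); // prints ".tableinfo.0000000003.417"
      }
    }
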
*/ - public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, - Path hbaseRootDir, TableName tableName) throws IOException { + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, + TableName tableName) throws IOException { Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); return getTableDescriptorFromFs(fs, tableDir); } @@ -521,7 +506,7 @@ private static Optional> getTableDescriptorFro } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public static void deleteTableDescriptors(FileSystem fs, Path tableDir) throws IOException { Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE); @@ -566,15 +551,16 @@ private static Path writeTableDescriptor(final FileSystem fs, final TableDescrip // In proc v2 we have table lock so typically, there will be no concurrent writes. Keep the // retry logic here since we may still want to write the table descriptor from for example, // HBCK2? - int currentSequenceId = currentDescriptorFile == null ? 0 : - getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId; + int currentSequenceId = currentDescriptorFile == null + ? 0 + : getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId; // Put arbitrary upperbound on how often we retry int maxAttempts = 10; int maxSequenceId = currentSequenceId + maxAttempts; byte[] bytes = TableDescriptorBuilder.toByteArray(td); - for (int newSequenceId = - currentSequenceId + 1; newSequenceId <= maxSequenceId; newSequenceId++) { + for (int newSequenceId = currentSequenceId + 1; newSequenceId + <= maxSequenceId; newSequenceId++) { String fileName = getTableInfoFileName(newSequenceId, bytes); Path filePath = new Path(tableInfoDir, fileName); try (FSDataOutputStream out = fs.create(filePath, false)) { @@ -593,8 +579,7 @@ private static Path writeTableDescriptor(final FileSystem fs, final TableDescrip } /** - * Create new TableDescriptor in HDFS. Happens when we are creating table. - * Used by tests. + * Create new TableDescriptor in HDFS. Happens when we are creating table. Used by tests. * @return True if we successfully created file. */ public boolean createTableDescriptor(TableDescriptor htd) throws IOException { @@ -602,32 +587,30 @@ public boolean createTableDescriptor(TableDescriptor htd) throws IOException { } /** - * Create new TableDescriptor in HDFS. Happens when we are creating table. If - * forceCreation is true then even if previous table descriptor is present it - * will be overwritten - * + * Create new TableDescriptor in HDFS. Happens when we are creating table. If forceCreation is + * true then even if previous table descriptor is present it will be overwritten * @return True if we successfully created file. */ public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) - throws IOException { + throws IOException { Path tableDir = getTableDir(htd.getTableName()); return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation); } /** - * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create - * a new table during cluster start or in Clone and Create Table Procedures. Checks readOnly flag + * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create a + * new table during cluster start or in Clone and Create Table Procedures. 
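The retry loop in writeTableDescriptor above leans on FileSystem.create(path, false) refusing to overwrite an existing file, so a writer that loses a race simply advances to the next sequence id. A hedged sketch of that idiom follows; NextTableInfoWriterSketch, writeNextDescriptor and the fileNameFor callback are illustrative stand-ins, and the cleanup of half-written files that the real method performs is omitted.

    import java.io.IOException;
    import java.util.function.IntFunction;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class NextTableInfoWriterSketch {
      private NextTableInfoWriterSketch() {
      }

      static Path writeNextDescriptor(FileSystem fs, Path tableInfoDir, int currentSequenceId,
          byte[] bytes, IntFunction<String> fileNameFor) throws IOException {
        int maxAttempts = 10; // arbitrary upper bound on retries, as in the patch above
        for (int seqId = currentSequenceId + 1; seqId <= currentSequenceId + maxAttempts; seqId++) {
          Path candidate = new Path(tableInfoDir, fileNameFor.apply(seqId));
          try (FSDataOutputStream out = fs.create(candidate, false /* never overwrite */)) {
            out.write(bytes);
            return candidate; // exclusive create succeeded: this is now the newest descriptor
          } catch (IOException e) {
            // Another writer claimed this sequence id first (or the write failed); try the next id.
          }
        }
        return null; // exhausted the retry budget
      }
    }
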
Checks readOnly flag * passed on construction. - * @param tableDir table directory under which we should write the file - * @param htd description of the table to write + * @param tableDir table directory under which we should write the file + * @param htd description of the table to write * @param forceCreation if true,then even if previous table descriptor is present it will - * be overwritten + * be overwritten * @return true if the we successfully created the file, false if the file * already exists and we weren't forcing the descriptor creation. * @throws IOException if a filesystem error occurs */ public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescriptor htd, - boolean forceCreation) throws IOException { + boolean forceCreation) throws IOException { if (this.fsreadonly) { throw new NotImplementedException("Cannot create a table descriptor - in read only mode"); } @@ -635,13 +618,13 @@ public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescri } /** - * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create - * a new table snapshoting. Does not enforce read-only. That is for caller to determine. - * @param fs Filesystem to use. - * @param tableDir table directory under which we should write the file - * @param htd description of the table to write + * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create a + * new table snapshoting. Does not enforce read-only. That is for caller to determine. + * @param fs Filesystem to use. + * @param tableDir table directory under which we should write the file + * @param htd description of the table to write * @param forceCreation if true,then even if previous table descriptor is present it will - * be overwritten + * be overwritten * @return true if the we successfully created the file, false if the file * already exists and we weren't forcing the descriptor creation. * @throws IOException if a filesystem error occurs @@ -661,4 +644,3 @@ public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path return writeTableDescriptor(fs, htd, tableDir, opt.map(Pair::getFirst).orElse(null)) != null; } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 461170de6a15..274db97098c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -118,26 +117,24 @@ private FSUtils() { } /** - * @return True is fs is instance of DistributedFileSystem - * @throws IOException + * @return True is fs is instance of DistributedFileSystem n */ public static boolean isDistributedFileSystem(final FileSystem fs) throws IOException { FileSystem fileSystem = fs; // If passed an instance of HFileSystem, it fails instanceof DistributedFileSystem. // Check its backing fs for dfs-ness. if (fs instanceof HFileSystem) { - fileSystem = ((HFileSystem)fs).getBackingFs(); + fileSystem = ((HFileSystem) fs).getBackingFs(); } return fileSystem instanceof DistributedFileSystem; } /** * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the - * '/a/b/c' part. 
If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider + * '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider * schema; i.e. if schemas different but path or subpath matches, the two will equate. - * @param pathToSearch Path we will be trying to match. - * @param pathTail - * @return True if pathTail is tail on the path of pathToSearch + * @param pathToSearch Path we will be trying to match. n * @return True if pathTail + * is tail on the path of pathToSearch */ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) { Path tailPath = pathTail; @@ -160,17 +157,16 @@ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTai if (toSearchName == null || toSearchName.isEmpty()) { break; } - // Move up a parent on each path for next go around. Path doesn't let us go off the end. + // Move up a parent on each path for next go around. Path doesn't let us go off the end. tailPath = tailPath.getParent(); toSearch = toSearch.getParent(); - } while(tailName.equals(toSearchName)); + } while (tailName.equals(toSearchName)); return result; } /** * Delete the region directory if exists. - * @return True if deleted the region directory. - * @throws IOException + * @return True if deleted the region directory. n */ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo hri) throws IOException { @@ -180,7 +176,7 @@ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo new Path(CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri.getEncodedName())); } - /** + /** * Create the specified file on the filesystem. By default, this will: *

 * <ol>
 * <li>overwrite the file if it exists</li>
@@ -191,10 +187,10 @@ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo
 * <li>use the default block size</li>
 * <li>not track progress</li>
 * </ol>
    - * @param conf configurations - * @param fs {@link FileSystem} on which to write the file - * @param path {@link Path} to the file to write - * @param perm permissions + * @param conf configurations + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm permissions * @param favoredNodes favored data nodes * @return output stream to the created file * @throws IOException if the file cannot be created @@ -231,12 +227,10 @@ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path /** * Checks to see if the specified file system is available - * * @param fs filesystem * @throws IOException e */ - public static void checkFileSystemAvailable(final FileSystem fs) - throws IOException { + public static void checkFileSystemAvailable(final FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { return; } @@ -247,8 +241,7 @@ public static void checkFileSystemAvailable(final FileSystem fs) return; } } catch (IOException e) { - exception = e instanceof RemoteException ? - ((RemoteException)e).unwrapRemoteException() : e; + exception = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e; } try { fs.close(); @@ -260,26 +253,21 @@ public static void checkFileSystemAvailable(final FileSystem fs) /** * Inquire the Active NameNode's safe mode status. - * * @param dfs A DistributedFileSystem object representing the underlying HDFS. - * @return whether we're in safe mode - * @throws IOException + * @return whether we're in safe mode n */ private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException { return dfs.setSafeMode(SAFEMODE_GET, true); } /** - * Check whether dfs is in safemode. - * @param conf - * @throws IOException + * Check whether dfs is in safemode. 
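checkDfsSafeMode above and the waitOnSafeMode loop later in this file both reduce to polling the NameNode with SAFEMODE_GET, which only queries the state and never toggles it. A minimal sketch of that poll, assuming the Hadoop 2/3 HdfsConstants.SafeModeAction enum (the exact class has moved between Hadoop releases) and an illustrative SafeModePollSketch name:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    final class SafeModePollSketch {
      // Poll until HDFS leaves safe mode, sleeping between checks.
      static void waitUntilOutOfSafeMode(DistributedFileSystem dfs, long sleepMs) throws IOException {
        while (dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)) {
          try {
            Thread.sleep(sleepMs);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
          }
        }
      }
    }
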
nn */ - public static void checkDfsSafeMode(final Configuration conf) - throws IOException { + public static void checkDfsSafeMode(final Configuration conf) throws IOException { boolean isInSafeMode = false; FileSystem fs = FileSystem.get(conf); if (fs instanceof DistributedFileSystem) { - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; isInSafeMode = isInSafeMode(dfs); } if (isInSafeMode) { @@ -289,15 +277,14 @@ public static void checkDfsSafeMode(final Configuration conf) /** * Verifies current version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir root hbase directory * @return null if no version file exists, version string otherwise - * @throws IOException if the version file fails to open + * @throws IOException if the version file fails to open * @throws DeserializationException if the version data cannot be translated into a version */ public static String getVersion(FileSystem fs, Path rootdir) - throws IOException, DeserializationException { + throws IOException, DeserializationException { final Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); FileStatus[] status = null; try { @@ -311,7 +298,7 @@ public static String getVersion(FileSystem fs, Path rootdir) return null; } String version = null; - byte [] content = new byte [(int)status[0].getLen()]; + byte[] content = new byte[(int) status[0].getLen()]; FSDataInputStream s = fs.open(versionFile); try { IOUtils.readFully(s, content, 0, content.length); @@ -337,8 +324,7 @@ public static String getVersion(FileSystem fs, Path rootdir) * @return The version found in the file as a String * @throws DeserializationException if the version data cannot be translated into a version */ - static String parseVersionFrom(final byte [] bytes) - throws DeserializationException { + static String parseVersionFrom(final byte[] bytes) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(bytes); int pblen = ProtobufUtil.lengthOfPBMagic(); FSProtos.HBaseVersionFileContent.Builder builder = @@ -355,9 +341,10 @@ static String parseVersionFrom(final byte [] bytes) /** * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file. * @param version Version to persist - * @return Serialized protobuf with version content and a bit of pb magic for a prefix. + * @return Serialized protobuf with version content and a bit of pb magic for a + * prefix. 
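parseVersionFrom and toVersionByteArray above frame the protobuf payload of the hbase.version file with a short magic prefix so a reader can cheaply reject garbage. The following self-contained sketch shows only that framing; the 4-byte "PBUF" value and the PbMagicSketch names are assumptions for illustration, and the real constant and helpers live in ProtobufUtil.

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    final class PbMagicSketch {
      private static final byte[] PB_MAGIC = "PBUF".getBytes(StandardCharsets.US_ASCII); // assumed value

      static byte[] prependMagic(byte[] payload) {
        byte[] framed = new byte[PB_MAGIC.length + payload.length];
        System.arraycopy(PB_MAGIC, 0, framed, 0, PB_MAGIC.length);
        System.arraycopy(payload, 0, framed, PB_MAGIC.length, payload.length);
        return framed;
      }

      static byte[] stripMagic(byte[] framed) {
        // parseVersionFrom throws DeserializationException in this case; the sketch just signals it.
        if (framed.length < PB_MAGIC.length
            || !Arrays.equals(Arrays.copyOfRange(framed, 0, PB_MAGIC.length), PB_MAGIC)) {
          throw new IllegalArgumentException("missing pb magic prefix");
        }
        return Arrays.copyOfRange(framed, PB_MAGIC.length, framed.length);
      }
    }
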
*/ - static byte [] toVersionByteArray(final String version) { + static byte[] toVersionByteArray(final String version) { FSProtos.HBaseVersionFileContent.Builder builder = FSProtos.HBaseVersionFileContent.newBuilder(); return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray()); @@ -365,33 +352,29 @@ static String parseVersionFrom(final byte [] bytes) /** * Verifies current version of file system - * - * @param fs file system + * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out - * @throws IOException if the version file cannot be opened + * @throws IOException if the version file cannot be opened * @throws DeserializationException if the contents of the version file cannot be parsed */ public static void checkVersion(FileSystem fs, Path rootdir, boolean message) - throws IOException, DeserializationException { + throws IOException, DeserializationException { checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } /** * Verifies current version of file system - * - * @param fs file system + * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out - * @param wait wait interval + * @param wait wait interval * @param retries number of times to retry - * - * @throws IOException if the version file cannot be opened + * @throws IOException if the version file cannot be opened * @throws DeserializationException if the contents of the version file cannot be parsed */ - public static void checkVersion(FileSystem fs, Path rootdir, - boolean message, int wait, int retries) - throws IOException, DeserializationException { + public static void checkVersion(FileSystem fs, Path rootdir, boolean message, int wait, + int retries) throws IOException, DeserializationException { String version = getVersion(fs, rootdir); String msg; if (version == null) { @@ -401,17 +384,17 @@ public static void checkVersion(FileSystem fs, Path rootdir, setVersion(fs, rootdir, wait, retries); return; } else { - msg = "hbase.version file is missing. Is your hbase.rootdir valid? " + - "You can restore hbase.version file by running 'HBCK2 filesystem -fix'. " + - "See https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2"; + msg = "hbase.version file is missing. Is your hbase.rootdir valid? " + + "You can restore hbase.version file by running 'HBCK2 filesystem -fix'. " + + "See https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2"; } } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) { return; } else { - msg = "HBase file layout needs to be upgraded. Current filesystem version is " + version + - " but software requires version " + HConstants.FILE_SYSTEM_VERSION + - ". Consult http://hbase.apache.org/book.html for further information about " + - "upgrading HBase."; + msg = "HBase file layout needs to be upgraded. Current filesystem version is " + version + + " but software requires version " + HConstants.FILE_SYSTEM_VERSION + + ". 
Consult http://hbase.apache.org/book.html for further information about " + + "upgrading HBase."; } // version is deprecated require migration @@ -424,47 +407,42 @@ public static void checkVersion(FileSystem fs, Path rootdir, /** * Sets version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir hbase root * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir) - throws IOException { + public static void setVersion(FileSystem fs, Path rootdir) throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } /** * Sets version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir hbase root - * @param wait time to wait for retry + * @param wait time to wait for retry * @param retries number of times to retry before failing * @throws IOException e */ public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries) - throws IOException { + throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries); } - /** * Sets version of file system - * - * @param fs filesystem object + * @param fs filesystem object * @param rootdir hbase root directory * @param version version to set - * @param wait time to wait for retry + * @param wait time to wait for retry * @param retries number of times to retry before throwing an IOException * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir, String version, - int wait, int retries) throws IOException { + public static void setVersion(FileSystem fs, Path rootdir, String version, int wait, int retries) + throws IOException { Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); - Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + - HConstants.VERSION_FILE_NAME); + Path tempVersionFile = new Path(rootdir, + HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + HConstants.VERSION_FILE_NAME); while (true) { try { // Write the version to a temporary file @@ -486,7 +464,8 @@ public static void setVersion(FileSystem fs, Path rootdir, String version, // Attempt to close the stream on the way out if it is still open. 
try { if (s != null) s.close(); - } catch (IOException ignore) { } + } catch (IOException ignore) { + } } LOG.info("Created version file at " + rootdir.toString() + " with version=" + version); return; @@ -499,7 +478,7 @@ public static void setVersion(FileSystem fs, Path rootdir, String version, Thread.sleep(wait); } } catch (InterruptedException ie) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); + throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } retries--; } else { @@ -511,14 +490,14 @@ public static void setVersion(FileSystem fs, Path rootdir, String version, /** * Checks that a cluster ID file exists in the HBase root directory - * @param fs the root directory FileSystem + * @param fs the root directory FileSystem * @param rootdir the HBase root directory in HDFS - * @param wait how long to wait between retries + * @param wait how long to wait between retries * @return true if the file exists, otherwise false * @throws IOException if checking the FileSystem fails */ - public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, - long wait) throws IOException { + public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, long wait) + throws IOException { while (true) { try { Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); @@ -541,25 +520,24 @@ public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, /** * Returns the value of the unique cluster ID stored for this HBase instance. - * @param fs the root directory FileSystem + * @param fs the root directory FileSystem * @param rootdir the path to the HBase root directory * @return the unique cluster identifier * @throws IOException if reading the cluster ID file fails */ - public static ClusterId getClusterId(FileSystem fs, Path rootdir) - throws IOException { + public static ClusterId getClusterId(FileSystem fs, Path rootdir) throws IOException { Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); ClusterId clusterId = null; - FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null; + FileStatus status = fs.exists(idPath) ? fs.getFileStatus(idPath) : null; if (status != null) { int len = Ints.checkedCast(status.getLen()); - byte [] content = new byte[len]; + byte[] content = new byte[len]; FSDataInputStream in = fs.open(idPath); try { in.readFully(content); } catch (EOFException eof) { LOG.warn("Cluster ID file {} is empty", idPath); - } finally{ + } finally { in.close(); } try { @@ -589,13 +567,11 @@ public static ClusterId getClusterId(FileSystem fs, Path rootdir) } /** - * @param cid - * @throws IOException + * nn */ private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p, - final ClusterId cid) - throws IOException { - // Rewrite the file as pb. Move aside the old one first, write new + final ClusterId cid) throws IOException { + // Rewrite the file as pb. Move aside the old one first, write new // then delete the moved-aside file. Path movedAsideName = new Path(p + "." + EnvironmentEdgeManager.currentTime()); if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p); @@ -610,15 +586,14 @@ private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final P * Writes a new unique identifier for this cluster to the "hbase.id" file in the HBase root * directory. If any operations on the ID file fails, and {@code wait} is a positive value, the * method will retry to produce the ID file until the thread is forcibly interrupted. 
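Both setVersion and setClusterId above publish their file by writing it under the temp directory first and then renaming it into place, so readers never observe a partially written hbase.version or hbase.id. A minimal sketch of that idiom, with the ".tmp" directory name (standing in for HConstants.HBASE_TEMP_DIRECTORY) and the PublishFileSketch name as illustrative assumptions:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class PublishFileSketch {
      static void publish(FileSystem fs, Path rootdir, String fileName, String contents)
          throws IOException {
        Path finalPath = new Path(rootdir, fileName);
        Path tempPath = new Path(new Path(rootdir, ".tmp"), fileName); // ".tmp" is an assumption
        try (FSDataOutputStream out = fs.create(tempPath, true)) {
          out.write(contents.getBytes(StandardCharsets.UTF_8));
        }
        // The rename makes the complete file visible in one step; a failed rename leaves the old file.
        if (!fs.rename(tempPath, finalPath)) {
          throw new IOException("Unable to move temp file to " + finalPath);
        }
      }
    }
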
- * - * @param fs the root directory FileSystem - * @param rootdir the path to the HBase root directory + * @param fs the root directory FileSystem + * @param rootdir the path to the HBase root directory * @param clusterId the unique identifier to store - * @param wait how long (in milliseconds) to wait between retries + * @param wait how long (in milliseconds) to wait between retries * @throws IOException if writing to the FileSystem fails and no wait value */ public static void setClusterId(final FileSystem fs, final Path rootdir, - final ClusterId clusterId, final long wait) throws IOException { + final ClusterId clusterId, final long wait) throws IOException { final Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); final Path tempDir = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY); @@ -643,7 +618,7 @@ public static void setClusterId(final FileSystem fs, final Path rootdir, if (!fs.rename(tempIdFile, idFile)) { failure = - Optional.of(new IOException("Unable to move temp cluster ID file to " + idFile)); + Optional.of(new IOException("Unable to move temp cluster ID file to " + idFile)); } } catch (IOException ioe) { failure = Optional.of(ioe); @@ -677,12 +652,10 @@ public static void setClusterId(final FileSystem fs, final Path rootdir, * @param wait Sleep between retries * @throws IOException e */ - public static void waitOnSafeMode(final Configuration conf, - final long wait) - throws IOException { + public static void waitOnSafeMode(final Configuration conf, final long wait) throws IOException { FileSystem fs = FileSystem.get(conf); if (!(fs instanceof DistributedFileSystem)) return; - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; // Make sure dfs is not in safe mode while (isInSafeMode(dfs)) { LOG.info("Waiting for dfs to exit safe mode..."); @@ -697,7 +670,7 @@ public static void waitOnSafeMode(final Configuration conf, /** * Checks if meta region exists - * @param fs file system + * @param fs file system * @param rootDir root directory of HBase installation * @return true if exists */ @@ -707,17 +680,16 @@ public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOExc } /** - * Compute HDFS block distribution of a given HdfsDataInputStream. All HdfsDataInputStreams - * are backed by a series of LocatedBlocks, which are fetched periodically from the namenode. - * This method retrieves those blocks from the input stream and uses them to calculate - * HDFSBlockDistribution. - * - * The underlying method in DFSInputStream does attempt to use locally cached blocks, but - * may hit the namenode if the cache is determined to be incomplete. The method also involves - * making copies of all LocatedBlocks rather than return the underlying blocks themselves. + * Compute HDFS block distribution of a given HdfsDataInputStream. All HdfsDataInputStreams are + * backed by a series of LocatedBlocks, which are fetched periodically from the namenode. This + * method retrieves those blocks from the input stream and uses them to calculate + * HDFSBlockDistribution. The underlying method in DFSInputStream does attempt to use locally + * cached blocks, but may hit the namenode if the cache is determined to be incomplete. The method + * also involves making copies of all LocatedBlocks rather than return the underlying blocks + * themselves. 
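The block-distribution computation described above boils down to summing, per host, the lengths of the blocks that host serves; HDFSBlocksDistribution additionally tracks totals and unique-block weight, which this hedged sketch (BlockWeightSketch is an illustrative name) leaves out.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;

    final class BlockWeightSketch {
      // Sum, per host, the lengths of the blocks covering [start, start + length) of the file.
      static Map<String, Long> bytesPerHost(FileSystem fs, FileStatus status, long start, long length)
          throws IOException {
        Map<String, Long> weightByHost = new HashMap<>();
        for (BlockLocation block : fs.getFileBlockLocations(status, start, length)) {
          for (String host : block.getHosts()) {
            weightByHost.merge(host, block.getLength(), Long::sum);
          }
        }
        return weightByHost;
      }
    }
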
*/ - static public HDFSBlocksDistribution computeHDFSBlocksDistribution( - HdfsDataInputStream inputStream) throws IOException { + static public HDFSBlocksDistribution + computeHDFSBlocksDistribution(HdfsDataInputStream inputStream) throws IOException { List blocks = inputStream.getAllBlocks(); HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); for (LocatedBlock block : blocks) { @@ -740,18 +712,16 @@ private static String[] getHostsForLocations(LocatedBlock block) { /** * Compute HDFS blocks distribution of a given file, or a portion of the file - * @param fs file system + * @param fs file system * @param status file status of the file - * @param start start position of the portion + * @param start start position of the portion * @param length length of the portion * @return The HDFS blocks distribution */ - static public HDFSBlocksDistribution computeHDFSBlocksDistribution( - final FileSystem fs, FileStatus status, long start, long length) - throws IOException { + static public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs, + FileStatus status, long start, long length) throws IOException { HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); - BlockLocation [] blockLocations = - fs.getFileBlockLocations(status, start, length); + BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, length); addToHDFSBlocksDistribution(blocksDistribution, blockLocations); return blocksDistribution; } @@ -759,11 +729,10 @@ static public HDFSBlocksDistribution computeHDFSBlocksDistribution( /** * Update blocksDistribution with blockLocations * @param blocksDistribution the hdfs blocks distribution - * @param blockLocations an array containing block location + * @param blockLocations an array containing block location */ - static public void addToHDFSBlocksDistribution( - HDFSBlocksDistribution blocksDistribution, BlockLocation[] blockLocations) - throws IOException { + static public void addToHDFSBlocksDistribution(HDFSBlocksDistribution blocksDistribution, + BlockLocation[] blockLocations) throws IOException { for (BlockLocation bl : blockLocations) { String[] hosts = bl.getHosts(); long len = bl.getLength(); @@ -774,27 +743,22 @@ static public void addToHDFSBlocksDistribution( // TODO move this method OUT of FSUtils. No dependencies to HMaster /** - * Returns the total overall fragmentation percentage. Includes hbase:meta and - * -ROOT- as well. - * - * @param master The master defining the HBase root and file system + * Returns the total overall fragmentation percentage. Includes hbase:meta and -ROOT- as well. + * @param master The master defining the HBase root and file system * @return A map for each table and its percentage (never null) * @throws IOException When scanning the directory fails */ - public static int getTotalTableFragmentation(final HMaster master) - throws IOException { + public static int getTotalTableFragmentation(final HMaster master) throws IOException { Map map = getTableFragmentation(master); - return map.isEmpty() ? -1 : map.get("-TOTAL-"); + return map.isEmpty() ? -1 : map.get("-TOTAL-"); } /** - * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and hbase:meta too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param master The master defining the HBase root and file system. 
+ * Runs through the HBase rootdir and checks how many stores for each table have more than one + * file in them. Checks -ROOT- and hbase:meta too. The total percentage across all tables is + * stored under the special key "-TOTAL-". + * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage (never null). - * * @throws IOException When scanning the directory fails. */ public static Map getTableFragmentation(final HMaster master) @@ -806,18 +770,16 @@ public static Map getTableFragmentation(final HMaster master) } /** - * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and hbase:meta too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param fs The file system to use - * @param hbaseRootDir The root directory to scan + * Runs through the HBase rootdir and checks how many stores for each table have more than one + * file in them. Checks -ROOT- and hbase:meta too. The total percentage across all tables is + * stored under the special key "-TOTAL-". + * @param fs The file system to use + * @param hbaseRootDir The root directory to scan * @return A map for each table and its percentage (never null) * @throws IOException When scanning the directory fails */ - public static Map getTableFragmentation( - final FileSystem fs, final Path hbaseRootDir) - throws IOException { + public static Map getTableFragmentation(final FileSystem fs, + final Path hbaseRootDir) throws IOException { Map frags = new HashMap<>(); int cfCountTotal = 0; int cfFragTotal = 0; @@ -846,11 +808,11 @@ public static Map getTableFragmentation( } // compute percentage per table and store in result list frags.put(CommonFSUtils.getTableName(d).getNameAsString(), - cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100)); + cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100)); } // set overall percentage for all tables frags.put("-TOTAL-", - cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100)); + cfCountTotal == 0 ? 0 : Math.round((float) cfFragTotal / cfCountTotal * 100)); return frags; } @@ -893,16 +855,16 @@ public static class BlackListDirFilter extends AbstractFileStatusFilter { /** * Create a filter on the givem filesystem with the specified blacklist - * @param fs filesystem to filter + * @param fs filesystem to filter * @param directoryNameBlackList list of the names of the directories to filter. If - * null, all directories are returned + * null, all directories are returned */ @SuppressWarnings("unchecked") public BlackListDirFilter(final FileSystem fs, final List directoryNameBlackList) { this.fs = fs; - blacklist = - (List) (directoryNameBlackList == null ? Collections.emptyList() - : directoryNameBlackList); + blacklist = (List) (directoryNameBlackList == null + ? Collections.emptyList() + : directoryNameBlackList); } @Override @@ -915,7 +877,7 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { return isDirectory(fs, isDir, p); } catch (IOException e) { LOG.warn("An error occurred while verifying if [{}] is a valid directory." 
- + " Returning 'not valid' and continuing.", p, e); + + " Returning 'not valid' and continuing.", p, e); return false; } } @@ -946,8 +908,7 @@ public UserTableDirFilter(FileSystem fs) { @Override protected boolean isValidName(final String name) { - if (!super.isValidName(name)) - return false; + if (!super.isValidName(name)) return false; try { TableName.isLegalTableQualifierName(Bytes.toBytes(name)); @@ -960,7 +921,7 @@ protected boolean isValidName(final String name) { } public static List getTableDirs(final FileSystem fs, final Path rootdir) - throws IOException { + throws IOException { List tableDirs = new ArrayList<>(); Path baseNamespaceDir = new Path(rootdir, HConstants.BASE_NAMESPACE_DIR); if (fs.exists(baseNamespaceDir)) { @@ -972,18 +933,15 @@ public static List getTableDirs(final FileSystem fs, final Path rootdir) } /** - * @param fs - * @param rootdir - * @return All the table directories under rootdir. Ignore non table hbase folders such as - * .logs, .oldlogs, .corrupt folders. - * @throws IOException + * nn * @return All the table directories under rootdir. Ignore non table hbase + * folders such as .logs, .oldlogs, .corrupt folders. n */ public static List getLocalTableDirs(final FileSystem fs, final Path rootdir) - throws IOException { + throws IOException { // presumes any directory under hbase.rootdir is a table FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); List tabledirs = new ArrayList<>(dirs.length); - for (FileStatus dir: dirs) { + for (FileStatus dir : dirs) { tabledirs.add(dir.getPath()); } return tabledirs; @@ -1020,19 +978,19 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { /** * Given a particular table dir, return all the regiondirs inside it, excluding files such as * .tableinfo - * @param fs A file system for the Path + * @param fs A file system for the Path * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir> - * @return List of paths to valid region directories in table dir. - * @throws IOException + * @return List of paths to valid region directories in table dir. n */ - public static List getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException { + public static List getRegionDirs(final FileSystem fs, final Path tableDir) + throws IOException { // assumes we are in a table dir. List rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { return Collections.emptyList(); } List regionDirs = new ArrayList<>(rds.size()); - for (FileStatus rdfs: rds) { + for (FileStatus rdfs : rds) { Path rdPath = rdfs.getPath(); regionDirs.add(rdPath); } @@ -1045,7 +1003,7 @@ public static Path getRegionDirFromRootDir(Path rootDir, RegionInfo region) { public static Path getRegionDirFromTableDir(Path tableDir, RegionInfo region) { return getRegionDirFromTableDir(tableDir, - ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName()); + ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName()); } public static Path getRegionDirFromTableDir(Path tableDir, String encodedRegionName) { @@ -1053,8 +1011,8 @@ public static Path getRegionDirFromTableDir(Path tableDir, String encodedRegionN } /** - * Filter for all dirs that are legal column family names. This is generally used for colfam - * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. + * Filter for all dirs that are legal column family names. This is generally used for colfam dirs + * <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. 
*/ public static class FamilyDirFilter extends AbstractFileStatusFilter { final FileSystem fs; @@ -1085,33 +1043,31 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { /** * Given a particular region dir, return all the familydirs inside it - * - * @param fs A file system for the Path + * @param fs A file system for the Path * @param regionDir Path to a specific region directory - * @return List of paths to valid family directories in region dir. - * @throws IOException + * @return List of paths to valid family directories in region dir. n */ public static List getFamilyDirs(final FileSystem fs, final Path regionDir) - throws IOException { + throws IOException { // assumes we are in a region dir. return getFilePaths(fs, regionDir, new FamilyDirFilter(fs)); } public static List getReferenceFilePaths(final FileSystem fs, final Path familyDir) - throws IOException { + throws IOException { return getFilePaths(fs, familyDir, new ReferenceFileFilter(fs)); } public static List getReferenceAndLinkFilePaths(final FileSystem fs, final Path familyDir) - throws IOException { + throws IOException { return getFilePaths(fs, familyDir, new ReferenceAndLinkFileFilter(fs)); } private static List getFilePaths(final FileSystem fs, final Path dir, - final PathFilter pathFilter) throws IOException { + final PathFilter pathFilter) throws IOException { FileStatus[] fds = fs.listStatus(dir, pathFilter); List files = new ArrayList<>(fds.length); - for (FileStatus fdfs: fds) { + for (FileStatus fdfs : fds) { Path fdPath = fdfs.getPath(); files.add(fdPath); } @@ -1142,11 +1098,11 @@ public ReferenceAndLinkFileFilter(FileSystem fs) { public boolean accept(Path rd) { try { // only files can be references. - return !fs.getFileStatus(rd).isDirectory() && (StoreFileInfo.isReference(rd) || - HFileLink.isHFileLink(rd)); + return !fs.getFileStatus(rd).isDirectory() + && (StoreFileInfo.isReference(rd) || HFileLink.isHFileLink(rd)); } catch (IOException ioe) { // Maybe the file was moved or the fs was disconnected. - LOG.warn("Skipping file " + rd +" due to IOException", ioe); + LOG.warn("Skipping file " + rd + " due to IOException", ioe); return false; } } @@ -1179,8 +1135,8 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { } /** - * Filter for HFileLinks (StoreFiles and HFiles not included). - * the filter itself does not consider if a link is file or not. + * Filter for HFileLinks (StoreFiles and HFiles not included). the filter itself does not consider + * if a link is file or not. */ public static class HFileLinkFilter implements PathFilter { @@ -1216,8 +1172,7 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { } /** - * Called every so-often by storefile map builder getTableStoreFilePathMap to - * report progress. + * Called every so-often by storefile map builder getTableStoreFilePathMap to report progress. */ interface ProgressReporter { /** @@ -1227,107 +1182,103 @@ interface ProgressReporter { } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
    + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile + * names to the full Path.
    * Example...
    - * Key = 3944417774205889744
    + * Key = 3944417774205889744
    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param map map to add values. If null, this method will create and populate one to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param tableName name of the table to scan. + * @param map map to add values. If null, this method will create and populate one to + * return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. * @return Map keyed by StoreFile name with a value of the full Path. - * @throws IOException When scanning the directory fails. - * @throws InterruptedException + * @throws IOException When scanning the directory fails. n */ public static Map getTableStoreFilePathMap(Map map, - final FileSystem fs, final Path hbaseRootDir, TableName tableName) - throws IOException, InterruptedException { + final FileSystem fs, final Path hbaseRootDir, TableName tableName) + throws IOException, InterruptedException { return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null, null, - (ProgressReporter)null); + (ProgressReporter) null); } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. Note that because this method can be called - * on a 'live' HBase system that we will skip files that no longer exist by the time - * we traverse them and similarly the user of the result needs to consider that some - * entries in this map may not exist by the time this call completes. - *
    + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile + * names to the full Path. Note that because this method can be called on a 'live' HBase system + * that we will skip files that no longer exist by the time we traverse them and similarly the + * user of the result needs to consider that some entries in this map may not exist by the time + * this call completes.
    * Example...
    - * Key = 3944417774205889744
    + * Key = 3944417774205889744
    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param resultMap map to add values. If null, this method will create and populate one to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param tableName name of the table to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param resultMap map to add values. If null, this method will create and populate one to + * return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. * @deprecated Since 2.3.0. For removal in hbase4. Use ProgressReporter override instead. */ @Deprecated public static Map getTableStoreFilePathMap(Map resultMap, - final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, - ExecutorService executor, final HbckErrorReporter progressReporter) - throws IOException, InterruptedException { + final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, + ExecutorService executor, final HbckErrorReporter progressReporter) + throws IOException, InterruptedException { return getTableStoreFilePathMap(resultMap, fs, hbaseRootDir, tableName, sfFilter, executor, - new ProgressReporter() { - @Override - public void progress(FileStatus status) { - // status is not used in this implementation. - progressReporter.progress(); - } - }); + new ProgressReporter() { + @Override + public void progress(FileStatus status) { + // status is not used in this implementation. + progressReporter.progress(); + } + }); } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. Note that because this method can be called - * on a 'live' HBase system that we will skip files that no longer exist by the time - * we traverse them and similarly the user of the result needs to consider that some - * entries in this map may not exist by the time this call completes. - *
    + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile + * names to the full Path. Note that because this method can be called on a 'live' HBase system + * that we will skip files that no longer exist by the time we traverse them and similarly the + * user of the result needs to consider that some entries in this map may not exist by the time + * this call completes.
    * Example...
    - * Key = 3944417774205889744
    + * Key = 3944417774205889744
    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param resultMap map to add values. If null, this method will create and populate one - * to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param tableName name of the table to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param resultMap map to add values. If null, this method will create and populate one to + * return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. - * @throws IOException When scanning the directory fails. + * @throws IOException When scanning the directory fails. * @throws InterruptedException the thread is interrupted, either before or during the activity. */ public static Map getTableStoreFilePathMap(Map resultMap, - final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, - ExecutorService executor, final ProgressReporter progressReporter) + final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, + ExecutorService executor, final ProgressReporter progressReporter) throws IOException, InterruptedException { final Map finalResultMap = - resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap; + resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap; // only include the directory paths to tables Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); - // Inside a table, there are compaction.dir directories to skip. Otherwise, all else + // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. 
final FamilyDirFilter familyFilter = new FamilyDirFilter(fs); final Vector exceptions = new Vector<>(); try { - List regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); + List regionDirs = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (regionDirs == null) { return finalResultMap; } @@ -1348,8 +1299,9 @@ public static Map getTableStoreFilePathMap(Map resul @Override public void run() { try { - HashMap regionStoreFileMap = new HashMap<>(); - List familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); + HashMap regionStoreFileMap = new HashMap<>(); + List familyDirs = + FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); if (familyDirs == null) { if (!fs.exists(dd)) { LOG.warn("Skipping region because it no longer exists: " + dd); @@ -1375,7 +1327,7 @@ public void run() { } Path sf = sfStatus.getPath(); if (sfFilter == null || sfFilter.accept(sf)) { - regionStoreFileMap.put( sf.getName(), sf); + regionStoreFileMap.put(sf.getName(), sf); } } } @@ -1429,7 +1381,7 @@ public void run() { public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) { int result = 0; try { - for (Path familyDir:getFamilyDirs(fs, p)){ + for (Path familyDir : getFamilyDirs(fs, p)) { result += getReferenceFilePaths(fs, familyDir).size(); } } catch (IOException e) { @@ -1439,80 +1391,69 @@ public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
    + * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to + * the full Path.
    * Example...
    - * Key = 3944417774205889744
    + * Key = 3944417774205889744
    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. */ public static Map getTableStoreFilePathMap(final FileSystem fs, - final Path hbaseRootDir) - throws IOException, InterruptedException { - return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, (ProgressReporter)null); + final Path hbaseRootDir) throws IOException, InterruptedException { + return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, (ProgressReporter) null); } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
    + * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to + * the full Path.
    * Example...
    - * Key = 3944417774205889744
    + * Key = 3944417774205889744
    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. - * @deprecated Since 2.3.0. Will be removed in hbase4. Used {@link - * #getTableStoreFilePathMap(FileSystem, Path, PathFilter, ExecutorService, ProgressReporter)} + * @deprecated Since 2.3.0. Will be removed in hbase4. Used + * {@link #getTableStoreFilePathMap(FileSystem, Path, PathFilter, ExecutorService, ProgressReporter)} */ @Deprecated public static Map getTableStoreFilePathMap(final FileSystem fs, - final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, - HbckErrorReporter progressReporter) - throws IOException, InterruptedException { - return getTableStoreFilePathMap(fs, hbaseRootDir, sfFilter, executor, - new ProgressReporter() { - @Override - public void progress(FileStatus status) { - // status is not used in this implementation. - progressReporter.progress(); - } - }); + final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, + HbckErrorReporter progressReporter) throws IOException, InterruptedException { + return getTableStoreFilePathMap(fs, hbaseRootDir, sfFilter, executor, new ProgressReporter() { + @Override + public void progress(FileStatus status) { + // status is not used in this implementation. + progressReporter.progress(); + } + }); } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
    + * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to + * the full Path.
    * Example...
    - * Key = 3944417774205889744
    + * Key = 3944417774205889744
    * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. - * @param sfFilter optional path filter to apply to store files - * @param executor optional executor service to parallelize this operation + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param sfFilter optional path filter to apply to store files + * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. - * @throws IOException When scanning the directory fails. - * @throws InterruptedException + * @throws IOException When scanning the directory fails. n */ - public static Map getTableStoreFilePathMap( - final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter, - ExecutorService executor, ProgressReporter progressReporter) - throws IOException, InterruptedException { + public static Map getTableStoreFilePathMap(final FileSystem fs, + final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, + ProgressReporter progressReporter) throws IOException, InterruptedException { ConcurrentHashMap map = new ConcurrentHashMap<>(1024, 0.75f, 32); // if this method looks similar to 'getTableFragmentation' that is because @@ -1528,26 +1469,23 @@ public static Map getTableStoreFilePathMap( /** * Filters FileStatuses in an array and returns a list - * - * @param input An array of FileStatuses - * @param filter A required filter to filter the array - * @return A list of FileStatuses + * @param input An array of FileStatuses + * @param filter A required filter to filter the array + * @return A list of FileStatuses */ - public static List filterFileStatuses(FileStatus[] input, - FileStatusFilter filter) { + public static List filterFileStatuses(FileStatus[] input, FileStatusFilter filter) { if (input == null) return null; return filterFileStatuses(Iterators.forArray(input), filter); } /** * Filters FileStatuses in an iterator and returns a list - * - * @param input An iterator of FileStatuses - * @param filter A required filter to filter the array - * @return A list of FileStatuses + * @param input An iterator of FileStatuses + * @param filter A required filter to filter the array + * @return A list of FileStatuses */ public static List filterFileStatuses(Iterator input, - FileStatusFilter filter) { + FileStatusFilter filter) { if (input == null) return null; ArrayList results = new ArrayList<>(); while (input.hasNext()) { @@ -1560,19 +1498,17 @@ public static List filterFileStatuses(Iterator input, } /** - * Calls fs.listStatus() and treats FileNotFoundException as non-fatal - * This accommodates differences between hadoop versions, where hadoop 1 - * does not throw a FileNotFoundException, and return an empty FileStatus[] - * while Hadoop 2 will throw FileNotFoundException. - * - * @param fs file system - * @param dir directory + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates + * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and + * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. 
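 [Editor's note, not part of the patch] The reverse-lookup helpers reformatted above are easier to follow with a small usage sketch. The wrapper method name below is made up, the Map<String, Path> shape is inferred from the javadoc ("keyed by StoreFile name with a value of the full Path"), and imports are the same ones FSUtils itself already uses:

   static Map<String, Path> dumpStoreFilePaths(Configuration conf)
       throws IOException, InterruptedException {
     Path rootDir = CommonFSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
     // Two-argument overload from the hunk above: scan the whole root dir, no filter/executor.
     Map<String, Path> storeFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir);
     storeFiles.forEach((name, path) -> System.out.println(name + " -> " + path));
     return storeFiles;
   }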
+ * @param fs file system + * @param dir directory * @param filter file status filter * @return null if dir is empty or doesn't exist, otherwise FileStatus list */ - public static List listStatusWithStatusFilter(final FileSystem fs, - final Path dir, final FileStatusFilter filter) throws IOException { - FileStatus [] status = null; + public static List listStatusWithStatusFilter(final FileSystem fs, final Path dir, + final FileStatusFilter filter) throws IOException { + FileStatus[] status = null; try { status = fs.listStatus(dir); } catch (FileNotFoundException fnfe) { @@ -1580,7 +1516,7 @@ public static List listStatusWithStatusFilter(final FileSystem fs, return null; } - if (ArrayUtils.getLength(status) == 0) { + if (ArrayUtils.getLength(status) == 0) { return null; } @@ -1597,67 +1533,42 @@ public static List listStatusWithStatusFilter(final FileSystem fs, } /** - * This function is to scan the root path of the file system to get the - * degree of locality for each region on each of the servers having at least - * one block of that region. - * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} - * - * @param conf - * the configuration to use - * @return the mapping from region encoded name to a map of server names to - * locality fraction - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get the degree of locality for + * each region on each of the servers having at least one block of that region. This is used by + * the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} n * the configuration + * to use + * @return the mapping from region encoded name to a map of server names to locality fraction n * + * in case of file system errors or interrupts */ - public static Map> getRegionDegreeLocalityMappingFromFS( - final Configuration conf) throws IOException { - return getRegionDegreeLocalityMappingFromFS( - conf, null, - conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); + public static Map> + getRegionDegreeLocalityMappingFromFS(final Configuration conf) throws IOException { + return getRegionDegreeLocalityMappingFromFS(conf, null, + conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); } /** - * This function is to scan the root path of the file system to get the - * degree of locality for each region on each of the servers having at least - * one block of that region. - * - * @param conf - * the configuration to use - * @param desiredTable - * the table you wish to scan locality for - * @param threadPoolSize - * the thread pool size to use - * @return the mapping from region encoded name to a map of server names to - * locality fraction - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get the degree of locality for + * each region on each of the servers having at least one block of that region. 
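 [Editor's note, not part of the patch] A similar hedged sketch for the locality helper just described; the nested Map<String, Map<String, Float>> shape is assumed from the javadoc (region encoded name -> server name -> locality fraction):

   static void printRegionLocality(Configuration conf) throws IOException {
     Map<String, Map<String, Float>> locality =
         FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
     locality.forEach((region, perServer) -> perServer.forEach(
         (server, fraction) -> System.out.println(region + " on " + server + ": " + fraction)));
   }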
n * the + * configuration to use n * the table you wish to scan locality for n * the thread pool size to + * use + * @return the mapping from region encoded name to a map of server names to locality fraction n * + * in case of file system errors or interrupts */ public static Map> getRegionDegreeLocalityMappingFromFS( - final Configuration conf, final String desiredTable, int threadPoolSize) - throws IOException { + final Configuration conf, final String desiredTable, int threadPoolSize) throws IOException { Map> regionDegreeLocalityMapping = new ConcurrentHashMap<>(); getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, regionDegreeLocalityMapping); return regionDegreeLocalityMapping; } /** - * This function is to scan the root path of the file system to get either the - * mapping between the region name and its best locality region server or the - * degree of locality of each region on each of the servers having at least - * one block of that region. The output map parameters are both optional. - * - * @param conf - * the configuration to use - * @param desiredTable - * the table you wish to scan locality for - * @param threadPoolSize - * the thread pool size to use - * @param regionDegreeLocalityMapping - * the map into which to put the locality degree mapping or null, - * must be a thread-safe implementation - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get either the mapping between the + * region name and its best locality region server or the degree of locality of each region on + * each of the servers having at least one block of that region. The output map parameters are + * both optional. n * the configuration to use n * the table you wish to scan locality for n * the + * thread pool size to use n * the map into which to put the locality degree mapping or null, must + * be a thread-safe implementation n * in case of file system errors or interrupts */ private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable, int threadPoolSize, @@ -1738,13 +1649,12 @@ public boolean accept(Path path) { try { // here we wait until TPE terminates, which is either naturally or by // exceptions in the execution of the threads - while (!tpe.awaitTermination(threadWakeFrequency, - TimeUnit.MILLISECONDS)) { + while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) { // printing out rough estimate, so as to not introduce // AtomicInteger LOG.info("Locality checking is underway: { Scanned Regions : " - + ((ThreadPoolExecutor) tpe).getCompletedTaskCount() + "/" - + ((ThreadPoolExecutor) tpe).getTaskCount() + " }"); + + ((ThreadPoolExecutor) tpe).getCompletedTaskCount() + "/" + + ((ThreadPoolExecutor) tpe).getTaskCount() + " }"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -1757,9 +1667,8 @@ public boolean accept(Path path) { } /** - * Do our short circuit read setup. - * Checks buffer size to use and whether to do checksumming in hbase or hdfs. - * @param conf + * Do our short circuit read setup. Checks buffer size to use and whether to do checksumming in + * hbase or hdfs. n */ public static void setupShortCircuitRead(final Configuration conf) { // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property. 
@@ -1767,17 +1676,19 @@ public static void setupShortCircuitRead(final Configuration conf) { conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false); boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); if (shortCircuitSkipChecksum) { - LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " + - "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " + - "it, see https://issues.apache.org/jira/browse/HBASE-6868." : "")); - assert !shortCircuitSkipChecksum; //this will fail if assertions are on + LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " + + "be set to true." + + (useHBaseChecksum + ? " HBase checksum doesn't require " + + "it, see https://issues.apache.org/jira/browse/HBASE-6868." + : "")); + assert !shortCircuitSkipChecksum; // this will fail if assertions are on } checkShortCircuitReadBufferSize(conf); } /** - * Check if short circuit read buffer size is set and if not, set it to hbase value. - * @param conf + * Check if short circuit read buffer size is set and if not, set it to hbase value. n */ public static void checkShortCircuitReadBufferSize(final Configuration conf) { final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2; @@ -1787,18 +1698,17 @@ public static void checkShortCircuitReadBufferSize(final Configuration conf) { int size = conf.getInt(dfsKey, notSet); // If a size is set, return -- we will use it. if (size != notSet) return; - // But short circuit buffer size is normally not set. Put in place the hbase wanted size. + // But short circuit buffer size is normally not set. Put in place the hbase wanted size. int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize); conf.setIfUnset(dfsKey, Integer.toString(hbaseSize)); } /** - * @param c - * @return The DFSClient DFSHedgedReadMetrics instance or null if can't be found or not on hdfs. - * @throws IOException + * n * @return The DFSClient DFSHedgedReadMetrics instance or null if can't be found or not on + * hdfs. n */ public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c) - throws IOException { + throws IOException { if (!CommonFSUtils.isHDFS(c)) { return null; } @@ -1806,31 +1716,31 @@ public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c // to the DFS FS instance and make the method getHedgedReadMetrics accessible, then invoke it // to get the singleton instance of DFSHedgedReadMetrics shared by DFSClients. 
final String name = "getHedgedReadMetrics"; - DFSClient dfsclient = ((DistributedFileSystem)FileSystem.get(c)).getClient(); + DFSClient dfsclient = ((DistributedFileSystem) FileSystem.get(c)).getClient(); Method m; try { m = dfsclient.getClass().getDeclaredMethod(name); } catch (NoSuchMethodException e) { - LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn( + "Failed find method " + name + " in dfsclient; no hedged read metrics: " + e.getMessage()); return null; } catch (SecurityException e) { - LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn( + "Failed find method " + name + " in dfsclient; no hedged read metrics: " + e.getMessage()); return null; } m.setAccessible(true); try { - return (DFSHedgedReadMetrics)m.invoke(dfsclient); + return (DFSHedgedReadMetrics) m.invoke(dfsclient); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " + + e.getMessage()); return null; } } public static List copyFilesParallel(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, - Configuration conf, int threads) throws IOException { + Configuration conf, int threads) throws IOException { ExecutorService pool = Executors.newFixedThreadPool(threads); List> futures = new ArrayList<>(); List traversedPaths; @@ -1848,7 +1758,7 @@ public static List copyFilesParallel(FileSystem srcFS, Path src, FileSyste } private static List copyFiles(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, - Configuration conf, ExecutorService pool, List> futures) throws IOException { + Configuration conf, ExecutorService pool, List> futures) throws IOException { List traversedPaths = new ArrayList<>(); traversedPaths.add(dst); FileStatus currentFileStatus = srcFS.getFileStatus(src); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java index c3858aeccf0c..edf7f5fed2d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +36,7 @@ public final class FSVisitor { public interface StoreFileVisitor { void storeFile(final String region, final String family, final String hfileName) - throws IOException; + throws IOException; } private FSVisitor() { @@ -46,15 +45,15 @@ private FSVisitor() { /** * Iterate over the table store files - * - * @param fs {@link FileSystem} + * @param fs {@link FileSystem} * @param tableDir {@link Path} to the table directory - * @param visitor callback object to get the store files + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, - final StoreFileVisitor visitor) throws IOException { - List regions = FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + final StoreFileVisitor visitor) throws IOException { + List regions = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { if (LOG.isTraceEnabled()) { LOG.trace("No regions under directory:" + tableDir); @@ -62,22 +61,22 @@ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir return; } - for (FileStatus region: regions) { + for (FileStatus region : regions) { visitRegionStoreFiles(fs, region.getPath(), visitor); } } /** * Iterate over the region store files - * - * @param fs {@link FileSystem} + * @param fs {@link FileSystem} * @param regionDir {@link Path} to the region directory - * @param visitor callback object to get the store files + * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, - final StoreFileVisitor visitor) throws IOException { - List families = FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs)); + final StoreFileVisitor visitor) throws IOException { + List families = + FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs)); if (families == null) { if (LOG.isTraceEnabled()) { LOG.trace("No families under region directory:" + regionDir); @@ -86,7 +85,7 @@ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionD } PathFilter fileFilter = new FSUtils.FileFilter(fs); - for (FileStatus family: families) { + for (FileStatus family : families) { Path familyDir = family.getPath(); String familyName = familyDir.getName(); @@ -99,7 +98,7 @@ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionD continue; } - for (FileStatus hfile: storeFiles) { + for (FileStatus hfile : storeFiles) { Path hfilePath = hfile.getPath(); visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java index e757fca8e5b2..e57d0c1814b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,20 +17,17 @@ */ package org.apache.hadoop.hbase.util; +import org.apache.hadoop.fs.FileStatus; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.fs.FileStatus; @InterfaceAudience.Private @InterfaceStability.Evolving public interface FileStatusFilter { /** - * Tests whether or not the specified filestatus should be - * included in a filestatus list. - * - * @param f The filestatus to be tested - * @return true if and only if the filestatus - * should be included + * Tests whether or not the specified filestatus should be included in a filestatus list. + * @param f The filestatus to be tested + * @return true if and only if the filestatus should be included */ boolean accept(FileStatus f); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java index 2d4de3b4d52d..552ac6c9f870 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,15 +26,16 @@ public final class GetJavaProperty { public static void main(String args[]) { if (args.length == 0) { - for (Object prop: System.getProperties().keySet()) { - System.out.println(prop + "=" + System.getProperty((String)prop, "")); + for (Object prop : System.getProperties().keySet()) { + System.out.println(prop + "=" + System.getProperty((String) prop, "")); } } else { - for (String prop: args) { + for (String prop : args) { System.out.println(System.getProperty(prop, "")); } } } - private GetJavaProperty() {} + private GetJavaProperty() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java index 44dd9776d3e3..91cdff76b3e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.yetus.audience.InterfaceAudience; /** - * Tool that prints out a configuration. - * Pass the configuration key on the command-line. + * Tool that prints out a configuration. Pass the configuration key on the command-line. 
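 [Editor's note, not part of the patch] As a hedged illustration of the tool described above, it effectively resolves one key against the merged HBase configuration and prints it; the key name below is only an example:

   Configuration conf = HBaseConfiguration.create();
   System.out.println(conf.get("hbase.rootdir"));   // key passed on the command line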
*/ @InterfaceAudience.Private public class HBaseConfTool { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 1aacd2d1ac29..3261b8296971 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -134,67 +134,59 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** - * HBaseFsck (hbck) is a tool for checking and repairing region consistency and - * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not - * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. - * Even though it can 'read' state, given how so much has changed in how hbase1 and hbase2 operate, - * it will often misread. See hbck2 (HBASE-19121) for a hbck tool for hbase2. This class is - * deprecated. - * + * HBaseFsck (hbck) is a tool for checking and repairing region consistency and table integrity + * problems in a corrupted HBase. This tool was written for hbase-1.x. It does not work with + * hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. Even + * though it can 'read' state, given how so much has changed in how hbase1 and hbase2 operate, it + * will often misread. See hbck2 (HBASE-19121) for a hbck tool for hbase2. This class is deprecated. *

    - * Region consistency checks verify that hbase:meta, region deployment on region - * servers and the state of data in HDFS (.regioninfo files) all are in - * accordance. + * Region consistency checks verify that hbase:meta, region deployment on region servers and the + * state of data in HDFS (.regioninfo files) all are in accordance. *

    - * Table integrity checks verify that all possible row keys resolve to exactly - * one region of a table. This means there are no individual degenerate - * or backwards regions; no holes between regions; and that there are no - * overlapping regions. + * Table integrity checks verify that all possible row keys resolve to exactly one region of a + * table. This means there are no individual degenerate or backwards regions; no holes between + * regions; and that there are no overlapping regions. *

    * The general repair strategy works in two phases: *

      - *
    1. Repair Table Integrity on HDFS. (merge or fabricate regions) - *
    2. Repair Region Consistency with hbase:meta and assignments + *
    1. Repair Table Integrity on HDFS. (merge or fabricate regions) + *
    2. Repair Region Consistency with hbase:meta and assignments *
    *
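 [Editor's note, not part of the change] A rough sketch of how the two phases listed above map onto methods that appear later in this patch; exception handling is omitted:

   HBaseFsck fsck = new HBaseFsck(HBaseConfiguration.create());
   fsck.offlineHdfsIntegrityRepair();   // phase 1: repair table integrity on HDFS
   fsck.connect();                      // phase 2 needs a live cluster
   fsck.onlineConsistencyRepair();      // phase 2: repair hbase:meta and assignments
   fsck.close();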

    - * For table integrity repairs, the tables' region directories are scanned - * for .regioninfo files. Each table's integrity is then verified. If there - * are any orphan regions (regions with no .regioninfo files) or holes, new - * regions are fabricated. Backwards regions are sidelined as well as empty - * degenerate (endkey==startkey) regions. If there are any overlapping regions, - * a new region is created and all data is merged into the new region. + * For table integrity repairs, the tables' region directories are scanned for .regioninfo files. + * Each table's integrity is then verified. If there are any orphan regions (regions with no + * .regioninfo files) or holes, new regions are fabricated. Backwards regions are sidelined as well + * as empty degenerate (endkey==startkey) regions. If there are any overlapping regions, a new + * region is created and all data is merged into the new region. *

    - * Table integrity repairs deal solely with HDFS and could potentially be done - * offline -- the hbase region servers or master do not need to be running. - * This phase can eventually be used to completely reconstruct the hbase:meta table in - * an offline fashion. + * Table integrity repairs deal solely with HDFS and could potentially be done offline -- the hbase + * region servers or master do not need to be running. This phase can eventually be used to + * completely reconstruct the hbase:meta table in an offline fashion. *

    - * Region consistency requires three conditions -- 1) valid .regioninfo file - * present in an HDFS region dir, 2) valid row with .regioninfo data in META, - * and 3) a region is deployed only at the regionserver that was assigned to - * with proper state in the master. + * Region consistency requires three conditions -- 1) valid .regioninfo file present in an HDFS + * region dir, 2) valid row with .regioninfo data in META, and 3) a region is deployed only at the + * regionserver that was assigned to with proper state in the master. *

    - * Region consistency repairs require hbase to be online so that hbck can - * contact the HBase master and region servers. The hbck#connect() method must - * first be called successfully. Much of the region consistency information - * is transient and less risky to repair. + * Region consistency repairs require hbase to be online so that hbck can contact the HBase master + * and region servers. The hbck#connect() method must first be called successfully. Much of the + * region consistency information is transient and less risky to repair. *
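 [Editor's note, not part of the change] To make the ordering requirement above concrete, a minimal online-repair sequence would look roughly like this; HBaseFsck implements Closeable per this patch, so try-with-resources works:

   try (HBaseFsck fsck = new HBaseFsck(HBaseConfiguration.create())) {
     fsck.connect();                              // must be called (and succeed) first
     int errs = fsck.onlineConsistencyRepair();   // >0 errors found, 0 clean, <0 unrecoverable
   }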

    - * If hbck is run from the command line, there are a handful of arguments that - * can be used to limit the kinds of repairs hbck will do. See the code in - * {@link #printUsageAndExit()} for more details. + * If hbck is run from the command line, there are a handful of arguments that can be used to limit + * the kinds of repairs hbck will do. See the code in {@link #printUsageAndExit()} for more details. * @deprecated For removal in hbase-4.0.0. Use HBCK2 instead. */ @Deprecated @@ -209,8 +201,8 @@ public class HBaseFsck extends Configured implements Closeable { private static final int DEFAULT_MAX_MERGE = 5; /** - * Here is where hbase-1.x used to default the lock for hbck1. - * It puts in place a lock when it goes to write/make changes. + * Here is where hbase-1.x used to default the lock for hbck1. It puts in place a lock when it + * goes to write/make changes. */ @InterfaceAudience.Private public static final String HBCK_LOCK_FILE = "hbase-hbck.lock"; @@ -248,9 +240,9 @@ public class HBaseFsck extends Configured implements Closeable { // Unsupported options in HBase 2.0+ private static final Set unsupportedOptionsInV2 = Sets.newHashSet("-fix", - "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans", - "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents", - "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge"); + "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans", + "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents", + "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge"); /*********** * Options @@ -298,23 +290,20 @@ public class HBaseFsck extends Configured implements Closeable { int fixes = 0; /** - * This map contains the state of all hbck items. It maps from encoded region - * name to HbckRegionInfo structure. The information contained in HbckRegionInfo is used - * to detect and correct consistency (hdfs/meta/deployment) problems. + * This map contains the state of all hbck items. It maps from encoded region name to + * HbckRegionInfo structure. The information contained in HbckRegionInfo is used to detect and + * correct consistency (hdfs/meta/deployment) problems. */ private TreeMap regionInfoMap = new TreeMap<>(); // Empty regioninfo qualifiers in hbase:meta private Set emptyRegionInfoQualifiers = new HashSet<>(); /** - * This map from Tablename -> TableInfo contains the structures necessary to - * detect table consistency problems (holes, dupes, overlaps). It is sorted - * to prevent dupes. - * - * If tablesIncluded is empty, this map contains all tables. - * Otherwise, it contains only meta tables and tables in tablesIncluded, - * unless checkMetaOnly is specified, in which case, it contains only - * the meta table + * This map from Tablename -> TableInfo contains the structures necessary to detect table + * consistency problems (holes, dupes, overlaps). It is sorted to prevent dupes. If tablesIncluded + * is empty, this map contains all tables. 
Otherwise, it contains only meta tables and tables in + * tablesIncluded, unless checkMetaOnly is specified, in which case, it contains only the meta + * table */ private SortedMap tablesInfo = new ConcurrentSkipListMap<>(); @@ -336,9 +325,8 @@ public class HBaseFsck extends Configured implements Closeable { /** * Constructor - * * @param conf Configuration object - * @throws MasterNotRunningException if the master is not running + * @throws MasterNotRunningException if the master is not running * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper */ public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException { @@ -353,17 +341,11 @@ private static ExecutorService createThreadPool(Configuration conf) { } /** - * Constructor - * - * @param conf - * Configuration object - * @throws MasterNotRunningException - * if the master is not running - * @throws ZooKeeperConnectionException - * if unable to connect to ZooKeeper + * Constructor n * Configuration object n * if the master is not running n * if unable to connect + * to ZooKeeper */ public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException, - ZooKeeperConnectionException, IOException, ClassNotFoundException { + ZooKeeperConnectionException, IOException, ClassNotFoundException { super(conf); errors = getErrorReporter(getConf()); this.executor = exec; @@ -377,11 +359,11 @@ public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunni */ public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) { return new RetryCounterFactory( - conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), - conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval", - DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), - conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime", - DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); + conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), + conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval", + DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), + conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime", + DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); } /** @@ -389,11 +371,11 @@ public static RetryCounterFactory createLockRetryCounterFactory(Configuration co */ private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) { return new RetryCounterFactory( - conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS), - conf.getInt("hbase.hbck.createznode.attempt.sleep.interval", - DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL), - conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime", - DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME)); + conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS), + conf.getInt("hbase.hbck.createznode.attempt.sleep.interval", + DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL), + conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime", + DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME)); } /** @@ -433,13 +415,13 @@ public FSDataOutputStream call() throws IOException { final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms); out.writeBytes(InetAddress.getLocalHost().toString()); // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file. - out.writeBytes(" Written by an hbase-2.x Master to block an " + - "attempt by an hbase-1.x HBCK tool making modification to state. 
" + - "See 'HBCK must match HBase server version' in the hbase refguide."); + out.writeBytes(" Written by an hbase-2.x Master to block an " + + "attempt by an hbase-1.x HBCK tool making modification to state. " + + "See 'HBCK must match HBase server version' in the hbase refguide."); out.flush(); return out; - } catch(RemoteException e) { - if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){ + } catch (RemoteException e) { + if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) { return null; } else { throw e; @@ -448,25 +430,21 @@ public FSDataOutputStream call() throws IOException { } private FSDataOutputStream createFileWithRetries(final FileSystem fs, - final Path hbckLockFilePath, final FsPermission defaultPerms) - throws IOException { + final Path hbckLockFilePath, final FsPermission defaultPerms) throws IOException { IOException exception = null; do { try { return CommonFSUtils.create(fs, hbckLockFilePath, defaultPerms, false); } catch (IOException ioe) { - LOG.info("Failed to create lock file " + hbckLockFilePath.getName() - + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of " - + retryCounter.getMaxAttempts()); - LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), - ioe); + LOG.info("Failed to create lock file " + hbckLockFilePath.getName() + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); + LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), ioe); try { exception = ioe; retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException( - "Can't create lock file " + hbckLockFilePath.getName()) - .initCause(ie); + "Can't create lock file " + hbckLockFilePath.getName()).initCause(ie); } } } while (retryCounter.shouldRetry()); @@ -477,18 +455,17 @@ private FSDataOutputStream createFileWithRetries(final FileSystem fs, /** * This method maintains a lock using a file. 
If the creation fails we return null - * * @return FSDataOutputStream object corresponding to the newly opened lock file * @throws IOException if IO failure occurs */ public static Pair checkAndMarkRunningHbck(Configuration conf, - RetryCounter retryCounter) throws IOException { + RetryCounter retryCounter) throws IOException { FileLockCallable callable = new FileLockCallable(conf, retryCounter); ExecutorService executor = Executors.newFixedThreadPool(1); FutureTask futureTask = new FutureTask<>(callable); executor.execute(futureTask); - final int timeoutInSeconds = conf.getInt( - "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); + final int timeoutInSeconds = + conf.getInt("hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); FSDataOutputStream stream = null; try { stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS); @@ -518,15 +495,13 @@ private void unlockHbck() { return; } catch (IOException ioe) { LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try=" - + (retryCounter.getAttemptTimes() + 1) + " of " - + retryCounter.getMaxAttempts()); + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe); try { retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); - LOG.warn("Interrupted while deleting lock file" + - HBCK_LOCK_PATH); + LOG.warn("Interrupted while deleting lock file" + HBCK_LOCK_PATH); return; } } @@ -535,22 +510,21 @@ private void unlockHbck() { } /** - * To repair region consistency, one must call connect() in order to repair - * online state. + * To repair region consistency, one must call connect() in order to repair online state. */ public void connect() throws IOException { if (isExclusive()) { // Grab the lock Pair pair = - checkAndMarkRunningHbck(getConf(), this.lockFileRetryCounterFactory.create()); + checkAndMarkRunningHbck(getConf(), this.lockFileRetryCounterFactory.create()); HBCK_LOCK_PATH = pair.getFirst(); this.hbckOutFd = pair.getSecond(); if (hbckOutFd == null) { setRetCode(-1); - LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " + - "[If you are sure no other instance is running, delete the lock file " + - HBCK_LOCK_PATH + " and rerun the tool]"); + LOG.error("Another instance of hbck is fixing HBase, exiting this instance. 
" + + "[If you are sure no other instance is running, delete the lock file " + HBCK_LOCK_PATH + + " and rerun the tool]"); throw new IOException("Duplicate hbck - Abort"); } @@ -558,7 +532,6 @@ public void connect() throws IOException { hbckLockCleanup.set(true); } - // Add a shutdown hook to this thread, in case user tries to // kill the hbck with a ctrl-c, we want to cleanup the lock so that // it is available for further calls @@ -576,9 +549,8 @@ public void run() { connection = ConnectionFactory.createConnection(getConf()); admin = connection.getAdmin(); meta = connection.getTable(TableName.META_TABLE_NAME); - status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, - Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS, - Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); + status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, + Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); } /** @@ -589,7 +561,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { Collection regionServers = status.getLiveServerMetrics().keySet(); errors.print("Number of live region servers: " + regionServers.size()); if (details) { - for (ServerName rsinfo: regionServers) { + for (ServerName rsinfo : regionServers) { errors.print(" " + rsinfo.getServerName()); } } @@ -598,7 +570,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { Collection deadRegionServers = status.getDeadServerNames(); errors.print("Number of dead region servers: " + deadRegionServers.size()); if (details) { - for (ServerName name: deadRegionServers) { + for (ServerName name : deadRegionServers) { errors.print(" " + name); } } @@ -610,7 +582,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { Collection backupMasters = status.getBackupMasterNames(); errors.print("Number of backup masters: " + backupMasters.size()); if (details) { - for (ServerName name: backupMasters) { + for (ServerName name : backupMasters) { errors.print(" " + name); } } @@ -622,7 +594,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { List rits = status.getRegionStatesInTransition(); errors.print("Number of regions in transition: " + rits.size()); if (details) { - for (RegionState state: rits) { + for (RegionState state : rits) { errors.print(" " + state.toDescriptiveString()); } } @@ -647,14 +619,15 @@ private void clearState() { } /** - * This repair method analyzes hbase data in hdfs and repairs it to satisfy - * the table integrity rules. HBase doesn't need to be online for this - * operation to work. + * This repair method analyzes hbase data in hdfs and repairs it to satisfy the table integrity + * rules. HBase doesn't need to be online for this operation to work. */ public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException { // Initial pass to fix orphans. - if (shouldCheckHdfs() && (shouldFixHdfsOrphans() || shouldFixHdfsHoles() - || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) { + if ( + shouldCheckHdfs() && (shouldFixHdfsOrphans() || shouldFixHdfsHoles() + || shouldFixHdfsOverlaps() || shouldFixTableOrphans()) + ) { LOG.info("Loading regioninfos HDFS"); // if nothing is happening this should always complete in two iterations. 
int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3); @@ -671,7 +644,7 @@ public void offlineHdfsIntegrityRepair() throws IOException, InterruptedExceptio if (curIter > 2) { if (curIter == maxIterations) { LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. " - + "Tables integrity may not be fully repaired!"); + + "Tables integrity may not be fully repaired!"); } else { LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations"); } @@ -680,15 +653,12 @@ public void offlineHdfsIntegrityRepair() throws IOException, InterruptedExceptio } /** - * This repair method requires the cluster to be online since it contacts - * region servers and the masters. It makes each region's state in HDFS, in - * hbase:meta, and deployments consistent. - * - * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable - * error. If 0, we have a clean hbase. + * This repair method requires the cluster to be online since it contacts region servers and the + * masters. It makes each region's state in HDFS, in hbase:meta, and deployments consistent. + * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable error. If + * 0, we have a clean hbase. */ - public int onlineConsistencyRepair() throws IOException, KeeperException, - InterruptedException { + public int onlineConsistencyRepair() throws IOException, KeeperException, InterruptedException { // get regions according to what is online on each RegionServer loadDeployedRegions(); @@ -749,14 +719,12 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, /** * This method maintains an ephemeral znode. If the creation fails we return false or throw * exception - * * @return true if creating znode succeeds; false otherwise * @throws IOException if IO failure occurs */ private boolean setMasterInMaintenanceMode() throws IOException { RetryCounter retryCounter = createZNodeRetryCounterFactory.create(); - hbckEphemeralNodePath = ZNodePaths.joinZNode( - zkw.getZNodePaths().masterMaintZNode, + hbckEphemeralNodePath = ZNodePaths.joinZNode(zkw.getZNodePaths().masterMaintZNode, "hbck-" + Long.toString(EnvironmentEdgeManager.currentTime())); do { try { @@ -766,19 +734,19 @@ private boolean setMasterInMaintenanceMode() throws IOException { } } catch (KeeperException e) { if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) { - throw new IOException("Can't create znode " + hbckEphemeralNodePath, e); + throw new IOException("Can't create znode " + hbckEphemeralNodePath, e); } // fall through and retry } - LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" + - (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); + LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); try { retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException( - "Can't create znode " + hbckEphemeralNodePath).initCause(ie); + "Can't create znode " + hbckEphemeralNodePath).initCause(ie); } } while (retryCounter.shouldRetry()); return hbckZodeCreated; @@ -803,7 +771,7 @@ private void cleanupHbckZnode() { * @return 0 on success, non-zero on failure */ public int onlineHbck() - throws IOException, KeeperException, InterruptedException, ReplicationException { + throws IOException, KeeperException, InterruptedException, ReplicationException { // print 
hbase server version errors.print("Version: " + status.getHBaseVersion()); @@ -814,7 +782,7 @@ public int onlineHbck() offlineReferenceFileRepair(); offlineHLinkFileRepair(); // If Master runs maintenance tasks (such as balancer, catalog janitor, etc) during online - // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it + // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it // is better to set Master into maintenance mode during online hbck. // if (!setMasterInMaintenanceMode()) { @@ -844,8 +812,7 @@ public int onlineHbck() } public static byte[] keyOnly(byte[] b) { - if (b == null) - return b; + if (b == null) return b; int rowlength = Bytes.toShort(b, 0); byte[] result = new byte[rowlength]; System.arraycopy(b, Bytes.SIZEOF_SHORT, result, 0, rowlength); @@ -871,18 +838,18 @@ public void close() throws IOException { } private static class RegionBoundariesInformation { - public byte [] regionName; - public byte [] metaFirstKey; - public byte [] metaLastKey; - public byte [] storesFirstKey; - public byte [] storesLastKey; + public byte[] regionName; + public byte[] metaFirstKey; + public byte[] metaLastKey; + public byte[] storesFirstKey; + public byte[] storesLastKey; + @Override - public String toString () { - return "regionName=" + Bytes.toStringBinary(regionName) + - "\nmetaFirstKey=" + Bytes.toStringBinary(metaFirstKey) + - "\nmetaLastKey=" + Bytes.toStringBinary(metaLastKey) + - "\nstoresFirstKey=" + Bytes.toStringBinary(storesFirstKey) + - "\nstoresLastKey=" + Bytes.toStringBinary(storesLastKey); + public String toString() { + return "regionName=" + Bytes.toStringBinary(regionName) + "\nmetaFirstKey=" + + Bytes.toStringBinary(metaFirstKey) + "\nmetaLastKey=" + Bytes.toStringBinary(metaLastKey) + + "\nstoresFirstKey=" + Bytes.toStringBinary(storesFirstKey) + "\nstoresLastKey=" + + Bytes.toStringBinary(storesLastKey); } } @@ -891,7 +858,7 @@ public void checkRegionBoundaries() { ByteArrayComparator comparator = new ByteArrayComparator(); List regions = MetaTableAccessor.getAllRegions(connection, true); final RegionBoundariesInformation currentRegionBoundariesInformation = - new RegionBoundariesInformation(); + new RegionBoundariesInformation(); Path hbaseRoot = CommonFSUtils.getRootDir(getConf()); for (RegionInfo regionInfo : regions) { Path tableDir = CommonFSUtils.getTableDir(hbaseRoot, regionInfo.getTable()); @@ -911,17 +878,21 @@ public void checkRegionBoundaries() { FileStatus[] storeFiles = fs.listStatus(file.getPath()); // For all the stores in this column family. 
for (FileStatus storeFile : storeFiles) { - HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), - CacheConfig.DISABLED, true, getConf()); - if ((reader.getFirstKey() != null) + HFile.Reader reader = + HFile.createReader(fs, storeFile.getPath(), CacheConfig.DISABLED, true, getConf()); + if ( + (reader.getFirstKey() != null) && ((storeFirstKey == null) || (comparator.compare(storeFirstKey, - ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) { - storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey().get()).getKey(); + ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0)) + ) { + storeFirstKey = ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey(); } - if ((reader.getLastKey() != null) + if ( + (reader.getLastKey() != null) && ((storeLastKey == null) || (comparator.compare(storeLastKey, - ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey())) < 0)) { - storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey(); + ((KeyValue.KeyOnlyKeyValue) reader.getLastKey().get()).getKey())) < 0) + ) { + storeLastKey = ((KeyValue.KeyOnlyKeyValue) reader.getLastKey().get()).getKey(); } reader.close(); } @@ -943,18 +914,20 @@ public void checkRegionBoundaries() { boolean valid = true; // Checking start key. - if ((currentRegionBoundariesInformation.storesFirstKey != null) - && (currentRegionBoundariesInformation.metaFirstKey != null)) { - valid = valid - && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, - currentRegionBoundariesInformation.metaFirstKey) >= 0; + if ( + (currentRegionBoundariesInformation.storesFirstKey != null) + && (currentRegionBoundariesInformation.metaFirstKey != null) + ) { + valid = valid && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, + currentRegionBoundariesInformation.metaFirstKey) >= 0; } // Checking stop key. - if ((currentRegionBoundariesInformation.storesLastKey != null) - && (currentRegionBoundariesInformation.metaLastKey != null)) { - valid = valid - && comparator.compare(currentRegionBoundariesInformation.storesLastKey, - currentRegionBoundariesInformation.metaLastKey) < 0; + if ( + (currentRegionBoundariesInformation.storesLastKey != null) + && (currentRegionBoundariesInformation.metaLastKey != null) + ) { + valid = valid && comparator.compare(currentRegionBoundariesInformation.storesLastKey, + currentRegionBoundariesInformation.metaLastKey) < 0; } if (!valid) { errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries", @@ -979,13 +952,11 @@ private void adoptHdfsOrphans(Collection orphanHdfsDirs) throws } /** - * Orphaned regions are regions without a .regioninfo file in them. We "adopt" - * these orphans by creating a new region, and moving the column families, - * recovered edits, WALs, into the new region dir. We determine the region - * startkey and endkeys by looking at all of the hfiles inside the column - * families to identify the min and max keys. The resulting region will - * likely violate table integrity but will be dealt with by merging - * overlapping regions. + * Orphaned regions are regions without a .regioninfo file in them. We "adopt" these orphans by + * creating a new region, and moving the column families, recovered edits, WALs, into the new + * region dir. We determine the region startkey and endkeys by looking at all of the hfiles inside + * the column families to identify the min and max keys. 
The resulting region will likely violate + * table integrity but will be dealt with by merging overlapping regions. */ @SuppressWarnings("deprecation") private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { @@ -993,9 +964,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p); if (dirs == null) { - LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + - p + ". This dir could probably be deleted."); - return ; + LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + p + + ". This dir could probably be deleted."); + return; } TableName tableName = hi.getTableName(); @@ -1004,9 +975,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { TableDescriptor template = tableInfo.getTableDescriptor(); // find min and max key values - Pair orphanRegionRange = null; + Pair orphanRegionRange = null; for (FileStatus cf : dirs) { - String cfName= cf.getPath().getName(); + String cfName = cf.getPath().getName(); // TODO Figure out what the special dirs are if (cfName.startsWith(".") || cfName.equals(HConstants.SPLIT_LOGDIR_NAME)) continue; @@ -1043,7 +1014,7 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) { orphanRegionRange.setFirst(start); } - if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0 ) { + if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) { orphanRegionRange.setSecond(end); } } @@ -1055,14 +1026,13 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { sidelineRegionDir(fs, hi); return; } - LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + - Bytes.toString(orphanRegionRange.getSecond()) + ")"); + LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + + Bytes.toString(orphanRegionRange.getSecond()) + ")"); // create new region on hdfs. move data into place. RegionInfo regionInfo = RegionInfoBuilder.newBuilder(template.getTableName()) - .setStartKey(orphanRegionRange.getFirst()) - .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1])) - .build(); + .setStartKey(orphanRegionRange.getFirst()) + .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1])).build(); LOG.info("Creating new region : " + regionInfo); HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), regionInfo, template); Path target = region.getRegionFileSystem().getRegionDir(); @@ -1073,11 +1043,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { } /** - * This method determines if there are table integrity errors in HDFS. If - * there are errors and the appropriate "fix" options are enabled, the method - * will first correct orphan regions making them into legit regiondirs, and - * then reload to merge potentially overlapping regions. - * + * This method determines if there are table integrity errors in HDFS. If there are errors and the + * appropriate "fix" options are enabled, the method will first correct orphan regions making them + * into legit regiondirs, and then reload to merge potentially overlapping regions. 
* @return number of table integrity errors found */ private int restoreHdfsIntegrity() throws IOException, InterruptedException { @@ -1121,12 +1089,12 @@ private int restoreHdfsIntegrity() throws IOException, InterruptedException { } /** - * Scan all the store file names to find any lingering reference files, - * which refer to some none-exiting files. If "fix" option is enabled, - * any lingering reference file will be sidelined if found. + * Scan all the store file names to find any lingering reference files, which refer to some + * none-exiting files. If "fix" option is enabled, any lingering reference file will be sidelined + * if found. *

    - * Lingering reference file prevents a region from opening. It has to - * be fixed before a cluster can start properly. + * Lingering reference file prevents a region from opening. It has to be fixed before a cluster + * can start properly. */ private void offlineReferenceFileRepair() throws IOException, InterruptedException { clearState(); @@ -1138,9 +1106,9 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti new FSUtils.ReferenceFileFilter(fs), executor, errors); errors.print(""); LOG.info("Validating mapping using HDFS state"); - for (Path path: allFiles.values()) { + for (Path path : allFiles.values()) { Path referredToFile = StoreFileInfo.getReferredToFile(path); - if (fs.exists(referredToFile)) continue; // good, expected + if (fs.exists(referredToFile)) continue; // good, expected // Found a lingering reference file errors.reportError(ERROR_CODE.LINGERING_REFERENCE_HFILE, @@ -1163,8 +1131,7 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti Path rootDir = getSidelineDir(); Path dst = new Path(rootDir, pathStr.substring(index + 1)); fs.mkdirs(dst.getParent()); - LOG.info("Trying to sideline reference file " - + path + " to " + dst); + LOG.info("Trying to sideline reference file " + path + " to " + dst); setShouldRerun(); success = fs.rename(path, dst); @@ -1178,17 +1145,17 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti } /** - * Scan all the store file names to find any lingering HFileLink files, - * which refer to some none-exiting files. If "fix" option is enabled, - * any lingering HFileLink file will be sidelined if found. + * Scan all the store file names to find any lingering HFileLink files, which refer to some + * none-exiting files. If "fix" option is enabled, any lingering HFileLink file will be sidelined + * if found. */ private void offlineHLinkFileRepair() throws IOException, InterruptedException { Configuration conf = getConf(); Path hbaseRoot = CommonFSUtils.getRootDir(conf); FileSystem fs = hbaseRoot.getFileSystem(conf); LOG.info("Computing mapping of all link files"); - Map allFiles = FSUtils - .getTableStoreFilePathMap(fs, hbaseRoot, new FSUtils.HFileLinkFilter(), executor, errors); + Map allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot, + new FSUtils.HFileLinkFilter(), executor, errors); errors.print(""); LOG.info("Validating mapping using HDFS state"); @@ -1206,7 +1173,8 @@ private void offlineHLinkFileRepair() throws IOException, InterruptedException { // An HFileLink path should be like // ${hbase.rootdir}/data/namespace/table_name/region_id/family_name/linkedtable=linkedregionname-linkedhfilename - // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure. + // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same + // folder structure. boolean success = sidelineFile(fs, hbaseRoot, path); if (!success) { @@ -1215,12 +1183,13 @@ private void offlineHLinkFileRepair() throws IOException, InterruptedException { // An HFileLink backreference path should be like // ${hbase.rootdir}/archive/data/namespace/table_name/region_id/family_name/.links-linkedhfilename - // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure. 
- Path backRefPath = FileLink.getBackReferencesDir(HFileArchiveUtil - .getStoreArchivePath(conf, HFileLink.getReferencedTableName(path.getName().toString()), - HFileLink.getReferencedRegionName(path.getName().toString()), - path.getParent().getName()), - HFileLink.getReferencedHFileName(path.getName().toString())); + // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same + // folder structure. + Path backRefPath = FileLink.getBackReferencesDir( + HFileArchiveUtil.getStoreArchivePath(conf, + HFileLink.getReferencedTableName(path.getName().toString()), + HFileLink.getReferencedRegionName(path.getName().toString()), path.getParent().getName()), + HFileLink.getReferencedHFileName(path.getName().toString())); success = sidelineFile(fs, hbaseRoot, backRefPath); if (!success) { @@ -1248,10 +1217,10 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO * TODO -- need to add tests for this. */ private void reportEmptyMetaCells() { - errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " + - emptyRegionInfoQualifiers.size()); + errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " + + emptyRegionInfoQualifiers.size()); if (details) { - for (Result r: emptyRegionInfoQualifiers) { + for (Result r : emptyRegionInfoQualifiers) { errors.print(" " + r); } } @@ -1269,10 +1238,9 @@ private void reportTablesInFlux() { errors.detail("Number of Tables in flux: " + numSkipped.get()); } for (TableDescriptor td : allTables) { - errors.detail(" Table: " + td.getTableName() + "\t" + - (td.isReadOnly() ? "ro" : "rw") + "\t" + - (td.isMetaRegion() ? "META" : " ") + "\t" + - " families: " + td.getColumnFamilyCount()); + errors.detail(" Table: " + td.getTableName() + "\t" + (td.isReadOnly() ? "ro" : "rw") + + "\t" + (td.isMetaRegion() ? "META" : " ") + "\t" + " families: " + + td.getColumnFamilyCount()); } } } @@ -1285,7 +1253,7 @@ public HbckErrorReporter getErrors() { * Populate hbi's from regionInfos loaded from file system. */ private SortedMap loadHdfsRegionInfos() - throws IOException, InterruptedException { + throws IOException, InterruptedException { tablesInfo.clear(); // regenerating the data // generate region split structure Collection hbckRegionInfos = regionInfoMap.values(); @@ -1302,28 +1270,27 @@ private SortedMap loadHdfsRegionInfos() // Submit and wait for completion hbiFutures = executor.invokeAll(hbis); - for(int i=0; i f = hbiFutures.get(i); try { f.get(); - } catch(ExecutionException e) { - LOG.warn("Failed to read .regioninfo file for region " + - work.hbi.getRegionNameAsString(), e.getCause()); + } catch (ExecutionException e) { + LOG.warn("Failed to read .regioninfo file for region " + work.hbi.getRegionNameAsString(), + e.getCause()); } } Path hbaseRoot = CommonFSUtils.getRootDir(getConf()); FileSystem fs = hbaseRoot.getFileSystem(getConf()); // serialized table info gathering. - for (HbckRegionInfo hbi: hbckRegionInfos) { + for (HbckRegionInfo hbi : hbckRegionInfos) { if (hbi.getHdfsHRI() == null) { // was an orphan continue; } - // get table name from hdfs, populate various HBaseFsck tables. 
TableName tableName = hbi.getTableName(); if (tableName == null) { @@ -1339,14 +1306,14 @@ private SortedMap loadHdfsRegionInfos() tablesInfo.put(tableName, modTInfo); try { TableDescriptor htd = - FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName); + FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName); modTInfo.htds.add(htd); } catch (IOException ioe) { if (!orphanTableDirs.containsKey(tableName)) { LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe); - //should only report once for each table + // should only report once for each table errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE, - "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); + "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); Set columns = new HashSet<>(); orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi)); } @@ -1364,14 +1331,11 @@ private SortedMap loadHdfsRegionInfos() } /** - * To get the column family list according to the column family dirs - * @param columns - * @param hbi - * @return a set of column families - * @throws IOException + * To get the column family list according to the column family dirs nn * @return a set of column + * families n */ private Set getColumnFamilyList(Set columns, HbckRegionInfo hbi) - throws IOException { + throws IOException { Path regionDir = hbi.getHdfsRegionDir(); FileSystem fs = regionDir.getFileSystem(getConf()); FileStatus[] subDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs)); @@ -1386,12 +1350,13 @@ private Set getColumnFamilyList(Set columns, HbckRegionInfo hbi) * To fabricate a .tableinfo file with following contents
    * 1. the correct tablename
    * 2. the correct colfamily list
    - * 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}
    - * @throws IOException + * 3. the default properties for both {@link TableDescriptor} and + * {@link ColumnFamilyDescriptor}
    + * n */ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, - Set columns) throws IOException { - if (columns ==null || columns.isEmpty()) return false; + Set columns) throws IOException { + if (columns == null || columns.isEmpty()) return false; TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (String columnfamimly : columns) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly)); @@ -1402,7 +1367,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, /** * To fix the empty REGIONINFO_QUALIFIER rows from hbase:meta
    - * @throws IOException + * n */ public void fixEmptyMetaCells() throws IOException { if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) { @@ -1421,8 +1386,9 @@ public void fixEmptyMetaCells() throws IOException { * 2. else create a default .tableinfo file with following items
    *  2.1 the correct tablename
    *  2.2 the correct colfamily list
    - *  2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}
    - * @throws IOException + *  2.3 the default properties for both {@link TableDescriptor} and + * {@link ColumnFamilyDescriptor}
    + * n */ public void fixOrphanTables() throws IOException { if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) { @@ -1430,14 +1396,12 @@ public void fixOrphanTables() throws IOException { List tmpList = new ArrayList<>(orphanTableDirs.keySet().size()); tmpList.addAll(orphanTableDirs.keySet()); TableDescriptor[] htds = getTableDescriptors(tmpList); - Iterator>> iter = - orphanTableDirs.entrySet().iterator(); + Iterator>> iter = orphanTableDirs.entrySet().iterator(); int j = 0; int numFailedCase = 0; FSTableDescriptors fstd = new FSTableDescriptors(getConf()); while (iter.hasNext()) { - Entry> entry = - iter.next(); + Entry> entry = iter.next(); TableName tableName = entry.getKey(); LOG.info("Trying to fix orphan table error: " + tableName); if (j < htds.length) { @@ -1451,10 +1415,12 @@ public void fixOrphanTables() throws IOException { } else { if (fabricateTableInfo(fstd, tableName, entry.getValue())) { LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file"); - LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName); + LOG.warn( + "Strongly recommend to modify the TableDescriptor if necessary for: " + tableName); iter.remove(); } else { - LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information"); + LOG.error("Unable to create default .tableinfo for " + tableName + + " while missing column family information"); numFailedCase++; } } @@ -1465,14 +1431,14 @@ public void fixOrphanTables() throws IOException { // all orphanTableDirs are luckily recovered // re-run doFsck after recovering the .tableinfo file setShouldRerun(); - LOG.warn("Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed"); + LOG.warn( + "Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed"); } else if (numFailedCase > 0) { - LOG.error("Failed to fix " + numFailedCase - + " OrphanTables with default .tableinfo files"); + LOG.error("Failed to fix " + numFailedCase + " OrphanTables with default .tableinfo files"); } } - //cleanup the list + // cleanup the list orphanTableDirs.clear(); } @@ -1482,23 +1448,22 @@ public void fixOrphanTables() throws IOException { */ private void logParallelMerge() { if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) { - LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + - " false to run serially."); + LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + + " false to run serially."); } else { - LOG.info("Handling overlap merges serially. set hbasefsck.overlap.merge.parallel to" + - " true to run in parallel."); + LOG.info("Handling overlap merges serially. 
set hbasefsck.overlap.merge.parallel to" + + " true to run in parallel."); } } private SortedMap checkHdfsIntegrity(boolean fixHoles, - boolean fixOverlaps) throws IOException { + boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); logParallelMerge(); for (HbckTableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { - handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), - fixHoles, fixOverlaps); + handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), fixHoles, fixOverlaps); } else { handler = tInfo.new IntegrityFixSuggester(tInfo, errors); } @@ -1514,8 +1479,7 @@ Path getSidelineDir() throws IOException { if (sidelineDir == null) { Path hbaseDir = CommonFSUtils.getRootDir(getConf()); Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME); - sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" - + startMillis); + sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" + startMillis); } return sidelineDir; } @@ -1529,14 +1493,12 @@ Path sidelineRegionDir(FileSystem fs, HbckRegionInfo hi) throws IOException { /** * Sideline a region dir (instead of deleting it) - * * @param parentDir if specified, the region will be sidelined to folder like - * {@literal .../parentDir/
    /}. The purpose is to group together - * similar regions sidelined, for example, those regions should be bulk loaded back later - * on. If NULL, it is ignored. + * {@literal .../parentDir/
    /}. The purpose is to group + * together similar regions sidelined, for example, those regions should be bulk + * loaded back later on. If NULL, it is ignored. */ - Path sidelineRegionDir(FileSystem fs, - String parentDir, HbckRegionInfo hi) throws IOException { + Path sidelineRegionDir(FileSystem fs, String parentDir, HbckRegionInfo hi) throws IOException { TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir(); @@ -1549,22 +1511,22 @@ Path sidelineRegionDir(FileSystem fs, if (parentDir != null) { rootDir = new Path(rootDir, parentDir); } - Path sidelineTableDir= CommonFSUtils.getTableDir(rootDir, tableName); + Path sidelineTableDir = CommonFSUtils.getTableDir(rootDir, tableName); Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName()); fs.mkdirs(sidelineRegionDir); boolean success = false; - FileStatus[] cfs = fs.listStatus(regionDir); + FileStatus[] cfs = fs.listStatus(regionDir); if (cfs == null) { LOG.info("Region dir is empty: " + regionDir); } else { for (FileStatus cf : cfs) { Path src = cf.getPath(); - Path dst = new Path(sidelineRegionDir, src.getName()); + Path dst = new Path(sidelineRegionDir, src.getName()); if (fs.isFile(src)) { // simple file success = fs.rename(src, dst); if (!success) { - String msg = "Unable to rename file " + src + " to " + dst; + String msg = "Unable to rename file " + src + " to " + dst; LOG.error(msg); throw new IOException(msg); } @@ -1577,14 +1539,14 @@ Path sidelineRegionDir(FileSystem fs, LOG.info("Sidelining files from " + src + " into containing region " + dst); // FileSystem.rename is inconsistent with directories -- if the // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir, - // it moves the src into the dst dir resulting in (foo/a/b). If + // it moves the src into the dst dir resulting in (foo/a/b). If // the dst does not exist, and the src a dir, src becomes dst. (foo/b) FileStatus[] hfiles = fs.listStatus(src); if (hfiles != null && hfiles.length > 0) { for (FileStatus hfile : hfiles) { success = fs.rename(hfile.getPath(), dst); if (!success) { - String msg = "Unable to rename file " + src + " to " + dst; + String msg = "Unable to rename file " + src + " to " + dst; LOG.error(msg); throw new IOException(msg); } @@ -1606,18 +1568,15 @@ Path sidelineRegionDir(FileSystem fs, } /** - * Load the list of disabled tables in ZK into local set. - * @throws ZooKeeperConnectionException - * @throws IOException + * Load the list of disabled tables in ZK into local set. nn */ - private void loadTableStates() - throws IOException { + private void loadTableStates() throws IOException { tableStates = MetaTableAccessor.getTableStates(connection); // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in // meantime. 
this.tableStates.put(TableName.META_TABLE_NAME, - new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); + new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); } /** @@ -1626,13 +1585,11 @@ private void loadTableStates() */ boolean isTableDisabled(TableName tableName) { return tableStates.containsKey(tableName) - && tableStates.get(tableName) - .inStates(TableState.State.DISABLED, TableState.State.DISABLING); + && tableStates.get(tableName).inStates(TableState.State.DISABLED, TableState.State.DISABLING); } /** - * Scan HDFS for all regions, recording their information into - * regionInfoMap + * Scan HDFS for all regions, recording their information into regionInfoMap */ public void loadHdfsRegionDirs() throws IOException, InterruptedException { Path rootDir = CommonFSUtils.getRootDir(getConf()); @@ -1646,44 +1603,43 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException { List paths = FSUtils.getTableDirs(fs, rootDir); for (Path path : paths) { TableName tableName = CommonFSUtils.getTableName(path); - if ((!checkMetaOnly && - isTableIncluded(tableName)) || - tableName.equals(TableName.META_TABLE_NAME)) { - tableDirs.add(fs.getFileStatus(path)); - } + if ( + (!checkMetaOnly && isTableIncluded(tableName)) + || tableName.equals(TableName.META_TABLE_NAME) + ) { + tableDirs.add(fs.getFileStatus(path)); + } } // verify that version file exists if (!foundVersionFile) { errors.reportError(ERROR_CODE.NO_VERSION_FILE, - "Version file does not exist in root dir " + rootDir); + "Version file does not exist in root dir " + rootDir); if (shouldFixVersionFile()) { - LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME - + " file."); + LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME + " file."); setShouldRerun(); - FSUtils.setVersion(fs, rootDir, getConf().getInt( - HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt( - HConstants.VERSION_FILE_WRITE_ATTEMPTS, + FSUtils.setVersion(fs, rootDir, + getConf().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), + getConf().getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS)); } } // Avoid multithreading at table-level because already multithreaded internally at - // region-level. Additionally multithreading at table-level can lead to deadlock - // if there are many tables in the cluster. Since there are a limited # of threads + // region-level. Additionally multithreading at table-level can lead to deadlock + // if there are many tables in the cluster. Since there are a limited # of threads // in the executor's thread pool and if we multithread at the table-level by putting // WorkItemHdfsDir callables into the executor, then we will have some threads in the // executor tied up solely in waiting for the tables' region-level calls to complete. // If there are enough tables then there will be no actual threads in the pool left // for the region-level callables to be serviced. 
for (FileStatus tableDir : tableDirs) { - LOG.debug("Loading region dirs from " +tableDir.getPath()); + LOG.debug("Loading region dirs from " + tableDir.getPath()); WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir); try { item.call(); } catch (ExecutionException e) { - LOG.warn("Could not completely load table dir " + - tableDir.getPath(), e.getCause()); + LOG.warn("Could not completely load table dir " + tableDir.getPath(), e.getCause()); } } errors.print(""); @@ -1717,7 +1673,7 @@ private boolean recordMetaRegion() throws IOException { } ServerName sn = metaLocation.getServerName(); HbckRegionInfo.MetaEntry m = new HbckRegionInfo.MetaEntry(metaLocation.getRegion(), sn, - EnvironmentEdgeManager.currentTime()); + EnvironmentEdgeManager.currentTime()); HbckRegionInfo hbckRegionInfo = regionInfoMap.get(metaLocation.getRegion().getEncodedName()); if (hbckRegionInfo == null) { regionInfoMap.put(metaLocation.getRegion().getEncodedName(), new HbckRegionInfo(m)); @@ -1756,20 +1712,19 @@ void processRegionServers(Collection regionServerList) List> workFutures; // loop to contact each region server in parallel - for (ServerName rsinfo: regionServerList) { + for (ServerName rsinfo : regionServerList) { workItems.add(new WorkItemRegion(this, rsinfo, errors, connection)); } workFutures = executor.invokeAll(workItems); - for(int i=0; i f = workFutures.get(i); try { f.get(); - } catch(ExecutionException e) { - LOG.warn("Could not process regionserver {}", item.rsinfo.getAddress(), - e.getCause()); + } catch (ExecutionException e) { + LOG.warn("Could not process regionserver {}", item.rsinfo.getAddress(), e.getCause()); } } } @@ -1777,13 +1732,12 @@ void processRegionServers(Collection regionServerList) /** * Check consistency of all regions that have been found in previous phases. */ - private void checkAndFixConsistency() - throws IOException, KeeperException, InterruptedException { + private void checkAndFixConsistency() throws IOException, KeeperException, InterruptedException { // Divide the checks in two phases. One for default/primary replicas and another // for the non-primary ones. Keeps code cleaner this way. List workItems = new ArrayList<>(regionInfoMap.size()); - for (java.util.Map.Entry e: regionInfoMap.entrySet()) { + for (java.util.Map.Entry e : regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } @@ -1791,11 +1745,11 @@ private void checkAndFixConsistency() checkRegionConsistencyConcurrently(workItems); boolean prevHdfsCheck = shouldCheckHdfs(); - setCheckHdfs(false); //replicas don't have any hdfs data + setCheckHdfs(false); // replicas don't have any hdfs data // Run a pass over the replicas and fix any assignment issues that exist on the currently // deployed/undeployed replicas. List replicaWorkItems = new ArrayList<>(regionInfoMap.size()); - for (java.util.Map.Entry e: regionInfoMap.entrySet()) { + for (java.util.Map.Entry e : regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } @@ -1807,11 +1761,11 @@ private void checkAndFixConsistency() // not get accurate state of the hbase if continuing. The config here allows users to tune // the tolerance of number of skipped region. // TODO: evaluate the consequence to continue the hbck operation without config. 
- int terminateThreshold = getConf().getInt("hbase.hbck.skipped.regions.limit", 0); + int terminateThreshold = getConf().getInt("hbase.hbck.skipped.regions.limit", 0); int numOfSkippedRegions = skippedRegions.size(); if (numOfSkippedRegions > 0 && numOfSkippedRegions > terminateThreshold) { - throw new IOException(numOfSkippedRegions - + " region(s) could not be checked or repaired. See logs for detail."); + throw new IOException( + numOfSkippedRegions + " region(s) could not be checked or repaired. See logs for detail."); } if (shouldCheckHdfs()) { @@ -1822,25 +1776,25 @@ private void checkAndFixConsistency() /** * Check consistency of all regions using multiple threads concurrently. */ - private void checkRegionConsistencyConcurrently( - final List workItems) - throws IOException, KeeperException, InterruptedException { + private void + checkRegionConsistencyConcurrently(final List workItems) + throws IOException, KeeperException, InterruptedException { if (workItems.isEmpty()) { - return; // nothing to check + return; // nothing to check } List> workFutures = executor.invokeAll(workItems); - for(Future f: workFutures) { + for (Future f : workFutures) { try { f.get(); - } catch(ExecutionException e1) { - LOG.warn("Could not check region consistency " , e1.getCause()); + } catch (ExecutionException e1) { + LOG.warn("Could not check region consistency ", e1.getCause()); if (e1.getCause() instanceof IOException) { - throw (IOException)e1.getCause(); + throw (IOException) e1.getCause(); } else if (e1.getCause() instanceof KeeperException) { - throw (KeeperException)e1.getCause(); + throw (KeeperException) e1.getCause(); } else if (e1.getCause() instanceof InterruptedException) { - throw (InterruptedException)e1.getCause(); + throw (InterruptedException) e1.getCause(); } else { throw new IOException(e1.getCause()); } @@ -1864,8 +1818,9 @@ public synchronized Void call() throws Exception { } catch (Exception e) { // If the region is non-META region, skip this region and send warning/error message; if // the region is META region, we should not continue. 
- LOG.warn("Unable to complete check or repair the region '" + hbi.getRegionNameAsString() - + "'.", e); + LOG.warn( + "Unable to complete check or repair the region '" + hbi.getRegionNameAsString() + "'.", + e); if (hbi.getHdfsHRI().isMetaRegion()) { throw e; } @@ -1886,9 +1841,7 @@ private void addSkippedRegion(final HbckRegionInfo hbi) { } /** - * Check and fix table states, assumes full info available: - * - tableInfos - * - empty tables loaded + * Check and fix table states, assumes full info available: - tableInfos - empty tables loaded */ private void checkAndFixTableStates() throws IOException { // first check dangling states @@ -1896,21 +1849,19 @@ private void checkAndFixTableStates() throws IOException { TableName tableName = entry.getKey(); TableState tableState = entry.getValue(); HbckTableInfo tableInfo = tablesInfo.get(tableName); - if (isTableIncluded(tableName) - && !tableName.isSystemTable() - && tableInfo == null) { + if (isTableIncluded(tableName) && !tableName.isSystemTable() && tableInfo == null) { if (fixMeta) { MetaTableAccessor.deleteTableState(connection, tableName); TableState state = MetaTableAccessor.getTableState(connection, tableName); if (state != null) { errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " unable to delete dangling table state " + tableState); + tableName + " unable to delete dangling table state " + tableState); } } else if (!checkMetaOnly) { // dangling table state in meta if checkMetaOnly is false. If checkMetaOnly is // true, tableInfo will be null as tablesInfo are not polulated for all tables from hdfs errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " has dangling table state " + tableState); + tableName + " has dangling table state " + tableState); } } } @@ -1922,11 +1873,10 @@ private void checkAndFixTableStates() throws IOException { TableState newState = MetaTableAccessor.getTableState(connection, tableName); if (newState == null) { errors.reportError(ERROR_CODE.NO_TABLE_STATE, - "Unable to change state for table " + tableName + " in meta "); + "Unable to change state for table " + tableName + " in meta "); } } else { - errors.reportError(ERROR_CODE.NO_TABLE_STATE, - tableName + " has no state in meta "); + errors.reportError(ERROR_CODE.NO_TABLE_STATE, tableName + " has no state in meta "); } } } @@ -1947,9 +1897,9 @@ private void preCheckPermission() throws IOException { fs.access(file.getPath(), FsAction.WRITE); } catch (AccessControlException ace) { LOG.warn("Got AccessDeniedException when preCheckPermission ", ace); - errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName() - + " does not have write perms to " + file.getPath() - + ". Please rerun hbck as hdfs user " + file.getOwner()); + errors.reportError(ERROR_CODE.WRONG_USAGE, + "Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath() + + ". 
Please rerun hbck as hdfs user " + file.getOwner()); throw ace; } } @@ -1968,7 +1918,7 @@ private void deleteMetaRegion(HbckRegionInfo hi) throws IOException { private void deleteMetaRegion(byte[] metaKey) throws IOException { Delete d = new Delete(metaKey); meta.delete(d); - LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" ); + LOG.info("Deleted " + Bytes.toString(metaKey) + " from META"); } /** @@ -1987,24 +1937,22 @@ private void resetSplitParent(HbckRegionInfo hi) throws IOException { mutations.add(p); meta.mutateRow(mutations); - LOG.info("Reset split parent " + hi.getMetaEntry().getRegionInfo().getRegionNameAsString() + - " in META"); + LOG.info("Reset split parent " + hi.getMetaEntry().getRegionInfo().getRegionNameAsString() + + " in META"); } /** - * This backwards-compatibility wrapper for permanently offlining a region - * that should not be alive. If the region server does not support the - * "offline" method, it will use the closest unassign method instead. This - * will basically work until one attempts to disable or delete the affected - * table. The problem has to do with in-memory only master state, so - * restarting the HMaster or failing over to another should fix this. + * This backwards-compatibility wrapper for permanently offlining a region that should not be + * alive. If the region server does not support the "offline" method, it will use the closest + * unassign method instead. This will basically work until one attempts to disable or delete the + * affected table. The problem has to do with in-memory only master state, so restarting the + * HMaster or failing over to another should fix this. */ void offline(byte[] regionName) throws IOException { String regionString = Bytes.toStringBinary(regionName); if (!rsSupportsOffline) { - LOG.warn( - "Using unassign region " + regionString + " instead of using offline method, you should" + - " restart HMaster after these repairs"); + LOG.warn("Using unassign region " + regionString + + " instead of using offline method, you should" + " restart HMaster after these repairs"); admin.unassign(regionName, true); return; } @@ -2014,12 +1962,12 @@ void offline(byte[] regionName) throws IOException { LOG.info("Offlining region " + regionString); admin.offline(regionName); } catch (IOException ioe) { - String notFoundMsg = "java.lang.NoSuchMethodException: " + - "org.apache.hadoop.hbase.master.HMaster.offline([B)"; + String notFoundMsg = + "java.lang.NoSuchMethodException: " + "org.apache.hadoop.hbase.master.HMaster.offline([B)"; if (ioe.getMessage().contains(notFoundMsg)) { - LOG.warn("Using unassign region " + regionString + - " instead of using offline method, you should" + - " restart HMaster after these repairs"); + LOG.warn( + "Using unassign region " + regionString + " instead of using offline method, you should" + + " restart HMaster after these repairs"); rsSupportsOffline = false; // in the future just use unassign admin.unassign(regionName, true); return; @@ -2029,16 +1977,13 @@ void offline(byte[] regionName) throws IOException { } /** - * Attempts to undeploy a region from a region server based in information in - * META. Any operations that modify the file system should make sure that - * its corresponding region is not deployed to prevent data races. - * - * A separate call is required to update the master in-memory region state - * kept in the AssignementManager. Because disable uses this state instead of - * that found in META, we can't seem to cleanly disable/delete tables that - * have been hbck fixed. 
When used on a version of HBase that does not have - * the offline ipc call exposed on the master (<0.90.5, <0.92.0) a master - * restart or failover may be required. + * Attempts to undeploy a region from a region server based in information in META. Any operations + * that modify the file system should make sure that its corresponding region is not deployed to + * prevent data races. A separate call is required to update the master in-memory region state + * kept in the AssignementManager. Because disable uses this state instead of that found in META, + * we can't seem to cleanly disable/delete tables that have been hbck fixed. When used on a + * version of HBase that does not have the offline ipc call exposed on the master (<0.90.5, + * <0.92.0) a master restart or failover may be required. */ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { if (hi.getMetaEntry() == null && hi.getHdfsEntry() == null) { @@ -2062,25 +2007,22 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { Result r = meta.get(get); RegionLocations rl = CatalogFamilyFormat.getRegionLocations(r); if (rl == null) { - LOG.warn("Unable to close region " + hi.getRegionNameAsString() + - " since meta does not have handle to reach it"); + LOG.warn("Unable to close region " + hi.getRegionNameAsString() + + " since meta does not have handle to reach it"); return; } for (HRegionLocation h : rl.getRegionLocations()) { ServerName serverName = h.getServerName(); if (serverName == null) { - errors.reportError("Unable to close region " - + hi.getRegionNameAsString() + " because meta does not " - + "have handle to reach it."); + errors.reportError("Unable to close region " + hi.getRegionNameAsString() + + " because meta does not " + "have handle to reach it."); continue; } RegionInfo hri = h.getRegion(); if (hri == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() - + " because hbase:meta had invalid or missing " - + HConstants.CATALOG_FAMILY_STR + ":" - + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) - + " qualifier value."); + + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); continue; } // close the region -- close files and remove assignment @@ -2097,13 +2039,13 @@ private void undeployRegions(HbckRegionInfo hi) throws IOException, InterruptedE int numReplicas = admin.getDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 1; i < numReplicas; i++) { if (hi.getPrimaryHRIForDeployedReplica() == null) continue; - RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( - hi.getPrimaryHRIForDeployedReplica(), i); + RegionInfo hri = + RegionReplicaUtil.getRegionInfoForReplica(hi.getPrimaryHRIForDeployedReplica(), i); HbckRegionInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegionsForHbi(h); - //set skip checks; we undeployed it, and we don't want to evaluate this anymore - //in consistency checks + // set skip checks; we undeployed it, and we don't want to evaluate this anymore + // in consistency checks h.setSkipChecks(true); } } @@ -2111,20 +2053,20 @@ private void undeployRegions(HbckRegionInfo hi) throws IOException, InterruptedE private void undeployRegionsForHbi(HbckRegionInfo hi) throws IOException, InterruptedException { for (HbckRegionInfo.OnlineEntry rse : hi.getOnlineEntries()) { - LOG.debug("Undeploy region " + rse.getRegionInfo() + " from " + rse.getServerName()); + LOG.debug("Undeploy 
region " + rse.getRegionInfo() + " from " + rse.getServerName()); try { - HBaseFsckRepair - .closeRegionSilentlyAndWait(connection, rse.getServerName(), rse.getRegionInfo()); + HBaseFsckRepair.closeRegionSilentlyAndWait(connection, rse.getServerName(), + rse.getRegionInfo()); offline(rse.getRegionInfo().getRegionName()); } catch (IOException ioe) { LOG.warn("Got exception when attempting to offline region " - + Bytes.toString(rse.getRegionInfo().getRegionName()), ioe); + + Bytes.toString(rse.getRegionInfo().getRegionName()), ioe); } } } - private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) throws IOException, - KeeperException, InterruptedException { + private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) + throws IOException, KeeperException, InterruptedException { // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print(msg); @@ -2145,8 +2087,8 @@ private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) throws IOExcept HbckRegionInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegions(h); - //set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore - //in consistency checks + // set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore + // in consistency checks h.setSkipChecks(true); } HBaseFsckRepair.fixUnassigned(admin, hri); @@ -2160,7 +2102,7 @@ private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) throws IOExcept * Check a single region for consistency and correct deployment. */ private void checkRegionConsistency(final String key, final HbckRegionInfo hbi) - throws IOException, KeeperException, InterruptedException { + throws IOException, KeeperException, InterruptedException { if (hbi.isSkipChecks()) return; String descriptiveName = hbi.toString(); @@ -2170,14 +2112,14 @@ private void checkRegionConsistency(final String key, final HbckRegionInfo hbi) boolean hasMetaAssignment = inMeta && hbi.getMetaEntry().regionServer != null; boolean isDeployed = !hbi.getDeployedOn().isEmpty(); boolean isMultiplyDeployed = hbi.getDeployedOn().size() > 1; - boolean deploymentMatchesMeta = hasMetaAssignment && isDeployed && !isMultiplyDeployed && - hbi.getMetaEntry().regionServer.equals(hbi.getDeployedOn().get(0)); - boolean splitParent = inMeta && hbi.getMetaEntry().getRegionInfo().isSplit() && - hbi.getMetaEntry().getRegionInfo().isOffline(); + boolean deploymentMatchesMeta = hasMetaAssignment && isDeployed && !isMultiplyDeployed + && hbi.getMetaEntry().regionServer.equals(hbi.getDeployedOn().get(0)); + boolean splitParent = inMeta && hbi.getMetaEntry().getRegionInfo().isSplit() + && hbi.getMetaEntry().getRegionInfo().isOffline(); boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.getMetaEntry().getRegionInfo().getTable()); - boolean recentlyModified = inHdfs && - hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); + boolean recentlyModified = + inHdfs && hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); // ========== First the healthy cases ============= if (hbi.containsOnlyHdfsEdits()) { @@ -2186,8 +2128,8 @@ private void checkRegionConsistency(final String key, final HbckRegionInfo hbi) if (inMeta && inHdfs && isDeployed && deploymentMatchesMeta && shouldBeDeployed) { return; } else if (inMeta && inHdfs && !shouldBeDeployed && !isDeployed) { - LOG.info("Region " + descriptiveName + " is in META, and in a disabled " + - "tabled that is not deployed"); + LOG.info("Region " + descriptiveName + " is in META, and 
in a disabled " + + "tabled that is not deployed"); return; } else if (recentlyModified) { LOG.warn("Region " + descriptiveName + " was recently modified -- skipping"); @@ -2198,9 +2140,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { // We shouldn't have record of this region at all then! assert false : "Entry for region with no data"; } else if (!inMeta && !inHdfs && isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, "Region " - + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " + - "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, + "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " + + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { undeployRegions(hbi); } @@ -2211,18 +2153,16 @@ else if (!inMeta && !inHdfs && !isDeployed) { // cleaned by CatalogJanitor later hbi.setSkipChecks(true); LOG.info("Region " + descriptiveName - + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); + + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); return; } - errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " - + descriptiveName + " on HDFS, but not listed in hbase:meta " + - "or deployed on any region server"); + errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName + + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server"); // restore region consistency of an adopted orphan if (shouldFixMeta()) { if (!hbi.isHdfsRegioninfoPresent()) { LOG.error("Region " + hbi.getHdfsHRI() + " could have been repaired" - + " in table integrity repair phase if -fixHdfsOrphans was" + - " used."); + + " in table integrity repair phase if -fixHdfsOrphans was" + " used."); return; } @@ -2230,11 +2170,13 @@ else if (!inMeta && !inHdfs && !isDeployed) { HbckTableInfo tableInfo = tablesInfo.get(hri.getTable()); for (RegionInfo region : tableInfo.getRegionsFromMeta(this.regionInfoMap)) { - if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0 - && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(), - hri.getEndKey()) >= 0) - && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0) { - if(region.isSplit() || region.isOffline()) continue; + if ( + Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0 + && (region.getEndKey().length == 0 + || Bytes.compareTo(region.getEndKey(), hri.getEndKey()) >= 0) + && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0 + ) { + if (region.isSplit() || region.isOffline()) continue; Path regionDir = hbi.getHdfsRegionDir(); FileSystem fs = regionDir.getFileSystem(getConf()); List familyDirs = FSUtils.getFamilyDirs(fs, regionDir); @@ -2242,13 +2184,13 @@ else if (!inMeta && !inHdfs && !isDeployed) { List referenceFilePaths = FSUtils.getReferenceFilePaths(fs, familyDir); for (Path referenceFilePath : referenceFilePaths) { Path parentRegionDir = - StoreFileInfo.getReferredToFile(referenceFilePath).getParent().getParent(); + StoreFileInfo.getReferredToFile(referenceFilePath).getParent().getParent(); if (parentRegionDir.toString().endsWith(region.getEncodedName())) { LOG.warn(hri + " start and stop keys are in the range of " + region - + ". The region might not be cleaned up from hdfs when region " + region - + " split failed. Hence deleting from hdfs."); - HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, - regionDir.getParent(), hri); + + ". 
The region might not be cleaned up from hdfs when region " + region + + " split failed. Hence deleting from hdfs."); + HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, regionDir.getParent(), + hri); return; } } @@ -2258,15 +2200,15 @@ else if (!inMeta && !inHdfs && !isDeployed) { LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(), numReplicas); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), + numReplicas); tryAssignmentRepair(hbi, "Trying to reassign region..."); } } else if (!inMeta && inHdfs && isDeployed) { errors.reportError(ERROR_CODE.NOT_IN_META, "Region " + descriptiveName - + " not in META, but deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + + " not in META, but deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); debugLsr(hbi.getHdfsRegionDir()); if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { // for replicas, this means that we should undeploy the region (we would have @@ -2286,12 +2228,12 @@ else if (!inMeta && !inHdfs && !isDeployed) { LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(), numReplicas); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), + numReplicas); tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); } - // ========== Cases where the region is in hbase:meta ============= + // ========== Cases where the region is in hbase:meta ============= } else if (inMeta && inHdfs && !isDeployed && splitParent) { // check whether this is an actual error, or just transient state where parent // is not cleaned @@ -2310,13 +2252,13 @@ else if (!inMeta && !inHdfs && !isDeployed) { // error is going to be reported against primary daughter region. if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { LOG.info("Region " + descriptiveName + " is a split parent in META, in HDFS, " - + "and not deployed on any region server. This may be transient."); + + "and not deployed on any region server. This may be transient."); hbi.setSkipChecks(true); return; } - errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region " - + descriptiveName + " is a split parent in META, in HDFS, " + errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, + "Region " + descriptiveName + " is a split parent in META, in HDFS, " + "and not deployed on any region server. 
This could be transient, " + "consider to run the catalog janitor first!"); if (shouldFixSplitParents()) { @@ -2324,18 +2266,17 @@ else if (!inMeta && !inHdfs && !isDeployed) { resetSplitParent(hbi); } } else if (inMeta && !inHdfs && !isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " - + descriptiveName + " found in META, but not in HDFS " - + "or deployed on any region server."); + errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " + descriptiveName + + " found in META, but not in HDFS " + "or deployed on any region server."); if (shouldFixMeta()) { deleteMetaRegion(hbi); } } else if (inMeta && !inHdfs && isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName - + " found in META, but not in HDFS, " + - "and deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); - // We treat HDFS as ground truth. Any information in meta is transient - // and equivalent data can be regenerated. So, lets unassign and remove + errors.reportError(ERROR_CODE.NOT_IN_HDFS, + "Region " + descriptiveName + " found in META, but not in HDFS, " + "and deployed on " + + Joiner.on(", ").join(hbi.getDeployedOn())); + // We treat HDFS as ground truth. Any information in meta is transient + // and equivalent data can be regenerated. So, lets unassign and remove // these problems from META. if (shouldFixAssignments()) { errors.print("Trying to fix unassigned region..."); @@ -2346,13 +2287,13 @@ else if (!inMeta && !inHdfs && !isDeployed) { deleteMetaRegion(hbi); } } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) { - errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName - + " not deployed on any region server."); + errors.reportError(ERROR_CODE.NOT_DEPLOYED, + "Region " + descriptiveName + " not deployed on any region server."); tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) { errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "Region " + descriptiveName + " should not be deployed according " + - "to META, but is deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " should not be deployed according " + + "to META, but is deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { errors.print("Trying to close the region " + descriptiveName); setShouldRerun(); @@ -2361,9 +2302,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } else if (inMeta && inHdfs && isMultiplyDeployed) { errors.reportError(ERROR_CODE.MULTI_DEPLOYED, - "Region " + descriptiveName + " is listed in hbase:meta on region server " + - hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " + - Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " is listed in hbase:meta on region server " + + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " + + Joiner.on(", ").join(hbi.getDeployedOn())); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2372,10 +2313,10 @@ else if (!inMeta && !inHdfs && !isDeployed) { hbi.getDeployedOn()); } } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) { - errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region " - + descriptiveName + " listed in hbase:meta on region server " + - hbi.getMetaEntry().regionServer + " but found on region server " + - hbi.getDeployedOn().get(0)); + 
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, + "Region " + descriptiveName + " listed in hbase:meta on region server " + + hbi.getMetaEntry().regionServer + " but found on region server " + + hbi.getDeployedOn().get(0)); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2385,22 +2326,17 @@ else if (!inMeta && !inHdfs && !isDeployed) { HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI()); } } else { - errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName + - " is in an unforeseen state:" + - " inMeta=" + inMeta + - " inHdfs=" + inHdfs + - " isDeployed=" + isDeployed + - " isMultiplyDeployed=" + isMultiplyDeployed + - " deploymentMatchesMeta=" + deploymentMatchesMeta + - " shouldBeDeployed=" + shouldBeDeployed); + errors.reportError(ERROR_CODE.UNKNOWN, + "Region " + descriptiveName + " is in an unforeseen state:" + " inMeta=" + inMeta + + " inHdfs=" + inHdfs + " isDeployed=" + isDeployed + " isMultiplyDeployed=" + + isMultiplyDeployed + " deploymentMatchesMeta=" + deploymentMatchesMeta + + " shouldBeDeployed=" + shouldBeDeployed); } } /** - * Checks tables integrity. Goes over all regions and scans the tables. - * Collects all the pieces for each table and checks if there are missing, - * repeated or overlapping ones. - * @throws IOException + * Checks tables integrity. Goes over all regions and scans the tables. Collects all the pieces + * for each table and checks if there are missing, repeated or overlapping ones. n */ SortedMap checkIntegrity() throws IOException { tablesInfo = new TreeMap<>(); @@ -2433,8 +2369,8 @@ SortedMap checkIntegrity() throws IOException { // Missing regionDir or over-deployment is checked elsewhere. Include // these cases in modTInfo, so we can evaluate those regions as part of // the region chain in META - //if (hbi.foundRegionDir == null) continue; - //if (hbi.deployedOn.size() != 1) continue; + // if (hbi.foundRegionDir == null) continue; + // if (hbi.deployedOn.size() != 1) continue; if (hbi.getDeployedOn().isEmpty()) { continue; } @@ -2468,8 +2404,9 @@ SortedMap checkIntegrity() throws IOException { return tablesInfo; } - /** Loads table info's for tables that may not have been included, since there are no - * regions reported for the table, but table dir is there in hdfs + /** + * Loads table info's for tables that may not have been included, since there are no regions + * reported for the table, but table dir is there in hdfs */ private void loadTableInfosForTablesWithNoRegion() throws IOException { Map allTables = new FSTableDescriptors(getConf()).getAll(); @@ -2503,11 +2440,11 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw try { dirs = fs.listStatus(contained.getHdfsRegionDir()); } catch (FileNotFoundException fnfe) { - // region we are attempting to merge in is not present! Since this is a merge, there is + // region we are attempting to merge in is not present! Since this is a merge, there is // no harm skipping this region if it does not exist. if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() - + " is missing. Assuming already sidelined or moved."); + + " is missing. 
Assuming already sidelined or moved."); } else { sidelineRegionDir(fs, contained); } @@ -2517,7 +2454,7 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw if (dirs == null) { if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() - + " already sidelined."); + + " already sidelined."); } else { sidelineRegionDir(fs, contained); } @@ -2526,7 +2463,7 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw for (FileStatus cf : dirs) { Path src = cf.getPath(); - Path dst = new Path(targetRegionDir, src.getName()); + Path dst = new Path(targetRegionDir, src.getName()); if (src.getName().equals(HRegionFileSystem.REGION_INFO_FILE)) { // do not copy the old .regioninfo file. @@ -2541,7 +2478,7 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw LOG.info("[" + thread + "] Moving files from " + src + " into containing region " + dst); // FileSystem.rename is inconsistent with directories -- if the // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir, - // it moves the src into the dst dir resulting in (foo/a/b). If + // it moves the src into the dst dir resulting in (foo/a/b). If // the dst does not exist, and the src a dir, src becomes dst. (foo/b) for (FileStatus hfile : fs.listStatus(src)) { boolean success = fs.rename(hfile.getPath(), dst); @@ -2555,20 +2492,19 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw // if all success. sidelineRegionDir(fs, contained); - LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " + - getSidelineDir()); + LOG.info("[" + thread + "] Sidelined region dir " + contained.getHdfsRegionDir() + " into " + + getSidelineDir()); debugLsr(contained.getHdfsRegionDir()); return fileMoves; } - static class WorkItemOverlapMerge implements Callable { private TableIntegrityErrorHandler handler; Collection overlapgroup; WorkItemOverlapMerge(Collection overlapgroup, - TableIntegrityErrorHandler handler) { + TableIntegrityErrorHandler handler) { this.handler = handler; this.overlapgroup = overlapgroup; } @@ -2581,10 +2517,9 @@ public Void call() throws Exception { } /** - * Return a list of user-space table names whose metadata have not been - * modified in the last few milliseconds specified by timelag - * if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER, - * SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last + * Return a list of user-space table names whose metadata have not been modified in the last few + * milliseconds specified by timelag if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, + * STARTCODE_QUALIFIER, SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last * milliseconds specified by timelag, then the table is a candidate to be returned. * @return tables that have not been modified recently * @throws IOException if an error is encountered @@ -2598,8 +2533,10 @@ TableDescriptor[] getTables(AtomicInteger numSkipped) { // if the start key is zero, then we have found the first region of a table. // pick only those tables that were not modified in the last few milliseconds. 
- if (info != null && info.getRegionInfo().getStartKey().length == 0 && - !info.getRegionInfo().isMetaRegion()) { + if ( + info != null && info.getRegionInfo().getStartKey().length == 0 + && !info.getRegionInfo().isMetaRegion() + ) { if (info.modTime + timelag < now) { tableNames.add(info.getRegionInfo().getTable()); } else { @@ -2611,9 +2548,9 @@ TableDescriptor[] getTables(AtomicInteger numSkipped) { } TableDescriptor[] getTableDescriptors(List tableNames) { - LOG.info("getTableDescriptors == tableNames => " + tableNames); + LOG.info("getTableDescriptors == tableNames => " + tableNames); try (Connection conn = ConnectionFactory.createConnection(getConf()); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { List tds = admin.listTableDescriptors(tableNames); return tds.toArray(new TableDescriptor[tds.size()]); } catch (IOException e) { @@ -2623,9 +2560,8 @@ TableDescriptor[] getTableDescriptors(List tableNames) { } /** - * Gets the entry in regionInfo corresponding to the the given encoded - * region name. If the region has not been seen yet, a new entry is added - * and returned. + * Gets the entry in regionInfo corresponding to the the given encoded region name. If the region + * has not been seen yet, a new entry is added and returned. */ private synchronized HbckRegionInfo getOrCreateInfo(String name) { HbckRegionInfo hbi = regionInfoMap.get(name); @@ -2647,14 +2583,11 @@ private void checkAndFixReplication() throws ReplicationException { } /** - * Check values in regionInfo for hbase:meta - * Check if zero or more than one regions with hbase:meta are found. - * If there are inconsistencies (i.e. zero or more than one regions - * pretend to be holding the hbase:meta) try to fix that and report an error. - * @throws IOException from HBaseFsckRepair functions - * @throws KeeperException - * @throws InterruptedException - */ + * Check values in regionInfo for hbase:meta Check if zero or more than one regions with + * hbase:meta are found. If there are inconsistencies (i.e. zero or more than one regions pretend + * to be holding the hbase:meta) try to fix that and report an error. + * @throws IOException from HBaseFsckRepair functions nn + */ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException { Map metaRegions = new HashMap<>(); for (HbckRegionInfo value : regionInfoMap.values()) { @@ -2662,8 +2595,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept metaRegions.put(value.getReplicaId(), value); } } - int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME) - .getRegionReplication(); + int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); boolean noProblem = true; // There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas // Check the deployed servers. It should be exactly one server for each replica. 
@@ -2678,12 +2610,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept if (servers.isEmpty()) { assignMetaReplica(i); } else if (servers.size() > 1) { - errors - .reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " + - metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); + errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " + + metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta, replicaId " + - metaHbckRegionInfo.getReplicaId() + ".."); + errors.print("Trying to fix a problem with hbase:meta, replicaId " + + metaHbckRegionInfo.getReplicaId() + ".."); setShouldRerun(); // try fix it (treat is a dupe assignment) HBaseFsckRepair.fixMultiAssignment(connection, @@ -2696,11 +2627,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept for (Map.Entry entry : metaRegions.entrySet()) { noProblem = false; errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "hbase:meta replicas are deployed in excess. Configured " + metaReplication + - ", deployed " + metaRegions.size()); + "hbase:meta replicas are deployed in excess. Configured " + metaReplication + ", deployed " + + metaRegions.size()); if (shouldFixAssignments()) { - errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + - " of hbase:meta.."); + errors.print( + "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta.."); setShouldRerun(); unassignMetaReplica(entry.getValue()); } @@ -2718,15 +2649,15 @@ private void unassignMetaReplica(HbckRegionInfo hi) } private void assignMetaReplica(int replicaId) - throws IOException, KeeperException, InterruptedException { - errors.reportError(ERROR_CODE.NO_META_REGION, "hbase:meta, replicaId " + - replicaId +" is not found on any region."); + throws IOException, KeeperException, InterruptedException { + errors.reportError(ERROR_CODE.NO_META_REGION, + "hbase:meta, replicaId " + replicaId + " is not found on any region."); if (shouldFixAssignments()) { errors.print("Trying to fix a problem with hbase:meta.."); setShouldRerun(); // try to fix it (treat it as unassigned region) - RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); + RegionInfo h = RegionReplicaUtil + .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); HBaseFsckRepair.fixUnassigned(admin, h); HBaseFsckRepair.waitUntilAssigned(admin, h); } @@ -2753,7 +2684,7 @@ public boolean visit(Result result) throws IOException { try { // record the latest modification of this META record - long ts = Collections.max(result.listCells(), comp).getTimestamp(); + long ts = Collections.max(result.listCells(), comp).getTimestamp(); RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result); if (rl == null) { emptyRegionInfoQualifiers.add(result); @@ -2762,16 +2693,17 @@ public boolean visit(Result result) throws IOException { return true; } ServerName sn = null; - if (rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null || - rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion() == null) { + if ( + rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null + || rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion() == null + ) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, "Empty REGIONINFO_QUALIFIER found in 
hbase:meta"); return true; } RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion(); - if (!(isTableIncluded(hri.getTable()) - || hri.isMetaRegion())) { + if (!(isTableIncluded(hri.getTable()) || hri.isMetaRegion())) { return true; } PairOfSameType daughters = MetaTableAccessor.getDaughterRegions(result); @@ -2785,7 +2717,7 @@ public boolean visit(Result result) throws IOException { HbckRegionInfo.MetaEntry m = null; if (hri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { m = new HbckRegionInfo.MetaEntry(hri, sn, ts, daughters.getFirst(), - daughters.getSecond()); + daughters.getSecond()); } else { m = new HbckRegionInfo.MetaEntry(hri, sn, ts, null, null); } @@ -2838,16 +2770,16 @@ private void printTableSummary(SortedMap tablesInfo) { int numOfSkippedRegions; errors.print("Summary:"); for (HbckTableInfo tInfo : tablesInfo.values()) { - numOfSkippedRegions = (skippedRegions.containsKey(tInfo.getName())) ? - skippedRegions.get(tInfo.getName()).size() : 0; + numOfSkippedRegions = (skippedRegions.containsKey(tInfo.getName())) + ? skippedRegions.get(tInfo.getName()).size() + : 0; if (errors.tableHasErrors(tInfo)) { errors.print("Table " + tInfo.getName() + " is inconsistent."); - } else if (numOfSkippedRegions > 0){ - errors.print("Table " + tInfo.getName() + " is okay (with " - + numOfSkippedRegions + " skipped regions)."); - } - else { + } else if (numOfSkippedRegions > 0) { + errors.print("Table " + tInfo.getName() + " is okay (with " + numOfSkippedRegions + + " skipped regions)."); + } else { errors.print("Table " + tInfo.getName() + " is okay."); } errors.print(" Number of regions: " + tInfo.getNumRegions()); @@ -2855,7 +2787,7 @@ private void printTableSummary(SortedMap tablesInfo) { Set skippedRegionStrings = skippedRegions.get(tInfo.getName()); System.out.println(" Number of skipped regions: " + numOfSkippedRegions); System.out.println(" List of skipped regions:"); - for(String sr : skippedRegionStrings) { + for (String sr : skippedRegionStrings) { System.out.println(" " + sr); } } @@ -2869,10 +2801,9 @@ private void printTableSummary(SortedMap tablesInfo) { } static HbckErrorReporter getErrorReporter(final Configuration conf) - throws ClassNotFoundException { - Class reporter = - conf.getClass("hbasefsck.errorreporter", PrintingErrorReporter.class, - HbckErrorReporter.class); + throws ClassNotFoundException { + Class reporter = conf.getClass("hbasefsck.errorreporter", + PrintingErrorReporter.class, HbckErrorReporter.class); return ReflectionUtils.newInstance(reporter, conf); } @@ -2911,14 +2842,14 @@ public synchronized void reportError(ERROR_CODE errorCode, String message) { @Override public synchronized void reportError(ERROR_CODE errorCode, String message, - HbckTableInfo table) { + HbckTableInfo table) { errorTables.add(table); reportError(errorCode, message); } @Override public synchronized void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, - HbckRegionInfo info) { + HbckRegionInfo info) { errorTables.add(table); String reference = "(region " + info.getRegionNameAsString() + ")"; reportError(errorCode, reference + " " + message); @@ -2926,10 +2857,10 @@ public synchronized void reportError(ERROR_CODE errorCode, String message, HbckT @Override public synchronized void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, - HbckRegionInfo info1, HbckRegionInfo info2) { + HbckRegionInfo info1, HbckRegionInfo info2) { errorTables.add(table); - String reference = "(regions " + info1.getRegionNameAsString() - + " 
and " + info2.getRegionNameAsString() + ")"; + String reference = + "(regions " + info1.getRegionNameAsString() + " and " + info2.getRegionNameAsString() + ")"; reportError(errorCode, reference + " " + message); } @@ -2939,13 +2870,12 @@ public synchronized void reportError(String message) { } /** - * Report error information, but do not increment the error count. Intended for cases - * where the actual error would have been reported previously. - * @param message + * Report error information, but do not increment the error count. Intended for cases where the + * actual error would have been reported previously. n */ @Override public synchronized void report(String message) { - if (! summary) { + if (!summary) { System.out.println("ERROR: " + message); } showProgress = 0; @@ -2953,8 +2883,7 @@ public synchronized void report(String message) { @Override public synchronized int summarize() { - System.out.println(Integer.toString(errorCount) + - " inconsistencies detected."); + System.out.println(Integer.toString(errorCount) + " inconsistencies detected."); if (errorCount == 0) { System.out.println("Status: OK"); return 0; @@ -3015,7 +2944,7 @@ static class WorkItemRegion implements Callable { private final Connection connection; WorkItemRegion(HBaseFsck hbck, ServerName info, HbckErrorReporter errors, - Connection connection) { + Connection connection) { this.hbck = hbck; this.rsinfo = info; this.errors = errors; @@ -3034,10 +2963,10 @@ public synchronized Void call() throws IOException { errors.detail( "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size()); for (RegionInfo rinfo : regions) { - errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + - " encoded_name: " + rinfo.getEncodedName() + " start: " + - Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + - Bytes.toStringBinary(rinfo.getEndKey())); + errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + + " encoded_name: " + rinfo.getEncodedName() + " start: " + + Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + + Bytes.toStringBinary(rinfo.getEndKey())); } } @@ -3057,8 +2986,7 @@ public synchronized Void call() throws IOException { private List filterRegions(List regions) { List ret = Lists.newArrayList(); for (RegionInfo hri : regions) { - if (hri.isMetaRegion() || (!hbck.checkMetaOnly - && hbck.isTableIncluded(hri.getTable()))) { + if (hri.isMetaRegion() || (!hbck.checkMetaOnly && hbck.isTableIncluded(hri.getTable()))) { ret.add(hri); } } @@ -3067,8 +2995,7 @@ private List filterRegions(List regions) { } /** - * Contact hdfs and get all information about specified table directory into - * regioninfo list. + * Contact hdfs and get all information about specified table directory into regioninfo list. */ class WorkItemHdfsDir implements Callable { private FileStatus tableDir; @@ -3105,9 +3032,10 @@ public synchronized Void call() throws InterruptedException, ExecutionException @Override public void run() { try { - LOG.debug("Loading region info from hdfs:"+ regionDir.getPath()); + LOG.debug("Loading region info from hdfs:" + regionDir.getPath()); - Path regioninfoFile = new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE); + Path regioninfoFile = + new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE); boolean regioninfoFileExists = fs.exists(regioninfoFile); if (!regioninfoFileExists) { @@ -3115,7 +3043,7 @@ public void run() { // reach a given region that it will be gone due to region splits/merges. 
if (!fs.exists(regionDir.getPath())) { LOG.warn("By the time we tried to process this region dir it was already gone: " - + regionDir.getPath()); + + regionDir.getPath()); return; } } @@ -3124,8 +3052,8 @@ public void run() { HbckRegionInfo.HdfsEntry he = new HbckRegionInfo.HdfsEntry(); synchronized (hbi) { if (hbi.getHdfsRegionDir() != null) { - errors.print("Directory " + encodedName + " duplicate??" + - hbi.getHdfsRegionDir()); + errors + .print("Directory " + encodedName + " duplicate??" + hbi.getHdfsRegionDir()); } he.regionDir = regionDir.getPath(); @@ -3174,11 +3102,11 @@ public void run() { } finally { if (!exceptions.isEmpty()) { errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "Table Directory: " - + tableDir.getPath().getName() - + " Unable to fetch all HDFS region information. "); + + tableDir.getPath().getName() + " Unable to fetch all HDFS region information. "); // Just throw the first exception as an indication something bad happened // Don't need to propagate all the exceptions, we already logged them all anyway - throw new ExecutionException("First exception in WorkItemHdfsDir", exceptions.firstElement()); + throw new ExecutionException("First exception in WorkItemHdfsDir", + exceptions.firstElement()); } } return null; @@ -3186,8 +3114,7 @@ public void run() { } /** - * Contact hdfs and get all information about specified table directory into - * regioninfo list. + * Contact hdfs and get all information about specified table directory into regioninfo list. */ static class WorkItemHdfsRegionInfo implements Callable { private HbckRegionInfo hbi; @@ -3209,10 +3136,9 @@ public synchronized Void call() throws IOException { hbi.loadHdfsRegioninfo(hbck.getConf()); } catch (IOException ioe) { String msg = "Orphan region in HDFS: Unable to load .regioninfo from table " - + hbi.getTableName() + " in hdfs dir " - + hbi.getHdfsRegionDir() - + "! It may be an invalid format or version file. Treating as " - + "an orphaned regiondir."; + + hbi.getTableName() + " in hdfs dir " + hbi.getHdfsRegionDir() + + "! It may be an invalid format or version file. Treating as " + + "an orphaned regiondir."; errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg); try { hbck.debugLsr(hbi.getHdfsRegionDir()); @@ -3229,8 +3155,8 @@ public synchronized Void call() throws IOException { } /** - * Display the full report from fsck. This displays all live and dead region - * servers, and all known regions. + * Display the full report from fsck. This displays all live and dead region servers, and all + * known regions. */ public static void setDisplayFullReport() { details = true; @@ -3255,16 +3181,14 @@ public boolean isExclusive() { } /** - * Set summary mode. - * Print only summary of the tables and status (OK or INCONSISTENT) + * Set summary mode. Print only summary of the tables and status (OK or INCONSISTENT) */ static void setSummary() { summary = true; } /** - * Set hbase:meta check mode. - * Print only info about hbase:meta table deployment/state + * Set hbase:meta check mode. Print only info about hbase:meta table deployment/state */ void setCheckMetaOnly() { checkMetaOnly = true; @@ -3290,9 +3214,8 @@ public void setCleanReplicationBarrier(boolean shouldClean) { } /** - * Check if we should rerun fsck again. This checks if we've tried to - * fix something and we should rerun fsck tool again. - * Display the full report from fsck. This displays all live and dead + * Check if we should rerun fsck again. This checks if we've tried to fix something and we should + * rerun fsck tool again. 
Display the full report from fsck. This displays all live and dead * region servers, and all known regions. */ void setShouldRerun() { @@ -3304,8 +3227,8 @@ public boolean shouldRerun() { } /** - * Fix inconsistencies found by fsck. This should try to fix errors (if any) - * found by fsck utility. + * Fix inconsistencies found by fsck. This should try to fix errors (if any) found by fsck + * utility. */ public void setFixAssignments(boolean shouldFix) { fixAssignments = shouldFix; @@ -3459,8 +3382,7 @@ public int getMaxOverlapsToSideline() { } /** - * Only check/fix tables specified by the list, - * Empty list means all tables are included. + * Only check/fix tables specified by the list, Empty list means all tables are included. */ boolean isTableIncluded(TableName table) { return (tablesIncluded.isEmpty()) || tablesIncluded.contains(table); @@ -3475,8 +3397,8 @@ Set getIncludedTables() { } /** - * We are interested in only those tables that have not changed their state in - * hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag + * We are interested in only those tables that have not changed their state in hbase:meta during + * the last few seconds specified by hbase.admin.fsck.timelag * @param seconds - the time in seconds */ public void setTimeLag(long seconds) { @@ -3484,14 +3406,14 @@ public void setTimeLag(long seconds) { } /** - * * @param sidelineDir - HDFS path to sideline data */ public void setSidelineDir(String sidelineDir) { this.sidelineDir = new Path(sidelineDir); } - protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { + protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) + throws IOException { return new HFileCorruptionChecker(getConf(), executor, sidelineCorruptHFiles); } @@ -3526,65 +3448,77 @@ protected HBaseFsck printUsageAndExit() { out.println(" where [opts] are:"); out.println(" -help Display help options (this)"); out.println(" -details Display full report of all regions."); - out.println(" -timelag Process only regions that " + - " have not experienced any metadata updates in the last " + - " seconds."); - out.println(" -sleepBeforeRerun Sleep this many seconds" + - " before checking if the fix worked if run with -fix"); + out.println(" -timelag Process only regions that " + + " have not experienced any metadata updates in the last " + " seconds."); + out.println(" -sleepBeforeRerun Sleep this many seconds" + + " before checking if the fix worked if run with -fix"); out.println(" -summary Print only summary of the tables and status."); out.println(" -metaonly Only check the state of the hbase:meta table."); out.println(" -sidelineDir HDFS path to backup existing meta."); - out.println(" -boundaries Verify that regions boundaries are the same between META and store files."); + out.println( + " -boundaries Verify that regions boundaries are the same between META and store files."); out.println(" -exclusive Abort if another hbck is exclusive or fixing."); out.println(""); out.println(" Datafile Repair options: (expert features, use with caution!)"); - out.println(" -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid"); - out.println(" -sidelineCorruptHFiles Quarantine corrupted HFiles. implies -checkCorruptHFiles"); + out.println( + " -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid"); + out.println( + " -sidelineCorruptHFiles Quarantine corrupted HFiles. 
implies -checkCorruptHFiles"); out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); out.println(""); - out.println(" Metadata Repair options supported as of version 2.0: (expert features, use with caution!)"); + out.println( + " Metadata Repair options supported as of version 2.0: (expert features, use with caution!)"); out.println(" -fixVersionFile Try to fix missing hbase.version file in hdfs."); out.println(" -fixReferenceFiles Try to offline lingering reference store files"); out.println(" -fixHFileLinks Try to offline lingering HFileLinks"); out.println(" -noHdfsChecking Don't load/check region info from HDFS." - + " Assumes hbase:meta region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap"); + + " Assumes hbase:meta region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap"); out.println(" -ignorePreCheckPermission ignore filesystem permission pre-check"); out.println(""); out.println("NOTE: Following options are NOT supported as of HBase version 2.0+."); out.println(""); out.println(" UNSUPPORTED Metadata Repair options: (expert features, use with caution!)"); - out.println(" -fix Try to fix region assignments. This is for backwards compatibility"); + out.println( + " -fix Try to fix region assignments. This is for backwards compatibility"); out.println(" -fixAssignments Try to fix region assignments. Replaces the old -fix"); - out.println(" -fixMeta Try to fix meta problems. This assumes HDFS region info is good."); + out.println( + " -fixMeta Try to fix meta problems. This assumes HDFS region info is good."); out.println(" -fixHdfsHoles Try to fix region holes in hdfs."); out.println(" -fixHdfsOrphans Try to fix region dirs with no .regioninfo file in hdfs"); - out.println(" -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)"); + out.println( + " -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)"); out.println(" -fixHdfsOverlaps Try to fix region overlaps in hdfs."); - out.println(" -maxMerge When fixing region overlaps, allow at most regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)"); - out.println(" -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps"); - out.println(" -maxOverlapsToSideline When fixing region overlaps, allow at most regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)"); + out.println( + " -maxMerge When fixing region overlaps, allow at most regions to merge. (n=" + + DEFAULT_MAX_MERGE + " by default)"); + out.println( + " -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps"); + out.println( + " -maxOverlapsToSideline When fixing region overlaps, allow at most regions to sideline per group. 
(n=" + + DEFAULT_OVERLAPS_TO_SIDELINE + " by default)"); out.println(" -fixSplitParents Try to force offline split parents to be online."); - out.println(" -removeParents Try to offline and sideline lingering parents and keep daughter regions."); + out.println( + " -removeParents Try to offline and sideline lingering parents and keep daughter regions."); out.println(" -fixEmptyMetaCells Try to fix hbase:meta entries not referencing any region" - + " (empty REGIONINFO_QUALIFIER rows)"); + + " (empty REGIONINFO_QUALIFIER rows)"); out.println(""); out.println(" UNSUPPORTED Metadata Repair shortcuts"); - out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " + - "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles" + - "-fixHFileLinks"); + out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " + + "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles" + + "-fixHFileLinks"); out.println(" -repairHoles Shortcut for -fixAssignments -fixMeta -fixHdfsHoles"); out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); - out.println(" -cleanReplicationBarrier [tableName] clean the replication barriers " + - "of a specified table, tableName is required"); + out.println(" -cleanReplicationBarrier [tableName] clean the replication barriers " + + "of a specified table, tableName is required"); out.flush(); errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString()); @@ -3593,10 +3527,7 @@ protected HBaseFsck printUsageAndExit() { } /** - * Main program - * - * @param args - * @throws Exception + * Main program nn */ public static void main(String[] args) throws Exception { // create a fsck object @@ -3612,7 +3543,10 @@ public static void main(String[] args) throws Exception { * This is a Tool wrapper that gathers -Dxxx=yyy configuration settings from the command line. 
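The HBaseFsckTool wrapper described just above is a thin Configured/Tool shim so that ToolRunner can fold -Dkey=value arguments into the Configuration before run() is called. A minimal sketch of that wiring, with a hypothetical FsckDriver class standing in for the real tool:

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FsckDriver extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // ToolRunner has already merged any -Dkey=value arguments into getConf()
    // before run() is invoked; only the remaining arguments arrive here.
    System.out.println("remaining args: " + String.join(" ", args));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    int ret = ToolRunner.run(HBaseConfiguration.create(), new FsckDriver(), args);
    System.exit(ret);
  }
}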
*/ static class HBaseFsckTool extends Configured implements Tool { - HBaseFsckTool(Configuration conf) { super(conf); } + HBaseFsckTool(Configuration conf) { + super(conf); + } + @Override public int run(String[] args) throws Exception { HBaseFsck hbck = new HBaseFsck(getConf()); @@ -3623,7 +3557,7 @@ public int run(String[] args) throws Exception { } public HBaseFsck exec(ExecutorService exec, String[] args) - throws KeeperException, IOException, InterruptedException, ReplicationException { + throws KeeperException, IOException, InterruptedException, ReplicationException { long sleepBeforeRerun = DEFAULT_SLEEP_BEFORE_RERUN; boolean checkCorruptHFiles = false; @@ -3652,8 +3586,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) } } else if (cmd.equals("-sleepBeforeRerun")) { if (i == args.length - 1) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "HBaseFsck: -sleepBeforeRerun needs a value."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sleepBeforeRerun needs a value."); return printUsageAndExit(); } try { @@ -3746,16 +3679,14 @@ public HBaseFsck exec(ExecutorService exec, String[] args) } } else if (cmd.equals("-maxMerge")) { if (i == args.length - 1) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "-maxMerge needs a numeric value argument."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument."); return printUsageAndExit(); } try { int maxMerge = Integer.parseInt(args[++i]); setMaxMerge(maxMerge); } catch (NumberFormatException e) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "-maxMerge needs a numeric value argument."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument."); return printUsageAndExit(); } } else if (cmd.equals("-summary")) { @@ -3768,7 +3699,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) setFixReplication(true); } else if (cmd.equals("-cleanReplicationBarrier")) { setCleanReplicationBarrier(true); - if(args[++i].startsWith("-")){ + if (args[++i].startsWith("-")) { printUsageAndExit(); } setCleanReplicationBarrierTable(args[i]); @@ -3859,7 +3790,7 @@ private boolean isOptionsSupported(String[] args) { for (String arg : args) { if (unsupportedOptionsInV2.contains(arg)) { errors.reportError(ERROR_CODE.UNSUPPORTED_OPTION, - "option '" + arg + "' is not " + "supported!"); + "option '" + arg + "' is not " + "supported!"); result = false; break; } @@ -3899,10 +3830,10 @@ public void cleanReplicationBarrier() throws IOException { barrierScan.setCaching(100); barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY); barrierScan - .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable, - ClientMetaTableAccessor.QueryType.REGION)) - .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable, - ClientMetaTableAccessor.QueryType.REGION)); + .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable, + ClientMetaTableAccessor.QueryType.REGION)) + .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable, + ClientMetaTableAccessor.QueryType.REGION)); Result result; try (ResultScanner scanner = meta.getScanner(barrierScan)) { while ((result = scanner.next()) != null) { @@ -3915,13 +3846,13 @@ public void cleanReplicationBarrier() throws IOException { return; } ReplicationQueueStorage queueStorage = - ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); + ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf()); List peerDescriptions = admin.listReplicationPeers(); if (peerDescriptions != null && peerDescriptions.size() > 0) { List peers = peerDescriptions.stream() - .filter(peerConfig -> peerConfig.getPeerConfig() - .needToReplicate(cleanReplicationBarrierTable)) - .map(peerConfig -> peerConfig.getPeerId()).collect(Collectors.toList()); + .filter( + peerConfig -> peerConfig.getPeerConfig().needToReplicate(cleanReplicationBarrierTable)) + .map(peerConfig -> peerConfig.getPeerId()).collect(Collectors.toList()); try { List batch = new ArrayList<>(); for (String peer : peers) { @@ -3957,16 +3888,15 @@ void debugLsr(Path p) throws IOException { /** * ls -r for debugging purposes */ - public static void debugLsr(Configuration conf, - Path p) throws IOException { + public static void debugLsr(Configuration conf, Path p) throws IOException { debugLsr(conf, p, new PrintingErrorReporter()); } /** * ls -r for debugging purposes */ - public static void debugLsr(Configuration conf, - Path p, HbckErrorReporter errors) throws IOException { + public static void debugLsr(Configuration conf, Path p, HbckErrorReporter errors) + throws IOException { if (!LOG.isDebugEnabled() || p == null) { return; } @@ -3983,7 +3913,7 @@ public static void debugLsr(Configuration conf, } if (fs.getFileStatus(p).isDirectory()) { - FileStatus[] fss= fs.listStatus(p); + FileStatus[] fss = fs.listStatus(p); for (FileStatus status : fss) { debugLsr(conf, status.getPath(), errors); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index c6db715d1a0f..06b73c67a318 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -51,27 +49,23 @@ import org.slf4j.LoggerFactory; /** - * This class contains helper methods that repair parts of hbase's filesystem - * contents. + * This class contains helper methods that repair parts of hbase's filesystem contents. */ @InterfaceAudience.Private public class HBaseFsckRepair { private static final Logger LOG = LoggerFactory.getLogger(HBaseFsckRepair.class); /** - * Fix multiple assignment by doing silent closes on each RS hosting the region - * and then force ZK unassigned node to OFFLINE to trigger assignment by - * master. - * + * Fix multiple assignment by doing silent closes on each RS hosting the region and then force ZK + * unassigned node to OFFLINE to trigger assignment by master. 
* @param connection HBase connection to the cluster - * @param region Region to undeploy - * @param servers list of Servers to undeploy from + * @param region Region to undeploy + * @param servers list of Servers to undeploy from */ public static void fixMultiAssignment(Connection connection, RegionInfo region, - List servers) - throws IOException, KeeperException, InterruptedException { + List servers) throws IOException, KeeperException, InterruptedException { // Close region on the servers silently - for(ServerName server : servers) { + for (ServerName server : servers) { closeRegionSilentlyAndWait(connection, server, region); } @@ -80,50 +74,41 @@ public static void fixMultiAssignment(Connection connection, RegionInfo region, } /** - * Fix unassigned by creating/transition the unassigned ZK node for this - * region to OFFLINE state with a special flag to tell the master that this is - * a forced operation by HBCK. - * - * This assumes that info is in META. - * - * @param admin - * @param region - * @throws IOException - * @throws KeeperException + * Fix unassigned by creating/transition the unassigned ZK node for this region to OFFLINE state + * with a special flag to tell the master that this is a forced operation by HBCK. This assumes + * that info is in META. nnnn */ public static void fixUnassigned(Admin admin, RegionInfo region) - throws IOException, KeeperException, InterruptedException { + throws IOException, KeeperException, InterruptedException { // Force ZK node to OFFLINE so master assigns forceOfflineInZK(admin, region); } /** - * In 0.90, this forces an HRI offline by setting the RegionTransitionData - * in ZK to have HBCK_CODE_NAME as the server. This is a special case in - * the AssignmentManager that attempts an assign call by the master. - * - * This doesn't seem to work properly in the updated version of 0.92+'s hbck - * so we use assign to force the region into transition. This has the - * side-effect of requiring a RegionInfo that considers regionId (timestamp) - * in comparators that is addressed by HBASE-5563. + * In 0.90, this forces an HRI offline by setting the RegionTransitionData in ZK to have + * HBCK_CODE_NAME as the server. This is a special case in the AssignmentManager that attempts an + * assign call by the master. This doesn't seem to work properly in the updated version of 0.92+'s + * hbck so we use assign to force the region into transition. This has the side-effect of + * requiring a RegionInfo that considers regionId (timestamp) in comparators that is addressed by + * HBASE-5563. */ private static void forceOfflineInZK(Admin admin, final RegionInfo region) - throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException { + throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException { admin.assign(region.getRegionName()); } /* * Should we check all assignments or just not in RIT? 
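waitUntilAssigned in the next hunk reads hbase.hbck.assign.timeout (default 120000 ms) and keeps polling the cluster's regions-in-transition until the region leaves RIT or the deadline passes. Stripped of the HBase calls, the control flow is an ordinary poll-until-deadline loop; a minimal sketch, with isAssigned standing in for the real RIT check:

import java.io.IOException;
import java.util.function.BooleanSupplier;

final class PollUntilDeadline {
  static void waitFor(long timeoutMs, BooleanSupplier isAssigned)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (isAssigned.getAsBoolean()) {
        return;              // condition met, stop polling
      }
      Thread.sleep(1000);    // back off between checks, as the real loop does
    }
    throw new IOException("condition not met within " + timeoutMs + "ms");
  }
}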
*/ - public static void waitUntilAssigned(Admin admin, - RegionInfo region) throws IOException, InterruptedException { + public static void waitUntilAssigned(Admin admin, RegionInfo region) + throws IOException, InterruptedException { long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000); long expiration = timeout + EnvironmentEdgeManager.currentTime(); while (EnvironmentEdgeManager.currentTime() < expiration) { try { boolean inTransition = false; for (RegionState rs : admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) - .getRegionStatesInTransition()) { + .getRegionStatesInTransition()) { if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) { inTransition = true; break; @@ -134,16 +119,14 @@ public static void waitUntilAssigned(Admin admin, return; } // still in rit - LOG.info("Region still in transition, waiting for " - + "it to become assigned: " + region); + LOG.info("Region still in transition, waiting for " + "it to become assigned: " + region); } catch (IOException e) { - LOG.warn("Exception when waiting for region to become assigned," - + " retrying", e); + LOG.warn("Exception when waiting for region to become assigned," + " retrying", e); } Thread.sleep(1000); } - throw new IOException("Region " + region + " failed to move out of " + - "transition within timeout " + timeout + "ms"); + throw new IOException("Region " + region + " failed to move out of " + + "transition within timeout " + timeout + "ms"); } /** @@ -151,7 +134,7 @@ public static void waitUntilAssigned(Admin admin, * the region. This bypasses the active hmaster. */ public static void closeRegionSilentlyAndWait(Connection connection, ServerName server, - RegionInfo region) throws IOException, InterruptedException { + RegionInfo region) throws IOException, InterruptedException { long timeout = connection.getConfiguration().getLong("hbase.hbck.close.timeout", 120000); // this is a bit ugly but it is only used in the old hbck and tests, so I think it is fine. try (AsyncClusterConnection asyncConn = ClusterConnectionFactory @@ -163,8 +146,8 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName /** * Puts the specified RegionInfo into META with replica related columns */ - public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, - RegionInfo hri, Collection servers, int numReplicas) throws IOException { + public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri, + Collection servers, int numReplicas) throws IOException { Connection conn = ConnectionFactory.createConnection(conf); Table meta = conn.getTable(TableName.META_TABLE_NAME); Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); @@ -188,8 +171,8 @@ public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, /** * Creates, flushes, and closes a new region. 
*/ - public static HRegion createHDFSRegionDir(Configuration conf, - RegionInfo hri, TableDescriptor htd) throws IOException { + public static HRegion createHDFSRegionDir(Configuration conf, RegionInfo hri, TableDescriptor htd) + throws IOException { // Create HRegion Path root = CommonFSUtils.getRootDir(conf); HRegion region = HRegion.createHRegion(hri, root, conf, htd, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java index f54864492f35..9f26eda12c1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ private HFileArchiveUtil() { /** * Get the directory to archive a store directory - * @param conf {@link Configuration} to read for the archive directory name - * @param tableName table name under which the store currently lives + * @param conf {@link Configuration} to read for the archive directory name + * @param tableName table name under which the store currently lives * @param regionName region encoded name under which the store currently lives * @param familyName name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should @@ -54,10 +54,10 @@ public static Path getStoreArchivePath(final Configuration conf, final TableName /** * Get the directory to archive a store directory - * @param conf {@link Configuration} to read for the archive directory name. - * @param region parent region information under which the store currently lives + * @param conf {@link Configuration} to read for the archive directory name. + * @param region parent region information under which the store currently lives * @param tabledir directory for the table under which the store currently lives - * @param family name of the family in the store + * @param family name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should * not be archived */ @@ -68,7 +68,7 @@ public static Path getStoreArchivePath(Configuration conf, RegionInfo region, Pa /** * Gets the directory to archive a store directory. - * @param conf {@link Configuration} to read for the archive directory name. + * @param conf {@link Configuration} to read for the archive directory name. * @param region parent region information under which the store currently lives * @param family name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should @@ -87,8 +87,8 @@ public static Path getStoreArchivePath(Configuration conf, RegionInfo region, by * HDFS. This is mostly useful for archiving recovered edits, when * hbase.region.archive.recovered.edits is enabled. * @param rootDir {@link Path} the root dir under which archive path should be created. 
- * @param region parent region information under which the store currently lives - * @param family name of the family in the store + * @param region parent region information under which the store currently lives + * @param family name of the family in the store * @return {@link Path} to the WAL FS directory to archive the given store or null if it * should not be archived */ @@ -121,8 +121,8 @@ public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path r /** * Get the archive directory for a given region under the specified table - * @param rootDir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) + * @param rootDir {@link Path} to the root directory where hbase files are stored (for building + * the archive path) * @param tableName name of the table to archive. Cannot be null. * @return {@link Path} to the directory to archive the given region, or null if it * should not be archived @@ -140,8 +140,8 @@ public static Path getRegionArchiveDir(Path rootDir, TableName tableName, * Get the path to the table's archive directory. *

    * <p>
    * Generally of the form: /hbase/.archive/[tablename] - * @param rootdir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) + * @param rootdir {@link Path} to the root directory where hbase files are stored (for building + * the archive path) * @param tableName Name of the table to be archived. Cannot be null. * @return {@link Path} to the archive directory for the table */ @@ -153,7 +153,7 @@ public static Path getTableArchivePath(final Path rootdir, final TableName table * Get the path to the table archive directory based on the configured archive directory. *

    * <p>
    * Assumed that the table should already be archived. - * @param conf {@link Configuration} to read the archive directory property. Can be null + * @param conf {@link Configuration} to read the archive directory property. Can be null * @param tableName Name of the table to be archived. Cannot be null. * @return {@link Path} to the archive directory for the table */ @@ -166,7 +166,7 @@ public static Path getTableArchivePath(final Configuration conf, final TableName * Get the full path to the archive directory on the configured * {@link org.apache.hadoop.hbase.master.MasterFileSystem} * @param conf to look for archive directory name and root directory. Cannot be null. Notes for - * testing: requires a FileSystem root directory to be specified. + * testing: requires a FileSystem root directory to be specified. * @return the full {@link Path} to the archive directory, as defined by the configuration * @throws IOException if an unexpected error occurs */ @@ -178,7 +178,7 @@ public static Path getArchivePath(Configuration conf) throws IOException { * Get the full path to the archive directory on the configured * {@link org.apache.hadoop.hbase.master.MasterFileSystem} * @param rootdir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) + * the archive path) * @return the full {@link Path} to the archive directory, as defined by the configuration */ private static Path getArchivePath(final Path rootdir) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java index 774871b38263..c1ac06cada1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +18,12 @@ package org.apache.hadoop.hbase.util; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * This class encapsulates a byte array and overrides hashCode and equals so - * that it's identity is based on the data rather than the array instance. + * This class encapsulates a byte array and overrides hashCode and equals so that it's identity is + * based on the data rather than the array instance. 
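HashedBytes, reformatted in the next hunk, exists so that a byte[] can be used as a map key with value semantics: the hash is computed once up front, and equals() compares the cached hash before falling back to Arrays.equals (both visible in the hunk below). A minimal standalone sketch of the same value-equality pattern, under a hypothetical BytesKey name:

import java.util.Arrays;

final class BytesKey {
  private final byte[] bytes;
  private final int hash;

  BytesKey(byte[] bytes) {
    this.bytes = bytes;
    this.hash = Arrays.hashCode(bytes);  // computed once; arrays themselves hash by identity
  }

  @Override
  public int hashCode() {
    return hash;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null || getClass() != obj.getClass()) return false;
    BytesKey other = (BytesKey) obj;
    // Cheap cached-hash comparison first, full content comparison second.
    return hash == other.hash && Arrays.equals(bytes, other.bytes);
  }
}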
*/ @InterfaceAudience.Private @InterfaceStability.Stable @@ -50,10 +48,8 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || getClass() != obj.getClass()) - return false; + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; HashedBytes other = (HashedBytes) obj; return (hashCode == other.hashCode) && Arrays.equals(bytes, other.bytes); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java index 52012dfa2354..0735809424ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java @@ -18,27 +18,54 @@ package org.apache.hadoop.hbase.util; import java.util.ArrayList; - import org.apache.yetus.audience.InterfaceAudience; /** * Used by {@link HBaseFsck} reporting system. * @deprecated Since 2.3.0. To be removed in hbase4. Use HBCK2 instead. Remove when - * {@link HBaseFsck} is removed. + * {@link HBaseFsck} is removed. */ @Deprecated @InterfaceAudience.Private public interface HbckErrorReporter { enum ERROR_CODE { - UNKNOWN, NO_META_REGION, NULL_META_REGION, NO_VERSION_FILE, NOT_IN_META_HDFS, NOT_IN_META, - NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META, - NOT_DEPLOYED, MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE, - FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS, - HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION, - ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE, - LINGERING_HFILELINK, WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK, BOUNDARIES_ERROR, - ORPHAN_TABLE_STATE, NO_TABLE_STATE, UNDELETED_REPLICATION_QUEUE, DUPE_ENDKEYS, - UNSUPPORTED_OPTION, INVALID_TABLE + UNKNOWN, + NO_META_REGION, + NULL_META_REGION, + NO_VERSION_FILE, + NOT_IN_META_HDFS, + NOT_IN_META, + NOT_IN_META_OR_DEPLOYED, + NOT_IN_HDFS_OR_DEPLOYED, + NOT_IN_HDFS, + SERVER_DOES_NOT_MATCH_META, + NOT_DEPLOYED, + MULTI_DEPLOYED, + SHOULD_NOT_BE_DEPLOYED, + MULTI_META_REGION, + RS_CONNECT_FAILURE, + FIRST_REGION_STARTKEY_NOT_EMPTY, + LAST_REGION_ENDKEY_NOT_EMPTY, + DUPE_STARTKEYS, + HOLE_IN_REGION_CHAIN, + OVERLAP_IN_REGION_CHAIN, + REGION_CYCLE, + DEGENERATE_REGION, + ORPHAN_HDFS_REGION, + LINGERING_SPLIT_PARENT, + NO_TABLEINFO_FILE, + LINGERING_REFERENCE_HFILE, + LINGERING_HFILELINK, + WRONG_USAGE, + EMPTY_META_CELL, + EXPIRED_TABLE_LOCK, + BOUNDARIES_ERROR, + ORPHAN_TABLE_STATE, + NO_TABLE_STATE, + UNDELETED_REPLICATION_QUEUE, + DUPE_ENDKEYS, + UNSUPPORTED_OPTION, + INVALID_TABLE } void clear(); @@ -54,7 +81,7 @@ enum ERROR_CODE { void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, HbckRegionInfo info); void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, HbckRegionInfo info1, - HbckRegionInfo info2); + HbckRegionInfo info2); int summarize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java index c2bfa7bae145..b3fcd6862520 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the 
Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,8 +37,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Maintain information about a particular region. It gathers information - * from three places -- HDFS, META, and region servers. + * Maintain information about a particular region. It gathers information from three places -- HDFS, + * META, and region servers. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -59,24 +59,24 @@ public HbckRegionInfo(MetaEntry metaEntry) { } public synchronized int getReplicaId() { - return metaEntry != null? metaEntry.hri.getReplicaId(): deployedReplicaId; + return metaEntry != null ? metaEntry.hri.getReplicaId() : deployedReplicaId; } public synchronized void addServer(RegionInfo regionInfo, ServerName serverName) { - OnlineEntry rse = new OnlineEntry(regionInfo, serverName) ; + OnlineEntry rse = new OnlineEntry(regionInfo, serverName); this.deployedEntries.add(rse); this.deployedOn.add(serverName); // save the replicaId that we see deployed in the cluster this.deployedReplicaId = regionInfo.getReplicaId(); this.primaryHRIForDeployedReplica = - RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo); + RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo); } @Override public synchronized String toString() { StringBuilder sb = new StringBuilder(); sb.append("{ meta => "); - sb.append((metaEntry != null)? metaEntry.hri.getRegionNameAsString() : "null"); + sb.append((metaEntry != null) ? metaEntry.hri.getRegionNameAsString() : "null"); sb.append(", hdfs => " + getHdfsRegionDir()); sb.append(", deployed => " + Joiner.on(", ").join(deployedEntries)); sb.append(", replicaId => " + getReplicaId()); @@ -133,8 +133,8 @@ public List getDeployedOn() { } /** - * Read the .regioninfo file from the file system. If there is no - * .regioninfo, add it to the orphan hdfs region list. + * Read the .regioninfo file from the file system. If there is no .regioninfo, add it to the + * orphan hdfs region list. 
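loadHdfsRegioninfo(), whose javadoc ends just above, reads the region's .regioninfo descriptor from HDFS and flags the directory as an orphan when the file is missing. The first step is just an existence check on the region directory; a minimal sketch, where the literal ".regioninfo" stands in for HRegionFileSystem.REGION_INFO_FILE and the class name is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class RegionInfoFileCheck {
  // True when the region directory still carries its .regioninfo descriptor file.
  static boolean hasRegionInfoFile(Configuration conf, Path regionDir) throws IOException {
    FileSystem fs = regionDir.getFileSystem(conf);
    return fs.exists(new Path(regionDir, ".regioninfo"));
  }
}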
*/ public void loadHdfsRegioninfo(Configuration conf) throws IOException { Path regionDir = getHdfsRegionDir(); @@ -264,16 +264,16 @@ public boolean isMerged() { */ public static class MetaEntry { RegionInfo hri; - ServerName regionServer; // server hosting this region - long modTime; // timestamp of most recent modification metadata - RegionInfo splitA, splitB; //split daughters + ServerName regionServer; // server hosting this region + long modTime; // timestamp of most recent modification metadata + RegionInfo splitA, splitB; // split daughters public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime) { this(rinfo, regionServer, modTime, null, null); } - public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime, - RegionInfo splitA, RegionInfo splitB) { + public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime, RegionInfo splitA, + RegionInfo splitB) { this.hri = rinfo; this.regionServer = regionServer; this.modTime = modTime; @@ -371,8 +371,8 @@ public int compare(HbckRegionInfo l, HbckRegionInfo r) { return tableCompare; } - int startComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare( - l.getStartKey(), r.getStartKey()); + int startComparison = + RegionSplitCalculator.BYTES_COMPARATOR.compare(l.getStartKey(), r.getStartKey()); if (startComparison != 0) { return startComparison; } @@ -382,8 +382,7 @@ public int compare(HbckRegionInfo l, HbckRegionInfo r) { endKey = (endKey.length == 0) ? null : endKey; byte[] endKey2 = l.getEndKey(); endKey2 = (endKey2.length == 0) ? null : endKey2; - int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare( - endKey2, endKey); + int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(endKey2, endKey); if (endComparison != 0) { return endComparison; @@ -405,4 +404,4 @@ public int compare(HbckRegionInfo l, HbckRegionInfo r) { return Long.compare(l.getHdfsEntry().hri.getRegionId(), r.getHdfsEntry().hri.getRegionId()); } }; -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java index 75699d888900..5a709619d8fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java @@ -30,7 +30,6 @@ import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -77,14 +76,14 @@ public class HbckTableInfo { // region split calculator final RegionSplitCalculator sc = - new RegionSplitCalculator<>(HbckRegionInfo.COMPARATOR); + new RegionSplitCalculator<>(HbckRegionInfo.COMPARATOR); - // Histogram of different TableDescriptors found. Ideally there is only one! + // Histogram of different TableDescriptors found. Ideally there is only one! final Set htds = new HashSet<>(); // key = start split, values = set of splits in problem group final Multimap overlapGroups = - TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, HbckRegionInfo.COMPARATOR); + TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, HbckRegionInfo.COMPARATOR); // list of regions derived from meta entries. private ImmutableList regionsFromMeta = null; @@ -98,14 +97,14 @@ public class HbckTableInfo { } /** - * @return descriptor common to all regions. null if are none or multiple! 
+ * @return descriptor common to all regions. null if are none or multiple! */ TableDescriptor getTableDescriptor() { if (htds.size() == 1) { - return (TableDescriptor)htds.toArray()[0]; + return (TableDescriptor) htds.toArray()[0]; } else { - LOG.error("None/Multiple table descriptors found for table '" - + tableName + "' regions: " + htds); + LOG.error( + "None/Multiple table descriptors found for table '" + tableName + "' regions: " + htds); } return null; } @@ -122,10 +121,11 @@ public void addRegionInfo(HbckRegionInfo hir) { // if not the absolute end key, check for cycle if (Bytes.compareTo(hir.getStartKey(), hir.getEndKey()) > 0) { - hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, String.format( + hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, + String.format( "The endkey for this region comes before the " + "startkey, startkey=%s, endkey=%s", - Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), this, - hir); + Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), + this, hir); backwards.add(hir); return; } @@ -149,8 +149,8 @@ public int getNumRegions() { return sc.getStarts().size() + backwards.size(); } - public synchronized ImmutableList getRegionsFromMeta( - TreeMap regionInfoMap) { + public synchronized ImmutableList + getRegionsFromMeta(TreeMap regionInfoMap) { // lazy loaded, synchronized to ensure a single load if (regionsFromMeta == null) { List regions = new ArrayList<>(); @@ -178,22 +178,23 @@ class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl { @Override public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, - "First region should start with an empty key. You need to " - + " create a new region and regioninfo in HDFS to plug the hole.", - getTableInfo(), hi); + "First region should start with an empty key. You need to " + + " create a new region and regioninfo in HDFS to plug the hole.", + getTableInfo(), hi); } @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, - "Last region should end with an empty key. You need to " - + "create a new region and regioninfo in HDFS to plug the hole.", getTableInfo()); + "Last region should end with an empty key. 
You need to " + + "create a new region and regioninfo in HDFS to plug the hole.", + getTableInfo()); } @Override - public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException{ + public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.DEGENERATE_REGION, - "Region has the same start and end key.", getTableInfo(), hi); + "Region has the same start and end key.", getTableInfo(), hi); } @Override @@ -201,55 +202,47 @@ public void handleDuplicateStartKeys(HbckRegionInfo r1, HbckRegionInfo r2) throw byte[] key = r1.getStartKey(); // dup start key errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS, - "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), - r1); + "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), + r1); errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS, - "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), - r2); + "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), + r2); } @Override - public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException{ + public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException { byte[] key = r1.getStartKey(); // dup start key errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS, - "Multiple regions have the same regionID: " - + Bytes.toStringBinary(key), getTableInfo(), r1); + "Multiple regions have the same regionID: " + Bytes.toStringBinary(key), getTableInfo(), + r1); errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS, - "Multiple regions have the same regionID: " - + Bytes.toStringBinary(key), getTableInfo(), r2); + "Multiple regions have the same regionID: " + Bytes.toStringBinary(key), getTableInfo(), + r2); } @Override public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN, - "There is an overlap in the region chain.", getTableInfo(), hi1, hi2); + "There is an overlap in the region chain.", getTableInfo(), hi1, hi2); } @Override public void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop) throws IOException { - errors.reportError( - HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, - "There is a hole in the region chain between " - + Bytes.toStringBinary(holeStart) + " and " - + Bytes.toStringBinary(holeStop) - + ". You need to create a new .regioninfo and region " - + "dir in hdfs to plug the hole."); + errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, + "There is a hole in the region chain between " + Bytes.toStringBinary(holeStart) + " and " + + Bytes.toStringBinary(holeStop) + ". You need to create a new .regioninfo and region " + + "dir in hdfs to plug the hole."); } } /** - * This handler fixes integrity errors from hdfs information. There are - * basically three classes of integrity problems 1) holes, 2) overlaps, and - * 3) invalid regions. - * - * This class overrides methods that fix holes and the overlap group case. - * Individual cases of particular overlaps are handled by the general - * overlap group merge repair case. - * - * If hbase is online, this forces regions offline before doing merge - * operations. + * This handler fixes integrity errors from hdfs information. 
There are basically three classes of + * integrity problems 1) holes, 2) overlaps, and 3) invalid regions. This class overrides methods + * that fix holes and the overlap group case. Individual cases of particular overlaps are handled + * by the general overlap group merge repair case. If hbase is online, this forces regions offline + * before doing merge operations. */ class HDFSIntegrityFixer extends IntegrityFixSuggester { Configuration conf; @@ -257,7 +250,7 @@ class HDFSIntegrityFixer extends IntegrityFixSuggester { boolean fixOverlaps = true; HDFSIntegrityFixer(HbckTableInfo ti, HbckErrorReporter errors, Configuration conf, - boolean fixHoles, boolean fixOverlaps) { + boolean fixHoles, boolean fixOverlaps) { super(ti, errors); this.conf = conf; this.fixOverlaps = fixOverlaps; @@ -265,84 +258,74 @@ class HDFSIntegrityFixer extends IntegrityFixSuggester { } /** - * This is a special case hole -- when the first region of a table is - * missing from META, HBase doesn't acknowledge the existance of the - * table. + * This is a special case hole -- when the first region of a table is missing from META, HBase + * doesn't acknowledge the existance of the table. */ @Override public void handleRegionStartKeyNotEmpty(HbckRegionInfo next) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, - "First region should start with an empty key. Creating a new " + - "region and regioninfo in HDFS to plug the hole.", - getTableInfo(), next); + "First region should start with an empty key. Creating a new " + + "region and regioninfo in HDFS to plug the hole.", + getTableInfo(), next); TableDescriptor htd = getTableInfo().getTableDescriptor(); // from special EMPTY_START_ROW to next region's startKey RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(HConstants.EMPTY_START_ROW) - .setEndKey(next.getStartKey()) - .build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(next.getStartKey()).build(); // TODO test HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("Table region start key was not empty. Created new empty region: " - + newRegion + " " +region); + LOG.info("Table region start key was not empty. Created new empty region: " + newRegion + " " + + region); hbck.fixes++; } @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, - "Last region should end with an empty key. Creating a new " - + "region and regioninfo in HDFS to plug the hole.", getTableInfo()); + "Last region should end with an empty key. Creating a new " + + "region and regioninfo in HDFS to plug the hole.", + getTableInfo()); TableDescriptor htd = getTableInfo().getTableDescriptor(); // from curEndKey to EMPTY_START_ROW - RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(curEndKey) - .setEndKey(HConstants.EMPTY_START_ROW) - .build(); + RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(curEndKey) + .setEndKey(HConstants.EMPTY_START_ROW).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("Table region end key was not empty. Created new empty region: " + newRegion - + " " + region); + LOG.info("Table region end key was not empty. Created new empty region: " + newRegion + " " + + region); hbck.fixes++; } /** - * There is a hole in the hdfs regions that violates the table integrity - * rules. 
Create a new empty region that patches the hole. + * There is a hole in the hdfs regions that violates the table integrity rules. Create a new + * empty region that patches the hole. */ @Override public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) - throws IOException { + throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, - "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + - " and " + Bytes.toStringBinary(holeStopKey) + - ". Creating a new regioninfo and region " + "dir in hdfs to plug the hole."); + "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + + " and " + Bytes.toStringBinary(holeStopKey) + ". Creating a new regioninfo and region " + + "dir in hdfs to plug the hole."); TableDescriptor htd = getTableInfo().getTableDescriptor(); - RegionInfo newRegion = - RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(holeStartKey) - .setEndKey(holeStopKey).build(); + RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) + .setStartKey(holeStartKey).setEndKey(holeStopKey).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region); hbck.fixes++; } /** - * This takes set of overlapping regions and merges them into a single - * region. This covers cases like degenerate regions, shared start key, - * general overlaps, duplicate ranges, and partial overlapping regions. - * - * Cases: - * - Clean regions that overlap - * - Only .oldlogs regions (can't find start/stop range, or figure out) - * - * This is basically threadsafe, except for the fixer increment in mergeOverlaps. + * This takes set of overlapping regions and merges them into a single region. This covers cases + * like degenerate regions, shared start key, general overlaps, duplicate ranges, and partial + * overlapping regions. Cases: - Clean regions that overlap - Only .oldlogs regions (can't find + * start/stop range, or figure out) This is basically threadsafe, except for the fixer increment + * in mergeOverlaps. 
*/ @Override - public void handleOverlapGroup(Collection overlap) - throws IOException { + public void handleOverlapGroup(Collection overlap) throws IOException { Preconditions.checkNotNull(overlap); - Preconditions.checkArgument(overlap.size() >0); + Preconditions.checkArgument(overlap.size() > 0); if (!this.fixOverlaps) { LOG.warn("Not attempting to repair overlaps."); @@ -350,9 +333,9 @@ public void handleOverlapGroup(Collection overlap) } if (overlap.size() > hbck.getMaxMerge()) { - LOG.warn("Overlap group has " + overlap.size() + " overlapping " + - "regions which is greater than " + hbck.getMaxMerge() + - ", the max number of regions to merge"); + LOG.warn( + "Overlap group has " + overlap.size() + " overlapping " + "regions which is greater than " + + hbck.getMaxMerge() + ", the max number of regions to merge"); if (hbck.shouldSidelineBigOverlaps()) { // we only sideline big overlapped groups that exceeds the max number of regions to merge sidelineBigOverlaps(overlap); @@ -385,24 +368,28 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce if (range == null) { range = new Pair(hi.getStartKey(), hi.getEndKey()); } else { - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getStartKey(), range.getFirst()) < 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getStartKey(), range.getFirst()) < 0 + ) { range.setFirst(hi.getStartKey()); } - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getEndKey(), range.getSecond()) > 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getEndKey(), range.getSecond()) > 0 + ) { range.setSecond(hi.getEndKey()); } } } LOG.info("This group range is [" + Bytes.toStringBinary(range.getFirst()) + ", " - + Bytes.toStringBinary(range.getSecond()) + "]"); + + Bytes.toStringBinary(range.getSecond()) + "]"); // attempt to find a possible parent for the edge case of a split for (HbckRegionInfo hi : overlap) { - if (Bytes.compareTo(hi.getHdfsHRI().getStartKey(), range.getFirst()) == 0 - && Bytes.compareTo(hi.getHdfsHRI().getEndKey(), range.getSecond()) == 0) { + if ( + Bytes.compareTo(hi.getHdfsHRI().getStartKey(), range.getFirst()) == 0 + && Bytes.compareTo(hi.getHdfsHRI().getEndKey(), range.getSecond()) == 0 + ) { LOG.info("This is a parent for this group: " + hi.toString()); parent = hi; } @@ -429,8 +416,10 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce } // daughters must share the same regionID and we should have a parent too - if (daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId() || - parent == null) { + if ( + daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId() + || parent == null + ) { return; } @@ -453,7 +442,7 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce hbck.offline(parent.getRegionName()); } catch (IOException ioe) { LOG.warn("Unable to offline parent region: " + parent.getRegionNameAsString() - + ". Just continuing with regular merge... ", ioe); + + ". Just continuing with regular merge... ", ioe); return; } @@ -461,14 +450,13 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce HBaseFsckRepair.removeParentInMeta(conf, parent.getHdfsHRI()); } catch (IOException ioe) { LOG.warn("Unable to remove parent region in META: " + parent.getRegionNameAsString() - + ". Just continuing with regular merge... ", ioe); + + ". Just continuing with regular merge... 
", ioe); return; } hbck.sidelineRegionDir(fs, parent); - LOG.info( - "[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + " into " + - hbck.getSidelineDir()); + LOG.info("[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + + " into " + hbck.getSidelineDir()); hbck.debugLsr(parent.getHdfsRegionDir()); // Make sure we don't have the parents and daughters around @@ -480,39 +468,40 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce } - void mergeOverlaps(Collection overlap) - throws IOException { + void mergeOverlaps(Collection overlap) throws IOException { String thread = Thread.currentThread().getName(); - LOG.info("== [" + thread + "] Merging regions into one region: " - + Joiner.on(",").join(overlap)); + LOG.info( + "== [" + thread + "] Merging regions into one region: " + Joiner.on(",").join(overlap)); // get the min / max range and close all concerned regions Pair range = null; for (HbckRegionInfo hi : overlap) { if (range == null) { range = new Pair<>(hi.getStartKey(), hi.getEndKey()); } else { - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getStartKey(), range.getFirst()) < 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getStartKey(), range.getFirst()) < 0 + ) { range.setFirst(hi.getStartKey()); } - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getEndKey(), range.getSecond()) > 0) { + if ( + RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getEndKey(), range.getSecond()) > 0 + ) { range.setSecond(hi.getEndKey()); } } // need to close files so delete can happen. - LOG.debug("[" + thread + "] Closing region before moving data around: " + hi); + LOG.debug("[" + thread + "] Closing region before moving data around: " + hi); LOG.debug("[" + thread + "] Contained region dir before close"); hbck.debugLsr(hi.getHdfsRegionDir()); try { LOG.info("[" + thread + "] Closing region: " + hi); hbck.closeRegion(hi); } catch (IOException ioe) { - LOG.warn("[" + thread + "] Was unable to close region " + hi - + ". Just continuing... ", ioe); + LOG.warn("[" + thread + "] Was unable to close region " + hi + ". Just continuing... ", + ioe); } catch (InterruptedException e) { - LOG.warn("[" + thread + "] Was unable to close region " + hi - + ". Just continuing... ", e); + LOG.warn("[" + thread + "] Was unable to close region " + hi + ". Just continuing... ", + e); } try { @@ -520,7 +509,7 @@ void mergeOverlaps(Collection overlap) hbck.offline(hi.getRegionName()); } catch (IOException ioe) { LOG.warn("[" + thread + "] Unable to offline region from master: " + hi - + ". Just continuing... ", ioe); + + ". Just continuing... ", ioe); } } @@ -528,19 +517,17 @@ void mergeOverlaps(Collection overlap) TableDescriptor htd = getTableInfo().getTableDescriptor(); // from start key to end Key RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(range.getFirst()) - .setEndKey(range.getSecond()) - .build(); + .setStartKey(range.getFirst()).setEndKey(range.getSecond()).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("[" + thread + "] Created new empty container region: " + - newRegion + " to contain regions: " + Joiner.on(",").join(overlap)); + LOG.info("[" + thread + "] Created new empty container region: " + newRegion + + " to contain regions: " + Joiner.on(",").join(overlap)); hbck.debugLsr(region.getRegionFileSystem().getRegionDir()); // all target regions are closed, should be able to safely cleanup. 
- boolean didFix= false; + boolean didFix = false; Path target = region.getRegionFileSystem().getRegionDir(); for (HbckRegionInfo contained : overlap) { - LOG.info("[" + thread + "] Merging " + contained + " into " + target); + LOG.info("[" + thread + "] Merging " + contained + " into " + target); int merges = hbck.mergeRegionDirs(target, contained); if (merges > 0) { didFix = true; @@ -552,9 +539,8 @@ void mergeOverlaps(Collection overlap) } /** - * Sideline some regions in a big overlap group so that it - * will have fewer regions, and it is easier to merge them later on. - * + * Sideline some regions in a big overlap group so that it will have fewer regions, and it is + * easier to merge them later on. * @param bigOverlap the overlapped group with regions more than maxMerge */ void sidelineBigOverlaps(Collection bigOverlap) throws IOException { @@ -563,26 +549,26 @@ void sidelineBigOverlaps(Collection bigOverlap) throws IOExcepti overlapsToSideline = hbck.getMaxOverlapsToSideline(); } List regionsToSideline = - RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline); + RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline); FileSystem fs = FileSystem.get(conf); - for (HbckRegionInfo regionToSideline: regionsToSideline) { + for (HbckRegionInfo regionToSideline : regionsToSideline) { try { LOG.info("Closing region: " + regionToSideline); hbck.closeRegion(regionToSideline); } catch (IOException ioe) { - LOG.warn("Was unable to close region " + regionToSideline - + ". Just continuing... ", ioe); + LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", + ioe); } catch (InterruptedException e) { - LOG.warn("Was unable to close region " + regionToSideline - + ". Just continuing... ", e); + LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", e); } try { LOG.info("Offlining region: " + regionToSideline); hbck.offline(regionToSideline.getRegionName()); } catch (IOException ioe) { - LOG.warn("Unable to offline region from master: " + regionToSideline - + ". Just continuing... ", ioe); + LOG.warn( + "Unable to offline region from master: " + regionToSideline + ". Just continuing... ", + ioe); } LOG.info("Before sideline big overlapped region: " + regionToSideline.toString()); @@ -590,8 +576,7 @@ void sidelineBigOverlaps(Collection bigOverlap) throws IOExcepti if (sidelineRegionDir != null) { sidelinedRegions.put(sidelineRegionDir, regionToSideline); LOG.info("After sidelined big overlapped region: " - + regionToSideline.getRegionNameAsString() - + " to " + sidelineRegionDir.toString()); + + regionToSideline.getRegionNameAsString() + " to " + sidelineRegionDir.toString()); hbck.fixes++; } } @@ -599,8 +584,8 @@ void sidelineBigOverlaps(Collection bigOverlap) throws IOExcepti } /** - * Check the region chain (from META) of this table. We are looking for - * holes, overlaps, and cycles. + * Check the region chain (from META) of this table. We are looking for holes, overlaps, and + * cycles. * @return false if there are errors */ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException { @@ -635,7 +620,7 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc // special endkey case converts '' to null byte[] endKey = rng.getEndKey(); endKey = (endKey.length == 0) ? 
null : endKey; - if (Bytes.equals(rng.getStartKey(),endKey)) { + if (Bytes.equals(rng.getStartKey(), endKey)) { handler.handleDegenerateRegion(rng); } } @@ -658,7 +643,7 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc // record errors ArrayList subRange = new ArrayList<>(ranges); - // this dumb and n^2 but this shouldn't happen often + // this dumb and n^2 but this shouldn't happen often for (HbckRegionInfo r1 : ranges) { if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { continue; @@ -669,10 +654,12 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc continue; } // general case of same start key - if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) { - handler.handleDuplicateStartKeys(r1,r2); - } else if (Bytes.compareTo(r1.getEndKey(), r2.getStartKey())==0 && - r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()) { + if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey()) == 0) { + handler.handleDuplicateStartKeys(r1, r2); + } else if ( + Bytes.compareTo(r1.getEndKey(), r2.getStartKey()) == 0 + && r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId() + ) { LOG.info("this is a split, log to splits"); handler.handleSplit(r1, r2); } else { @@ -718,28 +705,25 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc if (HBaseFsck.shouldDisplayFullReport()) { // do full region split map dump - hbck.getErrors().print("---- Table '" + this.tableName - + "': region split map"); + hbck.getErrors().print("---- Table '" + this.tableName + "': region split map"); dump(splits, regions); - hbck.getErrors().print("---- Table '" + this.tableName - + "': overlap groups"); + hbck.getErrors().print("---- Table '" + this.tableName + "': overlap groups"); dumpOverlapProblems(overlapGroups); - hbck.getErrors().print("There are " + overlapGroups.keySet().size() - + " overlap groups with " + overlapGroups.size() - + " overlapping regions"); + hbck.getErrors().print("There are " + overlapGroups.keySet().size() + " overlap groups with " + + overlapGroups.size() + " overlapping regions"); } if (!sidelinedRegions.isEmpty()) { LOG.warn("Sidelined big overlapped regions, please bulk load them!"); - hbck.getErrors().print("---- Table '" + this.tableName - + "': sidelined big overlapped regions"); + hbck.getErrors() + .print("---- Table '" + this.tableName + "': sidelined big overlapped regions"); dumpSidelinedRegions(sidelinedRegions); } return hbck.getErrors().getErrorList().size() == originalErrorsCount; } private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[] prevKey) - throws IOException { - // we parallelize overlap handler for the case we have lots of groups to fix. We can + throws IOException { + // we parallelize overlap handler for the case we have lots of groups to fix. We can // safely assume each group is independent. List merges = new ArrayList<>(overlapGroups.size()); List> rets; @@ -753,12 +737,12 @@ private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[ LOG.error("Overlap merges were interrupted", e); return false; } - for(int i=0; i f = rets.get(i); try { f.get(); - } catch(ExecutionException e) { + } catch (ExecutionException e) { LOG.warn("Failed to merge overlap group" + work, e.getCause()); } catch (InterruptedException e) { LOG.error("Waiting for overlap merges was interrupted", e); @@ -778,8 +762,7 @@ private void dump(SortedSet splits, Multimap reg sb.setLength(0); // clear out existing buffer, if any. 
sb.append(Bytes.toStringBinary(k) + ":\t"); for (HbckRegionInfo r : regions.get(k)) { - sb.append("[ "+ r.toString() + ", " - + Bytes.toStringBinary(r.getEndKey())+ "]\t"); + sb.append("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]\t"); } hbck.getErrors().print(sb.toString()); } @@ -791,8 +774,8 @@ private void dumpOverlapProblems(Multimap regions) { for (byte[] k : regions.keySet()) { hbck.getErrors().print(Bytes.toStringBinary(k) + ":"); for (HbckRegionInfo r : regions.get(k)) { - hbck.getErrors().print("[ " + r.toString() + ", " - + Bytes.toStringBinary(r.getEndKey()) + "]"); + hbck.getErrors() + .print("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]"); } hbck.getErrors().print("----"); } @@ -803,8 +786,8 @@ private void dumpSidelinedRegions(Map regions) { TableName tableName = entry.getValue().getTableName(); Path path = entry.getKey(); hbck.getErrors().print("This sidelined region dir should be bulk loaded: " + path.toString()); - hbck.getErrors().print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " + - path.toUri().getPath() + " " + tableName); + hbck.getErrors().print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " + + path.toUri().getPath() + " " + tableName); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java index 10cc4e98d39a..b1c66baf7395 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java index 179b7d4a732e..c7febcf0549b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java index 5492a8537d22..2fc4cca7c809 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,10 @@ import java.lang.ref.Reference; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class IdReadWriteLockWithObjectPool extends IdReadWriteLock{ +public class IdReadWriteLockWithObjectPool extends IdReadWriteLock { // The number of lock we want to easily support. It's not a maximum. private static final int NB_CONCURRENT_LOCKS = 1000; /** @@ -41,8 +39,8 @@ public IdReadWriteLockWithObjectPool() { /** * Constructor of IdReadWriteLockWithObjectPool * @param referenceType type of the reference used in lock pool, {@link ReferenceType#WEAK} by - * default. Use {@link ReferenceType#SOFT} if the key set is limited and the locks will - * be reused with a high frequency + * default. Use {@link ReferenceType#SOFT} if the key set is limited and the + * locks will be reused with a high frequency */ public IdReadWriteLockWithObjectPool(ReferenceType referenceType) { this.refType = referenceType; @@ -67,7 +65,8 @@ public ReentrantReadWriteLock createObject(T id) { } public static enum ReferenceType { - WEAK, SOFT + WEAK, + SOFT } /** @@ -89,7 +88,7 @@ int purgeAndGetEntryPoolSize() { return lockPool.size(); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DM_GC", justification = "Intentional") private void gc() { System.gc(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 1e2ac3ebb973..a9f9443d4bf8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,21 @@ */ package org.apache.hadoop.hbase.util; -import java.io.InterruptedIOException; import java.io.IOException; +import java.io.InterruptedIOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.regionserver.HRegionServer; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -59,29 +58,25 @@ public HRegionServer getRegionServer() { } /** - * Block until the region server has come online, indicating it is ready - * to be used. + * Block until the region server has come online, indicating it is ready to be used. */ public void waitForServerOnline() { // The server is marked online after the init method completes inside of - // the HRS#run method. HRS#init can fail for whatever region. In those - // cases, we'll jump out of the run without setting online flag. Check + // the HRS#run method. 
HRS#init can fail for whatever region. In those + // cases, we'll jump out of the run without setting online flag. Check // stopRequested so we don't wait here a flag that will never be flipped. regionServer.waitForServerOnline(); } } /** - * Creates a {@link RegionServerThread}. - * Call 'start' on the returned thread to make it run. - * @param c Configuration to use. - * @param hrsc Class to create. - * @param index Used distinguishing the object returned. - * @throws IOException - * @return Region server added. + * Creates a {@link RegionServerThread}. Call 'start' on the returned thread to make it run. + * @param c Configuration to use. + * @param hrsc Class to create. + * @param index Used distinguishing the object returned. n * @return Region server added. */ public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c, - final Class hrsc, final int index) throws IOException { + final Class hrsc, final int index) throws IOException { HRegionServer server; try { Constructor ctor = hrsc.getConstructor(Configuration.class); @@ -89,16 +84,14 @@ public static JVMClusterUtil.RegionServerThread createRegionServerThread(final C server = ctor.newInstance(c); } catch (InvocationTargetException ite) { Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of RegionServer: " + - hrsc.toString() + ((target.getCause() != null)? - target.getCause().getMessage(): ""), target); + throw new RuntimeException("Failed construction of RegionServer: " + hrsc.toString() + + ((target.getCause() != null) ? target.getCause().getMessage() : ""), target); } catch (Exception e) { throw new IOException(e); } return new JVMClusterUtil.RegionServerThread(server, index); } - /** * Datastructure to hold Master Thread and Master instance */ @@ -117,24 +110,20 @@ public HMaster getMaster() { } /** - * Creates a {@link MasterThread}. - * Call 'start' on the returned thread to make it run. - * @param c Configuration to use. - * @param hmc Class to create. - * @param index Used distinguishing the object returned. - * @throws IOException - * @return Master added. + * Creates a {@link MasterThread}. Call 'start' on the returned thread to make it run. + * @param c Configuration to use. + * @param hmc Class to create. + * @param index Used distinguishing the object returned. n * @return Master added. */ public static JVMClusterUtil.MasterThread createMasterThread(final Configuration c, - final Class hmc, final int index) throws IOException { + final Class hmc, final int index) throws IOException { HMaster server; try { server = hmc.getConstructor(Configuration.class).newInstance(c); } catch (InvocationTargetException ite) { Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of Master: " + - hmc.toString() + ((target.getCause() != null)? - target.getCause().getMessage(): ""), target); + throw new RuntimeException("Failed construction of Master: " + hmc.toString() + + ((target.getCause() != null) ? target.getCause().getMessage() : ""), target); } catch (Exception e) { throw new IOException(e); } @@ -142,12 +131,12 @@ public static JVMClusterUtil.MasterThread createMasterThread(final Configuration // just add the current master host port since we do not know other master addresses up front // in mini cluster tests. 
c.set(HConstants.MASTER_ADDRS_KEY, - Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); + Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); return new JVMClusterUtil.MasterThread(server, index); } - private static JVMClusterUtil.MasterThread findActiveMaster( - List masters) { + private static JVMClusterUtil.MasterThread + findActiveMaster(List masters) { for (JVMClusterUtil.MasterThread t : masters) { if (t.master.isActiveMaster()) { return t; @@ -158,14 +147,11 @@ private static JVMClusterUtil.MasterThread findActiveMaster( } /** - * Start the cluster. Waits until there is a primary master initialized - * and returns its address. - * @param masters - * @param regionservers - * @return Address to use contacting primary master. + * Start the cluster. Waits until there is a primary master initialized and returns its address. + * nn * @return Address to use contacting primary master. */ public static String startup(final List masters, - final List regionservers) throws IOException { + final List regionservers) throws IOException { // Implementation note: This method relies on timed sleeps in a loop. It's not great, and // should probably be re-written to use actual synchronization objects, but it's ok for now @@ -181,28 +167,29 @@ public static String startup(final List masters, } // Wait for an active master - // having an active master before starting the region threads allows - // then to succeed on their connection to master - final int startTimeout = configuration != null ? Integer.parseInt( - configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) : 30000; + // having an active master before starting the region threads allows + // then to succeed on their connection to master + final int startTimeout = configuration != null + ? Integer.parseInt(configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) + : 30000; waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != null); if (regionservers != null) { - for (JVMClusterUtil.RegionServerThread t: regionservers) { + for (JVMClusterUtil.RegionServerThread t : regionservers) { t.start(); } } // Wait for an active master to be initialized (implies being master) - // with this, when we return the cluster is complete - final int initTimeout = configuration != null ? Integer.parseInt( - configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) : 200000; + // with this, when we return the cluster is complete + final int initTimeout = configuration != null + ? Integer.parseInt(configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) + : 200000; waitForEvent(initTimeout, "initialized", () -> { - JVMClusterUtil.MasterThread t = findActiveMaster(masters); - // master thread should never be null at this point, but let's keep the check anyway - return t != null && t.master.isInitialized(); - } - ); + JVMClusterUtil.MasterThread t = findActiveMaster(masters); + // master thread should never be null at this point, but let's keep the check anyway + return t != null && t.master.isInitialized(); + }); return findActiveMaster(masters).master.getServerName().toString(); } @@ -210,15 +197,15 @@ public static String startup(final List masters, /** * Utility method to wait some time for an event to occur, and then return control to the caller. * @param millis How long to wait, in milliseconds. - * @param action The action that we are waiting for. Will be used in log message if the event - * does not occur. 
- * @param check A Supplier that will be checked periodically to produce an updated true/false - * result indicating if the expected event has happened or not. + * @param action The action that we are waiting for. Will be used in log message if the event does + * not occur. + * @param check A Supplier that will be checked periodically to produce an updated true/false + * result indicating if the expected event has happened or not. * @throws InterruptedIOException If we are interrupted while waiting for the event. - * @throws RuntimeException If we reach the specified timeout while waiting for the event. + * @throws RuntimeException If we reach the specified timeout while waiting for the event. */ private static void waitForEvent(long millis, String action, Supplier check) - throws InterruptedIOException { + throws InterruptedIOException { long end = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(millis); while (true) { @@ -235,18 +222,17 @@ private static void waitForEvent(long millis, String action, Supplier c try { Thread.sleep(100); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } } /** - * @param masters - * @param regionservers + * nn */ public static void shutdown(final List masters, - final List regionservers) { + final List regionservers) { LOG.debug("Shutting down HBase Cluster"); if (masters != null) { // Do backups first. @@ -260,15 +246,15 @@ public static void shutdown(final List masters, } catch (IOException e) { LOG.error("Exception occurred while stopping master", e); } - LOG.info("Stopped backup Master {} is stopped: {}", - t.master.hashCode(), t.master.isStopped()); + LOG.info("Stopped backup Master {} is stopped: {}", t.master.hashCode(), + t.master.isStopped()); } else { if (activeMaster != null) { LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode()); } activeMaster = t; - LOG.debug("Found active master hash={}, stopped={}", - t.master.hashCode(), t.master.isStopped()); + LOG.debug("Found active master hash={}, stopped={}", t.master.hashCode(), + t.master.isStopped()); } } } @@ -294,8 +280,8 @@ public static void shutdown(final List masters, try { t.join(maxTime - now); } catch (InterruptedException e) { - LOG.info("Got InterruptedException on shutdown - " + - "not waiting anymore on region server ends", e); + LOG.info("Got InterruptedException on shutdown - " + + "not waiting anymore on region server ends", e); wasInterrupted = true; // someone wants us to speed up. } } @@ -318,8 +304,8 @@ public static void shutdown(final List masters, if (!atLeastOneLiveServer) break; for (RegionServerThread t : regionservers) { if (t.isAlive()) { - LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump " + - "if > 3 attempts: i=" + i); + LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump " + + "if > 3 attempts: i=" + i); if (i > 3) { Threads.printThreadInfo(System.out, "Thread dump " + t.getName()); } @@ -337,20 +323,19 @@ public static void shutdown(final List masters, // tests. 
// this.master.join(): Threads.threadDumpingIsAlive(t.master); - } catch(InterruptedException e) { - LOG.info("Got InterruptedException on shutdown - " + - "not waiting anymore on master ends", e); + } catch (InterruptedException e) { + LOG.info( + "Got InterruptedException on shutdown - " + "not waiting anymore on master ends", e); wasInterrupted = true; } } } } - LOG.info("Shutdown of " + - ((masters != null) ? masters.size() : "0") + " master(s) and " + - ((regionservers != null) ? regionservers.size() : "0") + - " regionserver(s) " + (wasInterrupted ? "interrupted" : "complete")); + LOG.info("Shutdown of " + ((masters != null) ? masters.size() : "0") + " master(s) and " + + ((regionservers != null) ? regionservers.size() : "0") + " regionserver(s) " + + (wasInterrupted ? "interrupted" : "complete")); - if (wasInterrupted){ + if (wasInterrupted) { Thread.currentThread().interrupt(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java index 9c00771ee4fe..6d8566ab5716 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java @@ -23,12 +23,11 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; -import org.apache.hadoop.conf.Configuration; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -38,16 +37,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Class which sets up a simple thread which runs in a loop sleeping - * for a short interval of time. If the sleep takes significantly longer - * than its target time, it implies that the JVM or host machine has - * paused processing, which may cause other problems. If such a pause is - * detected, the thread logs a message. - * The original JvmPauseMonitor is: - * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ - * JvmPauseMonitor.java - * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line - * HADOOP-9618. thread which detects GC pauses(Todd Lipcon) + * Class which sets up a simple thread which runs in a loop sleeping for a short interval of time. + * If the sleep takes significantly longer than its target time, it implies that the JVM or host + * machine has paused processing, which may cause other problems. If such a pause is detected, the + * thread logs a message. The original JvmPauseMonitor is: + * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ JvmPauseMonitor.java + * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line HADOOP-9618. 
thread + * which detects GC pauses(Todd Lipcon) */ @InterfaceAudience.Private public class JvmPauseMonitor { @@ -55,17 +51,15 @@ public class JvmPauseMonitor { /** The target sleep time */ private static final long SLEEP_INTERVAL_MS = 500; - + /** log WARN if we detect a pause longer than this threshold */ private final long warnThresholdMs; - public static final String WARN_THRESHOLD_KEY = - "jvm.pause.warn-threshold.ms"; + public static final String WARN_THRESHOLD_KEY = "jvm.pause.warn-threshold.ms"; private static final long WARN_THRESHOLD_DEFAULT = 10000; - + /** log INFO if we detect a pause longer than this threshold */ private final long infoThresholdMs; - public static final String INFO_THRESHOLD_KEY = - "jvm.pause.info-threshold.ms"; + public static final String INFO_THRESHOLD_KEY = "jvm.pause.info-threshold.ms"; private static final long INFO_THRESHOLD_DEFAULT = 1000; private Thread monitorThread; @@ -81,7 +75,7 @@ public JvmPauseMonitor(Configuration conf, JvmPauseMonitorSource metricsSource) this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT); this.metricsSource = metricsSource; } - + public void start() { Preconditions.checkState(monitorThread == null, "Already started"); monitorThread = new Thread(new Monitor(), "JvmPauseMonitor"); @@ -98,10 +92,10 @@ public void stop() { Thread.currentThread().interrupt(); } } - + private String formatMessage(long extraSleepTime, List gcDiffs) { String ret = "Detected pause in JVM or host machine (eg GC): " + "pause of approximately " - + extraSleepTime + "ms\n"; + + extraSleepTime + "ms\n"; if (gcDiffs.isEmpty()) { ret += "No GCs detected"; } else { @@ -109,7 +103,7 @@ private String formatMessage(long extraSleepTime, List gcDiffs) { } return ret; } - + private Map getGcTimes() { Map map = Maps.newHashMap(); List gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); @@ -160,8 +154,8 @@ public void run() { Map gcTimesAfterSleep = getGcTimes(); if (extraSleepTime > infoThresholdMs) { - Set gcBeanNames = Sets.intersection(gcTimesAfterSleep.keySet(), - gcTimesBeforeSleep.keySet()); + Set gcBeanNames = + Sets.intersection(gcTimesAfterSleep.keySet(), gcTimesBeforeSleep.keySet()); List gcDiffs = Lists.newArrayList(); for (String name : gcBeanNames) { GcTimes diff = gcTimesAfterSleep.get(name).subtract(gcTimesBeforeSleep.get(name)); @@ -207,13 +201,11 @@ public void setMetricsSource(JvmPauseMonitorSource metricsSource) { } /** - * Simple 'main' to facilitate manual testing of the pause monitor. - * - * This main function just leaks memory into a list. Running this class - * with a 1GB heap will very quickly go into "GC hell" and result in - * log messages about the GC pauses. + * Simple 'main' to facilitate manual testing of the pause monitor. This main function just leaks + * memory into a list. Running this class with a 1GB heap will very quickly go into "GC hell" and + * result in log messages about the GC pauses. 
*/ - public static void main(String []args) throws Exception { + public static void main(String[] args) throws Exception { new JvmPauseMonitor(new Configuration()).start(); List list = Lists.newArrayList(); int i = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java index 65c952e4be73..01932b07c60a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.HashSet; import java.util.Set; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -47,8 +45,8 @@ public static boolean isBadJvmVersion() { * Return the current JVM version information. */ public static String getVersion() { - return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + - System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + - System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); + return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + + System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + + System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java index 29e7836a7481..b579b609f324 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java index e6075d2754bf..0857364fc06c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the lease was expected to be recovered, - * but the file can't be opened. + * Thrown when the lease was expected to be recovered, but the file can't be opened. 
*/ @InterfaceAudience.Public public class LeaseNotRecoveredException extends HBaseIOException { @@ -36,10 +34,10 @@ public LeaseNotRecoveredException(String message) { } public LeaseNotRecoveredException(String message, Throwable cause) { - super(message, cause); + super(message, cause); } public LeaseNotRecoveredException(Throwable cause) { - super(cause); + super(cause); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java index 9ade12d578c4..7d2483c66639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.util.Map; @@ -35,14 +33,10 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * LossyCounting utility, bounded data structure that maintains approximate high frequency - * elements in a data stream. - * - * Bucket size is 1 / error rate. (Error rate is 0.02 by default) - * Lemma: If an element does not appear in the set, then its frequency is less than e * N - * (N is total element counts until now.) - * Based on paper: - * http://www.vldb.org/conf/2002/S10P03.pdf + * LossyCounting utility, bounded data structure that maintains approximate high frequency elements + * in a data stream. Bucket size is 1 / error rate. (Error rate is 0.02 by default) Lemma: If an element + * does not appear in the set, then its frequency is less than e * N (N is total element counts until + * now.) Based on paper: http://www.vldb.org/conf/2002/S10P03.pdf */ @InterfaceAudience.Private public class LossyCounting { @@ -88,22 +82,22 @@ public LossyCounting(String name, Configuration conf, LossyCountingListener l } private void addByOne(T key) { - //If entry exists, we update the entry by incrementing its frequency by one. Otherwise, - //we create a new entry starting with currentTerm so that it will not be pruned immediately + // If entry exists, we update the entry by incrementing its frequency by one. Otherwise, + // we create a new entry starting with currentTerm so that it will not be pruned immediately data.put(key, data.getOrDefault(key, currentTerm != 0 ?
currentTerm - 1 : 0) + 1); - //update totalDataCount and term + // update totalDataCount and term totalDataCount++; calculateCurrentTerm(); } public void add(T key) { addByOne(key); - if(totalDataCount % bucketSize == 0) { - //sweep the entries at bucket boundaries - //run Sweep + if (totalDataCount % bucketSize == 0) { + // sweep the entries at bucket boundaries + // run Sweep Future future = fut.get(); - if (future != null && !future.isDone()){ + if (future != null && !future.isDone()) { return; } future = executor.submit(new SweepRunnable()); @@ -111,13 +105,12 @@ public void add(T key) { } } - /** * sweep low frequency data */ public void sweep() { - for(Map.Entry entry : data.entrySet()) { - if(entry.getValue() < currentTerm) { + for (Map.Entry entry : data.entrySet()) { + if (entry.getValue() < currentTerm) { T metric = entry.getKey(); data.remove(metric); if (listener != null) { @@ -134,7 +127,7 @@ private void calculateCurrentTerm() { this.currentTerm = (int) Math.ceil(1.0 * totalDataCount / (double) bucketSize); } - public long getBucketSize(){ + public long getBucketSize() { return bucketSize; } @@ -146,7 +139,7 @@ public boolean contains(T key) { return data.containsKey(key); } - public Set getElements(){ + public Set getElements() { return data.keySet(); } @@ -155,7 +148,8 @@ public long getCurrentTerm() { } class SweepRunnable implements Runnable { - @Override public void run() { + @Override + public void run() { if (LOG.isTraceEnabled()) { LOG.trace("Starting sweep of lossyCounting-" + name); } @@ -171,4 +165,3 @@ public Future getSweepFuture() { return fut.get(); } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java index e5081273d472..29cc1063b972 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * An environment edge that uses a manually set value. This is useful for testing events that are supposed to - * happen in the same millisecond. + * An environment edge that uses a manually set value. This is useful for testing events that are + * supposed to happen in the same millisecond. */ @InterfaceAudience.Private public class ManualEnvironmentEdge implements EnvironmentEdge { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index b1517c76c9a6..772b89bc135b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -30,7 +28,6 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -38,11 +35,12 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Utility methods for interacting with the regions. */ @@ -62,18 +60,12 @@ public interface RegionEditTask { } public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, - byte[][] splitKeys) { + byte[][] splitKeys) { long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo[] hRegionInfos = null; if (splitKeys == null || splitKeys.length == 0) { - hRegionInfos = new RegionInfo[]{ - RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setStartKey(null) - .setEndKey(null) - .setSplit(false) - .setRegionId(regionId) - .build() - }; + hRegionInfos = new RegionInfo[] { RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) + .setStartKey(null).setEndKey(null).setSplit(false).setRegionId(regionId).build() }; } else { int numRegions = splitKeys.length + 1; hRegionInfos = new RegionInfo[numRegions]; @@ -81,13 +73,8 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, byte[] endKey = null; for (int i = 0; i < numRegions; i++) { endKey = (i == splitKeys.length) ? null : splitKeys[i]; - hRegionInfos[i] = - RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(regionId) - .build(); + hRegionInfos[i] = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) + .setStartKey(startKey).setEndKey(endKey).setSplit(false).setRegionId(regionId).build(); startKey = endKey; } } @@ -95,23 +82,21 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, } /** - * Create new set of regions on the specified file-system. - * NOTE: that you should add the regions to hbase:meta after this operation. - * - * @param conf {@link Configuration} - * @param rootDir Root directory for HBase instance + * Create new set of regions on the specified file-system. NOTE: that you should add the regions + * to hbase:meta after this operation. 
+ * @param conf {@link Configuration} + * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table - * @param newRegions {@link RegionInfo} that describes the regions to create - * @param task {@link RegionFillTask} custom code to populate region after creation - * @throws IOException + * @param newRegions {@link RegionInfo} that describes the regions to create + * @param task {@link RegionFillTask} custom code to populate region after creation n */ public static List createRegions(final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, - final RegionFillTask task) throws IOException { + final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task) + throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf, - "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); + "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); try { return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task); } finally { @@ -120,21 +105,18 @@ public static List createRegions(final Configuration conf, final Pat } /** - * Create new set of regions on the specified file-system. - * NOTE: that you should add the regions to hbase:meta after this operation. - * - * @param exec Thread Pool Executor - * @param conf {@link Configuration} - * @param rootDir Root directory for HBase instance + * Create new set of regions on the specified file-system. NOTE: that you should add the regions + * to hbase:meta after this operation. + * @param exec Thread Pool Executor + * @param conf {@link Configuration} + * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table - * @param newRegions {@link RegionInfo} that describes the regions to create - * @param task {@link RegionFillTask} custom code to populate region after creation - * @throws IOException + * @param newRegions {@link RegionInfo} that describes the regions to create + * @param task {@link RegionFillTask} custom code to populate region after creation n */ public static List createRegions(final ThreadPoolExecutor exec, - final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, - final RegionFillTask task) throws IOException { + final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor, + final RegionInfo[] newRegions, final RegionFillTask task) throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; CompletionService completionService = new ExecutorCompletionService<>(exec); @@ -163,16 +145,15 @@ public RegionInfo call() throws IOException { /** * Create new set of regions on the specified file-system. 
- * @param conf {@link Configuration} - * @param rootDir Root directory for HBase instance + * @param conf {@link Configuration} + * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table - * @param newRegion {@link RegionInfo} that describes the region to create - * @param task {@link RegionFillTask} custom code to populate region after creation - * @throws IOException + * @param newRegion {@link RegionInfo} that describes the region to create + * @param task {@link RegionFillTask} custom code to populate region after creation n */ public static RegionInfo createRegion(final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo newRegion, - final RegionFillTask task) throws IOException { + final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task) + throws IOException { // 1. Create HRegion // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. @@ -193,16 +174,14 @@ public static RegionInfo createRegion(final Configuration conf, final Path rootD /** * Execute the task on the specified set of regions. - * - * @param exec Thread Pool Executor + * @param exec Thread Pool Executor * @param regions {@link RegionInfo} that describes the regions to edit - * @param task {@link RegionFillTask} custom code to edit the region - * @throws IOException + * @param task {@link RegionFillTask} custom code to edit the region n */ public static void editRegions(final ThreadPoolExecutor exec, - final Collection regions, final RegionEditTask task) throws IOException { + final Collection regions, final RegionEditTask task) throws IOException { final ExecutorCompletionService completionService = new ExecutorCompletionService<>(exec); - for (final RegionInfo hri: regions) { + for (final RegionInfo hri : regions) { completionService.submit(new Callable() { @Override public Void call() throws IOException { @@ -213,7 +192,7 @@ public Void call() throws IOException { } try { - for (RegionInfo hri: regions) { + for (RegionInfo hri : regions) { completionService.take().get(); } } catch (InterruptedException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java index bde7fea1c366..ede1f8b71508 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; @@ -32,10 +33,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Callable; - /** * Move Regions and make sure that they are up on the target server.If a region movement fails we * exit as failure @@ -66,10 +63,10 @@ class MoveWithAck implements Callable { public Boolean call() throws IOException, InterruptedException { boolean moved = false; int count = 0; - int retries = admin.getConfiguration() - .getInt(RegionMover.MOVE_RETRIES_MAX_KEY, RegionMover.DEFAULT_MOVE_RETRIES_MAX); - int maxWaitInSeconds = admin.getConfiguration() - .getInt(RegionMover.MOVE_WAIT_MAX_KEY, RegionMover.DEFAULT_MOVE_WAIT_MAX); + int retries = admin.getConfiguration().getInt(RegionMover.MOVE_RETRIES_MAX_KEY, + RegionMover.DEFAULT_MOVE_RETRIES_MAX); + int maxWaitInSeconds = admin.getConfiguration().getInt(RegionMover.MOVE_WAIT_MAX_KEY, + RegionMover.DEFAULT_MOVE_WAIT_MAX); long startTime = EnvironmentEdgeManager.currentTime(); boolean sameServer = true; // Assert we can scan the region in its current location @@ -114,8 +111,7 @@ private static String getTimeDiffInSec(long startTime) { */ private void isSuccessfulScan(RegionInfo region) throws IOException { Scan scan = new Scan().withStartRow(region.getStartKey()).setRaw(true).setOneRowLimit() - .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()) - .setCacheBlocks(false); + .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false); try (Table table = conn.getTable(region.getTable()); ResultScanner scanner = table.getScanner(scan)) { scanner.next(); @@ -129,8 +125,7 @@ private void isSuccessfulScan(RegionInfo region) throws IOException { * Returns true if passed region is still on serverName when we look at hbase:meta. 
* @return true if region is hosted on serverName otherwise false */ - private boolean isSameServer(RegionInfo region, ServerName serverName) - throws IOException { + private boolean isSameServer(RegionInfo region, ServerName serverName) throws IOException { ServerName serverForRegion = getServerNameForRegion(region, admin, conn); return serverForRegion != null && serverForRegion.equals(serverName); } @@ -141,13 +136,12 @@ private boolean isSameServer(RegionInfo region, ServerName serverName) * @return regionServer hosting the given region */ static ServerName getServerNameForRegion(RegionInfo region, Admin admin, Connection conn) - throws IOException { + throws IOException { if (!admin.isTableEnabled(region.getTable())) { return null; } - HRegionLocation loc = - conn.getRegionLocator(region.getTable()).getRegionLocation(region.getStartKey(), - region.getReplicaId(),true); + HRegionLocation loc = conn.getRegionLocator(region.getTable()) + .getRegionLocation(region.getStartKey(), region.getReplicaId(), true); if (loc != null) { return loc.getServerName(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java index 0ddb99ac4180..b4abd0de73db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; +import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; @@ -26,12 +26,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; -import java.util.concurrent.Callable; - /** - * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the - * RS down anyways and not abort on a stuck region. Improves movement performance + * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the RS + * down anyways and not abort on a stuck region. Improves movement performance */ @InterfaceAudience.Private class MoveWithoutAck implements Callable { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java index fa4c18442ac8..f3e731f53339 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,18 +20,15 @@ import java.util.Arrays; import java.util.Deque; import java.util.LinkedList; - import org.apache.yetus.audience.InterfaceAudience; /** - * Computes the optimal (minimal cost) assignment of jobs to workers (or other - * analogous) concepts given a cost matrix of each pair of job and worker, using - * the algorithm by James Munkres in "Algorithms for the Assignment and - * Transportation Problems", with additional optimizations as described by Jin - * Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment - * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in - * O(n^3) time and need O(n^2) auxiliary space where n is the number of jobs or - * workers, whichever is greater. + * Computes the optimal (minimal cost) assignment of jobs to workers (or other analogous) concepts + * given a cost matrix of each pair of job and worker, using the algorithm by James Munkres in + * "Algorithms for the Assignment and Transportation Problems", with additional optimizations as + * described by Jin Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment + * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in O(n^3) time and need + * O(n^2) auxiliary space where n is the number of jobs or workers, whichever is greater. */ @InterfaceAudience.Private public class MunkresAssignment { @@ -88,11 +84,10 @@ public class MunkresAssignment { private float[] colAdjust; /** - * Construct a new problem instance with the specified cost matrix. The cost - * matrix must be rectangular, though not necessarily square. If one dimension - * is greater than the other, some elements in the greater dimension will not - * be assigned. The input cost matrix will not be modified. - * @param costMatrix + * Construct a new problem instance with the specified cost matrix. The cost matrix must be + * rectangular, though not necessarily square. If one dimension is greater than the other, some + * elements in the greater dimension will not be assigned. The input cost matrix will not be + * modified. n */ public MunkresAssignment(float[][] costMatrix) { // The algorithm assumes that the number of columns is at least as great as @@ -146,11 +141,10 @@ public MunkresAssignment(float[][] costMatrix) { } /** - * Get the optimal assignments. The returned array will have the same number - * of elements as the number of elements as the number of rows in the input - * cost matrix. Each element will indicate which column should be assigned to - * that row or -1 if no column should be assigned, i.e. if result[i] = j then - * row i should be assigned to column j. Subsequent invocations of this method + * Get the optimal assignments. The returned array will have the same number of elements as the + * number of elements as the number of rows in the input cost matrix. Each element will indicate + * which column should be assigned to that row or -1 if no column should be assigned, i.e. if + * result[i] = j then row i should be assigned to column j. Subsequent invocations of this method * will simply return the same object without additional computation. * @return an array with the optimal assignments */ @@ -174,8 +168,7 @@ public int[] solve() { // Extract the assignments from the mask matrix. 
if (transposed) { assignments = new int[cols]; - outer: - for (int c = 0; c < cols; c++) { + outer: for (int c = 0; c < cols; c++) { for (int r = 0; r < rows; r++) { if (mask[r][c] == STAR) { assignments[c] = r; @@ -187,8 +180,7 @@ public int[] solve() { } } else { assignments = new int[rows]; - outer: - for (int r = 0; r < rows; r++) { + outer: for (int r = 0; r < rows; r++) { for (int c = 0; c < cols; c++) { if (mask[r][c] == STAR) { assignments[r] = c; @@ -215,9 +207,8 @@ public int[] solve() { } /** - * Corresponds to the "preliminaries" step of the original algorithm. - * Guarantees that the matrix is an equivalent non-negative matrix with at - * least one zero in each row. + * Corresponds to the "preliminaries" step of the original algorithm. Guarantees that the matrix + * is an equivalent non-negative matrix with at least one zero in each row. */ private void preliminaries() { for (int r = 0; r < rows; r++) { @@ -250,8 +241,8 @@ private void preliminaries() { } /** - * Test whether the algorithm is done, i.e. we have the optimal assignment. - * This occurs when there is exactly one starred zero in each row. + * Test whether the algorithm is done, i.e. we have the optimal assignment. This occurs when there + * is exactly one starred zero in each row. * @return true if the algorithm is done */ private boolean testIsDone() { @@ -431,8 +422,8 @@ private void stepThree() { } /** - * Find a zero cost assignment which is not covered. If there are no zero cost - * assignments which are uncovered, then null will be returned. + * Find a zero cost assignment which is not covered. If there are no zero cost assignments which + * are uncovered, then null will be returned. * @return pair of row and column indices of an uncovered zero or null */ private Pair findUncoveredZero() { @@ -445,8 +436,8 @@ private Pair findUncoveredZero() { } /** - * A specified row has become covered, and a specified column has become - * uncovered. The least value per row may need to be updated. + * A specified row has become covered, and a specified column has become uncovered. The least + * value per row may need to be updated. * @param row the index of the row which was just covered * @param col the index of the column which was just uncovered */ @@ -467,8 +458,8 @@ private void updateMin(int row, int col) { } /** - * Find a starred zero in a specified row. If there are no starred zeroes in - * the specified row, then null will be returned. + * Find a starred zero in a specified row. If there are no starred zeroes in the specified row, + * then null will be returned. * @param r the index of the row to be searched * @return pair of row and column indices of starred zero or null */ @@ -482,8 +473,8 @@ private Pair starInRow(int r) { } /** - * Find a starred zero in the specified column. If there are no starred zeroes - * in the specified row, then null will be returned. + * Find a starred zero in the specified column. If there are no starred zeroes in the specified + * row, then null will be returned. * @param c the index of the column to be searched * @return pair of row and column indices of starred zero or null */ @@ -497,8 +488,8 @@ private Pair starInCol(int c) { } /** - * Find a primed zero in the specified row. If there are no primed zeroes in - * the specified row, then null will be returned. + * Find a primed zero in the specified row. If there are no primed zeroes in the specified row, + * then null will be returned. 
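The MunkresAssignment class reformatted above is an internal (@InterfaceAudience.Private) O(n^3) solver; the two members used below, the float[][] constructor and solve(), are both visible in this hunk. A small usage sketch assuming only those two members:

  import java.util.Arrays;
  import org.apache.hadoop.hbase.util.MunkresAssignment;

  public class MunkresSketch {
    public static void main(String[] args) {
      // cost[row][column]; the solver minimizes the total assignment cost.
      float[][] cost = {
        {4f, 1f, 3f},
        {2f, 0f, 5f},
        {3f, 2f, 2f}
      };
      int[] assignment = new MunkresAssignment(cost).solve();
      // assignment[i] is the column chosen for row i, or -1 if that row is unassigned.
      System.out.println(Arrays.toString(assignment)); // e.g. [1, 0, 2]
    }
  }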
* @param r the index of the row to be searched * @return pair of row and column indices of primed zero or null */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java index 346f3df51834..49010363d277 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.util; +import java.util.concurrent.ThreadFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; +import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.ServerChannel; @@ -27,11 +33,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioServerSocketChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel; import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory; -import java.util.concurrent.ThreadFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; -import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; -import org.apache.yetus.audience.InterfaceAudience; /** * Event loop group related config. @@ -55,7 +56,7 @@ public NettyEventLoopGroupConfig(Configuration conf, String threadPoolName) { boolean useEpoll = useEpoll(conf); int workerCount = conf.getInt("hbase.netty.worker.count", 0); ThreadFactory eventLoopThreadFactory = - new DefaultThreadFactory(threadPoolName, true, Thread.MAX_PRIORITY); + new DefaultThreadFactory(threadPoolName, true, Thread.MAX_PRIORITY); if (useEpoll) { group = new EpollEventLoopGroup(workerCount, eventLoopThreadFactory); serverChannelClass = EpollServerSocketChannel.class; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java index 9fdf7ea74b33..c900a838c5b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
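The NettyEventLoopGroupConfig hunk above picks an epoll event loop group when useEpoll(conf) allows it and falls back to NIO otherwise, sharing a single DefaultThreadFactory. A rough sketch of that selection using plain (non-shaded) Netty classes; HBase itself uses the hbase-thirdparty relocated packages shown in the imports above, and the method and pool names here are illustrative:

  import java.util.concurrent.ThreadFactory;
  import io.netty.channel.EventLoopGroup;
  import io.netty.channel.epoll.Epoll;
  import io.netty.channel.epoll.EpollEventLoopGroup;
  import io.netty.channel.nio.NioEventLoopGroup;
  import io.netty.util.concurrent.DefaultThreadFactory;

  public class EventLoopGroupSketch {
    /** Uses an epoll group when it is available on this platform, otherwise falls back to NIO. */
    static EventLoopGroup create(String poolName, int workerCount, boolean preferEpoll) {
      ThreadFactory tf = new DefaultThreadFactory(poolName, true, Thread.MAX_PRIORITY);
      if (preferEpoll && Epoll.isAvailable()) {
        return new EpollEventLoopGroup(workerCount, tf);
      }
      return new NioEventLoopGroup(workerCount, tf);
    }

    public static void main(String[] args) {
      EventLoopGroup group = create("demo-event-loop", 0, true); // 0 = Netty's default thread count
      System.out.println(group.getClass().getSimpleName());
      group.shutdownGracefully();
    }
  }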
See the NOTICE file * distributed with this work for additional information @@ -33,9 +33,11 @@ private OOMEChecker() { public static boolean exitIfOOME(Throwable e, String service) { boolean stop = false; try { - if (e instanceof OutOfMemoryError || - (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) || - (e.getMessage() != null && e.getMessage().contains("java.lang.OutOfMemoryError"))) { + if ( + e instanceof OutOfMemoryError + || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) + || (e.getMessage() != null && e.getMessage().contains("java.lang.OutOfMemoryError")) + ) { stop = true; LOG.error(HBaseMarkers.FATAL, "Run out of memory; {} will abort itself immediately", service, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 286caf8ed3b0..7ff8f34c64b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.BufferedInputStream; @@ -171,9 +169,9 @@ private static Configuration createConf() { } /** - * @param hostname Hostname to unload regions from or load regions to. Can be either hostname - * or hostname:port. - * @param conf Configuration object + * @param hostname Hostname to unload regions from or load regions to. Can be either hostname or + * hostname:port. + * @param conf Configuration object */ public RegionMoverBuilder(String hostname, Configuration conf) { String[] splitHostname = hostname.toLowerCase().split(":"); @@ -189,9 +187,8 @@ public RegionMoverBuilder(String hostname, Configuration conf) { } /** - * Path of file where regions will be written to during unloading/read from during loading - * @param filename - * @return RegionMoverBuilder object + * Path of file where regions will be written to during unloading/read from during loading n + * * @return RegionMoverBuilder object */ public RegionMoverBuilder filename(String filename) { this.filename = filename; @@ -236,8 +233,7 @@ public RegionMoverBuilder designatedFile(String designatedFile) { * effort mode,each region movement is tried once.This can be used during graceful shutdown as * even if we have a stuck region,upon shutdown it'll be reassigned anyway. *

    - * @param ack - * @return RegionMoverBuilder object + * n * @return RegionMoverBuilder object */ public RegionMoverBuilder ack(boolean ack) { this.ack = ack; @@ -257,9 +253,7 @@ public RegionMoverBuilder timeout(int timeout) { } /** - * Set specific rackManager implementation. - * This setter method is for testing purpose only. - * + * Set specific rackManager implementation. This setter method is for testing purpose only. * @param rackManager rackManager impl * @return RegionMoverBuilder object */ @@ -333,13 +327,11 @@ private Optional getMetaRegionInfoIfToBeMoved(List regio return regionsToMove.stream().filter(RegionInfo::isMetaRegion).findFirst(); } - private void loadRegions(List regionsToMove) - throws Exception { + private void loadRegions(List regionsToMove) throws Exception { ServerName server = getTargetServer(); List movedRegions = Collections.synchronizedList(new ArrayList<>()); - LOG.info( - "Moving " + regionsToMove.size() + " regions to " + server + " using " + this.maxthreads - + " threads.Ack mode:" + this.ack); + LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using " + + this.maxthreads + " threads.Ack mode:" + this.ack); final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); List> taskList = new ArrayList<>(); @@ -348,13 +340,13 @@ private void loadRegions(List regionsToMove) RegionInfo region = regionsToMove.get(counter); ServerName currentServer = MoveWithAck.getServerNameForRegion(region, admin, conn); if (currentServer == null) { - LOG.warn( - "Could not get server for Region:" + region.getRegionNameAsString() + " moving on"); + LOG + .warn("Could not get server for Region:" + region.getRegionNameAsString() + " moving on"); counter++; continue; } else if (server.equals(currentServer)) { LOG.info( - "Region " + region.getRegionNameAsString() + " is already on target server=" + server); + "Region " + region.getRegionNameAsString() + " is already on target server=" + server); counter++; continue; } @@ -371,8 +363,8 @@ private void loadRegions(List regionsToMove) } moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + long timeoutInSeconds = regionsToMove.size() + * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); } @@ -382,7 +374,6 @@ private void loadRegions(List regionsToMove) * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile} - * * @return true if unloading succeeded, false otherwise */ public boolean unload() throws InterruptedException, ExecutionException, TimeoutException { @@ -394,19 +385,18 @@ public boolean unload() throws InterruptedException, ExecutionException, Timeout * noAck mode we do not make sure that region is successfully online on the target region * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions - * to hostnames provided in {@link #designatedFile}. - * While unloading regions, destination RegionServers are selected from different rack i.e - * regions should not move to any RegionServers that belong to same rack as source RegionServer. - * + * to hostnames provided in {@link #designatedFile}. 
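As context for the RegionMoverBuilder hunks above, a typical drain of a region server goes through the builder and unload(). A hedged sketch; the hostname and file path are placeholders, and it assumes the builder's build() method and a Closeable RegionMover as in recent HBase releases:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.util.RegionMover;

  public class RegionMoverSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Drain a region server before maintenance; ack(true) waits for each region
      // to come online on its destination before moving on to the next one.
      try (RegionMover mover = new RegionMover.RegionMoverBuilder("rs-host.example.com:16020", conf)
        .ack(true)
        .filename("/tmp/rs-host.example.com_regions") // where unloaded regions are recorded for reload
        .build()) {
        boolean unloaded = mover.unload();
        System.out.println("unload succeeded: " + unloaded);
      }
    }
  }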
While unloading regions, destination + * RegionServers are selected from different rack i.e regions should not move to any RegionServers + * that belong to same rack as source RegionServer. * @return true if unloading succeeded, false otherwise */ public boolean unloadFromRack() - throws InterruptedException, ExecutionException, TimeoutException { + throws InterruptedException, ExecutionException, TimeoutException { return unloadRegions(true); } - private boolean unloadRegions(boolean unloadFromRack) throws InterruptedException, - ExecutionException, TimeoutException { + private boolean unloadRegions(boolean unloadFromRack) + throws InterruptedException, ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); Future unloadTask = unloadPool.submit(() -> { @@ -421,7 +411,7 @@ private boolean unloadRegions(boolean unloadFromRack) throws InterruptedExceptio ServerName server = stripServer(regionServers, hostname, port); if (server == null) { LOG.info("Could not find server '{}:{}' in the set of region servers. giving up.", - hostname, port); + hostname, port); LOG.debug("List of region servers: {}", regionServers); return false; } @@ -452,8 +442,8 @@ private boolean unloadRegions(boolean unloadFromRack) throws InterruptedExceptio Set decommissionedRS = new HashSet<>(admin.listDecommissionedRegionServers()); if (CollectionUtils.isNotEmpty(decommissionedRS)) { regionServers.removeIf(decommissionedRS::contains); - LOG.debug("Excluded RegionServers from unloading regions to because they " + - "are marked as decommissioned. Servers: {}", decommissionedRS); + LOG.debug("Excluded RegionServers from unloading regions to because they " + + "are marked as decommissioned. Servers: {}", decommissionedRS); } stripMaster(regionServers); @@ -479,7 +469,7 @@ private boolean unloadRegions(boolean unloadFromRack) throws InterruptedExceptio @InterfaceAudience.Private Collection filterRSGroupServers(RSGroupInfo rsgroup, - Collection onlineServers) { + Collection onlineServers) { if (rsgroup.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { return onlineServers; } @@ -494,7 +484,7 @@ Collection filterRSGroupServers(RSGroupInfo rsgroup, } private void unloadRegions(ServerName server, List regionServers, - List movedRegions) throws Exception { + List movedRegions) throws Exception { while (true) { List regionsToMove = admin.getRegions(server); regionsToMove.removeAll(movedRegions); @@ -523,32 +513,29 @@ private void submitRegionMovesWhileUnloading(ServerName server, List int serverIndex = 0; for (RegionInfo regionToMove : regionsToMove) { if (ack) { - Future task = moveRegionsPool.submit( - new MoveWithAck(conn, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); + Future task = moveRegionsPool.submit(new MoveWithAck(conn, regionToMove, server, + regionServers.get(serverIndex), movedRegions)); taskList.add(task); } else { - Future task = moveRegionsPool.submit( - new MoveWithoutAck(admin, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); + Future task = moveRegionsPool.submit(new MoveWithoutAck(admin, regionToMove, + server, regionServers.get(serverIndex), movedRegions)); taskList.add(task); } serverIndex = (serverIndex + 1) % regionServers.size(); } moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + long timeoutInSeconds = regionsToMove.size() + * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, 
DEFAULT_MOVE_WAIT_MAX); waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); } private boolean waitTaskToFinish(ExecutorService pool, Future task, String operation) - throws TimeoutException, InterruptedException, ExecutionException { + throws TimeoutException, InterruptedException, ExecutionException { pool.shutdown(); try { if (!pool.awaitTermination((long) this.timeout, TimeUnit.SECONDS)) { - LOG.warn( - "Timed out before finishing the " + operation + " operation. Timeout: " + this.timeout - + "sec"); + LOG.warn("Timed out before finishing the " + operation + " operation. Timeout: " + + this.timeout + "sec"); pool.shutdownNow(); } } catch (InterruptedException e) { @@ -567,7 +554,7 @@ private boolean waitTaskToFinish(ExecutorService pool, Future task, Str } private void waitMoveTasksToFinish(ExecutorService moveRegionsPool, - List> taskList, long timeoutInSeconds) throws Exception { + List> taskList, long timeoutInSeconds) throws Exception { try { if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) { moveRegionsPool.shutdownNow(); @@ -596,7 +583,7 @@ private void waitMoveTasksToFinish(ExecutorService moveRegionsPool, } } catch (CancellationException e) { LOG.error("Thread for moving region cancelled. Timeout for cancellation:" + timeoutInSeconds - + "secs", e); + + "secs", e); throw e; } } @@ -607,9 +594,11 @@ private boolean ignoreRegionMoveFailure(ExecutionException e) { if (e.getCause() instanceof UnknownRegionException) { // region does not exist anymore ignoreFailure = true; - } else if (e.getCause() instanceof DoNotRetryRegionException - && e.getCause().getMessage() != null && e.getCause().getMessage() - .contains(AssignmentManager.UNEXPECTED_STATE_REGION + "state=SPLIT,")) { + } else if ( + e.getCause() instanceof DoNotRetryRegionException && e.getCause().getMessage() != null + && e.getCause().getMessage() + .contains(AssignmentManager.UNEXPECTED_STATE_REGION + "state=SPLIT,") + ) { // region is recently split ignoreFailure = true; } @@ -619,7 +608,7 @@ private boolean ignoreRegionMoveFailure(ExecutionException e) { private ServerName getTargetServer() throws Exception { ServerName server = null; int maxWaitInSeconds = - admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY, DEFAULT_SERVERSTART_WAIT_MAX); + admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY, DEFAULT_SERVERSTART_WAIT_MAX); long maxWait = EnvironmentEdgeManager.currentTime() + maxWaitInSeconds * 1000; while (EnvironmentEdgeManager.currentTime() < maxWait) { try { @@ -650,8 +639,8 @@ private List readRegionsFromFile(String filename) throws IOException if (!f.exists()) { return regions; } - try (DataInputStream dis = new DataInputStream( - new BufferedInputStream(new FileInputStream(f)))) { + try ( + DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(f)))) { int numRegions = dis.readInt(); int index = 0; while (index < numRegions) { @@ -670,16 +659,15 @@ private List readRegionsFromFile(String filename) throws IOException * lines */ private void writeFile(String filename, List movedRegions) throws IOException { - try (DataOutputStream dos = new DataOutputStream( - new BufferedOutputStream(new FileOutputStream(filename)))) { + try (DataOutputStream dos = + new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)))) { dos.writeInt(movedRegions.size()); for (RegionInfo region : movedRegions) { Bytes.writeByteArray(dos, RegionInfo.toByteArray(region)); } } catch (IOException e) { - LOG.error( - "ERROR: Was Not able to 
write regions moved to output file but moved " + movedRegions - .size() + " regions", e); + LOG.error("ERROR: Was Not able to write regions moved to output file but moved " + + movedRegions.size() + " regions", e); throw e; } } @@ -711,18 +699,17 @@ private List readServersFromFile(String filename) throws IOException { } /** - * Designates or excludes the servername whose hostname and port portion matches the list given - * in the file. - * Example:
    + * Designates or excludes the servername whose hostname and port portion matches the list given in + * the file. Example:
    * If you want to designated RSs, suppose designatedFile has RS1, regionServers has RS1, RS2 and - * RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and - * RS3 are removed from regionServers list so that regions can move to only RS1. - * If you want to exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. - * When we call includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from - * regionServers list so that regions can move to only RS2 and RS3. + * RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and RS3 + * are removed from regionServers list so that regions can move to only RS1. If you want to + * exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. When we call + * includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from regionServers + * list so that regions can move to only RS2 and RS3. */ private void includeExcludeRegionServers(String fileName, List regionServers, - boolean isInclude) throws IOException { + boolean isInclude) throws IOException { if (fileName != null) { List servers = readServersFromFile(fileName); if (servers.isEmpty()) { @@ -732,8 +719,8 @@ private void includeExcludeRegionServers(String fileName, List regio Iterator i = regionServers.iterator(); while (i.hasNext()) { String rs = i.next().getServerName(); - String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":" + rs - .split(ServerName.SERVERNAME_SEPARATOR)[1]; + String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":" + + rs.split(ServerName.SERVERNAME_SEPARATOR)[1]; if (isInclude != servers.contains(rsPort)) { i.remove(); } @@ -757,8 +744,10 @@ private void stripMaster(List regionServers) throws IOException { private ServerName stripServer(List regionServers, String hostname, int port) { for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName server = iter.next(); - if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && - server.getAddress().getPort() == port) { + if ( + server.getAddress().getHostName().equalsIgnoreCase(hostname) + && server.getAddress().getPort() == port + ) { iter.remove(); return server; } @@ -771,22 +760,22 @@ protected void addOptions() { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); this.addOptWithArg("m", "maxthreads", - "Define the maximum number of threads to use to unload and reload the regions"); + "Define the maximum number of threads to use to unload and reload the regions"); this.addOptWithArg("x", "excludefile", - "File with per line to exclude as unload targets; default excludes only " - + "target host; useful for rack decommisioning."); - this.addOptWithArg("d","designatedfile","File with per line as unload targets;" - + "default is all online hosts"); + "File with per line to exclude as unload targets; default excludes only " + + "target host; useful for rack decommisioning."); + this.addOptWithArg("d", "designatedfile", + "File with per line as unload targets;" + "default is all online hosts"); this.addOptWithArg("f", "filename", - "File to save regions list into unloading, or read from loading; " - + "default /tmp/"); + "File to save regions list into unloading, or read from loading; " + + "default /tmp/"); this.addOptNoArg("n", "noack", - "Turn on No-Ack mode(default: false) which won't check if region is 
online on target " - + "RegionServer, hence best effort. This is more performant in unloading and loading " - + "but might lead to region being unavailable for some time till master reassigns it " - + "in case the move failed"); + "Turn on No-Ack mode(default: false) which won't check if region is online on target " + + "RegionServer, hence best effort. This is more performant in unloading and loading " + + "but might lead to region being unavailable for some time till master reassigns it " + + "in case the move failed"); this.addOptWithArg("t", "timeout", "timeout in seconds after which the tool will exit " - + "irrespective of whether it finished or not;default Integer.MAX_VALUE"); + + "irrespective of whether it finished or not;default Integer.MAX_VALUE"); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java index 0f75b0e9bd5b..67117b260e81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,30 +24,23 @@ import java.util.Map.Entry; import java.util.TreeMap; import java.util.TreeSet; - +import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap; /** - * This is a generic region split calculator. It requires Ranges that provide - * start, end, and a comparator. It works in two phases -- the first adds ranges - * and rejects backwards ranges. Then one calls calcRegions to generate the - * multimap that has a start split key as a key and possibly multiple Ranges as - * members. - * - * To traverse, one normally would get the split set, and iterate through the - * calcRegions. Normal regions would have only one entry, holes would have zero, - * and any overlaps would have multiple entries. - * - * The interface is a bit cumbersome currently but is exposed this way so that - * clients can choose how to iterate through the region splits. - * + * This is a generic region split calculator. It requires Ranges that provide start, end, and a + * comparator. It works in two phases -- the first adds ranges and rejects backwards ranges. Then + * one calls calcRegions to generate the multimap that has a start split key as a key and possibly + * multiple Ranges as members. To traverse, one normally would get the split set, and iterate + * through the calcRegions. Normal regions would have only one entry, holes would have zero, and any + * overlaps would have multiple entries. The interface is a bit cumbersome currently but is exposed + * this way so that clients can choose how to iterate through the region splits. 
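The reflowed class comment above describes the add-then-calcCoverage workflow of RegionSplitCalculator. A small sketch of that workflow; it assumes the generic signature RegionSplitCalculator<R extends KeyRange> (the type parameters are elided in the diff text) and uses a throwaway KeyRange implementation:

  import java.util.Comparator;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.util.KeyRange;
  import org.apache.hadoop.hbase.util.RegionSplitCalculator;
  import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;

  public class SplitCalcSketch {
    // Minimal KeyRange: a [start, end) span expressed as byte[] row keys.
    static final class Range implements KeyRange {
      private final byte[] start, end;
      Range(String start, String end) {
        this.start = Bytes.toBytes(start);
        this.end = Bytes.toBytes(end);
      }
      @Override public byte[] getStartKey() { return start; }
      @Override public byte[] getEndKey() { return end; }
    }

    public static void main(String[] args) {
      Comparator<Range> byStartKey = (a, b) -> Bytes.compareTo(a.getStartKey(), b.getStartKey());
      RegionSplitCalculator<Range> calc = new RegionSplitCalculator<>(byStartKey);
      calc.add(new Range("a", "c"));
      calc.add(new Range("b", "d")); // overlaps the first range
      calc.add(new Range("d", "f"));
      Multimap<byte[], Range> coverage = calc.calcCoverage();
      // More than one range at a split point indicates an overlap; a split point
      // that never appears in the coverage map would be a hole.
      coverage.asMap().forEach((split, ranges) ->
        System.out.println(Bytes.toStringBinary(split) + " -> " + ranges.size()));
    }
  }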
* @param */ @InterfaceAudience.Private @@ -57,17 +49,14 @@ public class RegionSplitCalculator { private final Comparator rangeCmp; /** - * This contains a sorted set of all the possible split points - * - * Invariant: once populated this has 0 entries if empty or at most n+1 values - * where n == number of added ranges. + * This contains a sorted set of all the possible split points Invariant: once populated this has + * 0 entries if empty or at most n+1 values where n == number of added ranges. */ private final TreeSet splits = new TreeSet<>(BYTES_COMPARATOR); /** - * This is a map from start key to regions with the same start key. - * - * Invariant: This always have n values in total + * This is a map from start key to regions with the same start key. Invariant: This always have n + * values in total */ private final Multimap starts = ArrayListMultimap.create(); @@ -83,19 +72,15 @@ public RegionSplitCalculator(Comparator cmp) { public final static Comparator BYTES_COMPARATOR = new ByteArrayComparator() { @Override public int compare(byte[] l, byte[] r) { - if (l == null && r == null) - return 0; - if (l == null) - return 1; - if (r == null) - return -1; + if (l == null && r == null) return 0; + if (l == null) return 1; + if (r == null) return -1; return super.compare(l, r); } }; /** * SPECIAL CASE wrapper for empty end key - * * @return ENDKEY if end key is empty, else normal endkey. */ private static byte[] specialEndKey(R range) { @@ -108,7 +93,6 @@ private static byte[] specialEndKey(R range) { /** * Adds an edge to the split calculator - * * @return true if is included, false if backwards/invalid */ public boolean add(R range) { @@ -118,8 +102,8 @@ public boolean add(R range) { // No need to use Arrays.equals because ENDKEY is null if (end != ENDKEY && Bytes.compareTo(start, end) > 0) { // don't allow backwards edges - LOG.debug("attempted to add backwards edge: " + Bytes.toString(start) - + " " + Bytes.toString(end)); + LOG.debug( + "attempted to add backwards edge: " + Bytes.toString(start) + " " + Bytes.toString(end)); return false; } @@ -130,16 +114,13 @@ public boolean add(R range) { } /** - * Generates a coverage multimap from split key to Regions that start with the - * split key. - * + * Generates a coverage multimap from split key to Regions that start with the split key. * @return coverage multimap */ public Multimap calcCoverage() { // This needs to be sorted to force the use of the comparator on the values, // otherwise byte array comparison isn't used - Multimap regions = TreeMultimap.create(BYTES_COMPARATOR, - rangeCmp); + Multimap regions = TreeMultimap.create(BYTES_COMPARATOR, rangeCmp); // march through all splits from the start points for (Entry> start : starts.asMap().entrySet()) { @@ -147,8 +128,7 @@ public Multimap calcCoverage() { for (R r : start.getValue()) { regions.put(key, r); - for (byte[] coveredSplit : splits.subSet(r.getStartKey(), - specialEndKey(r))) { + for (byte[] coveredSplit : splits.subSet(r.getStartKey(), specialEndKey(r))) { regions.put(coveredSplit, r); } } @@ -165,36 +145,34 @@ public Multimap getStarts() { } /** - * Find specified number of top ranges in a big overlap group. - * It could return less if there are not that many top ranges. - * Once these top ranges are excluded, the big overlap group will - * be broken into ranges with no overlapping, or smaller overlapped - * groups, and most likely some holes. - * + * Find specified number of top ranges in a big overlap group. It could return less if there are + * not that many top ranges. 
Once these top ranges are excluded, the big overlap group will be + * broken into ranges with no overlapping, or smaller overlapped groups, and most likely some + * holes. * @param bigOverlap a list of ranges that overlap with each other - * @param count the max number of ranges to find + * @param count the max number of ranges to find * @return a list of ranges that overlap with most others */ - public static List - findBigRanges(Collection bigOverlap, int count) { + public static List findBigRanges(Collection bigOverlap, int count) { List bigRanges = new ArrayList<>(); // The key is the count of overlaps, // The value is a list of ranges that have that many overlaps TreeMap> overlapRangeMap = new TreeMap<>(); - for (R r: bigOverlap) { + for (R r : bigOverlap) { // Calculates the # of overlaps for each region // and populates rangeOverlapMap byte[] startKey = r.getStartKey(); byte[] endKey = specialEndKey(r); int overlappedRegions = 0; - for (R rr: bigOverlap) { + for (R rr : bigOverlap) { byte[] start = rr.getStartKey(); byte[] end = specialEndKey(rr); - if (BYTES_COMPARATOR.compare(startKey, end) < 0 - && BYTES_COMPARATOR.compare(endKey, start) > 0) { + if ( + BYTES_COMPARATOR.compare(startKey, end) < 0 && BYTES_COMPARATOR.compare(endKey, start) > 0 + ) { overlappedRegions++; } } @@ -213,7 +191,7 @@ public Multimap getStarts() { } } int toBeAdded = count; - for (Integer key: overlapRangeMap.descendingKeySet()) { + for (Integer key : overlapRangeMap.descendingKeySet()) { List chunk = overlapRangeMap.get(key); int chunkSize = chunk.size(); if (chunkSize <= toBeAdded) { @@ -225,7 +203,7 @@ public Multimap getStarts() { // chained, for example: [a, c), [b, e), [d, g), [f h)... // In such a case, sideline the middle chunk will break // the group efficiently. - int start = (chunkSize - toBeAdded)/2; + int start = (chunkSize - toBeAdded) / 2; int end = start + toBeAdded; for (int i = start; i < end; i++) { bigRanges.add(chunk.get(i)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 11bbd210a79d..f248d6b9e5b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,10 +66,9 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; /** - * The {@link RegionSplitter} class provides several utilities to help in the - * administration lifecycle for developers who choose to manually split regions - * instead of having HBase handle that automatically. The most useful utilities - * are: + * The {@link RegionSplitter} class provides several utilities to help in the administration + * lifecycle for developers who choose to manually split regions instead of having HBase handle that + * automatically. The most useful utilities are: *

    *

      *
    • Create a table with a specified number of pre-split regions @@ -81,13 +79,13 @@ *

      * Question: How do I turn off automatic splitting?
      * Answer: Automatic splitting is determined by the configuration value - * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this - * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting - * is 100GB, which would result in > 1hr major compactions if reached. + * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this to Long.MAX_VALUE + * in case you forget about manual splits. A suggested setting is 100GB, which would result in > + * 1hr major compactions if reached. *

      * Question: Why did the original authors decide to manually split?
      - * Answer: Specific workload characteristics of our use case allowed us - * to benefit from a manual split system. + * Answer: Specific workload characteristics of our use case allowed us to benefit from a + * manual split system. *

      *

        *
      • Data (~1k) that would grow instead of being replaced @@ -96,146 +94,120 @@ *
      *

      * Question: Why is manual splitting good for this workload?
      - * Answer: Although automated splitting is not a bad option, there are - * benefits to manual splitting. + * Answer: Although automated splitting is not a bad option, there are benefits to manual + * splitting. *

      *

        - *
      • With growing amounts of data, splits will continually be needed. Since - * you always know exactly what regions you have, long-term debugging and - * profiling is much easier with manual splits. It is hard to trace the logs to - * understand region level problems if it keeps splitting and getting renamed. - *
      • Data offlining bugs + unknown number of split regions == oh crap! If an - * WAL or StoreFile was mistakenly unprocessed by HBase due to a weird bug and - * you notice it a day or so later, you can be assured that the regions - * specified in these files are the same as the current regions and you have - * less headaches trying to restore/replay your data. - *
      • You can finely tune your compaction algorithm. With roughly uniform data - * growth, it's easy to cause split / compaction storms as the regions all - * roughly hit the same data size at the same time. With manual splits, you can - * let staggered, time-based major compactions spread out your network IO load. + *
      • With growing amounts of data, splits will continually be needed. Since you always know + * exactly what regions you have, long-term debugging and profiling is much easier with manual + * splits. It is hard to trace the logs to understand region level problems if it keeps splitting + * and getting renamed. + *
      • Data offlining bugs + unknown number of split regions == oh crap! If a WAL or StoreFile was + * mistakenly unprocessed by HBase due to a weird bug and you notice it a day or so later, you can + * be assured that the regions specified in these files are the same as the current regions and you + * have fewer headaches trying to restore/replay your data.
      • You can finely tune your compaction algorithm. With roughly uniform data growth, it's easy to + * cause split / compaction storms as the regions all roughly hit the same data size at the same + * time. With manual splits, you can let staggered, time-based major compactions spread out your + * network IO load. *
      *

      * Question: What's the optimal number of pre-split regions to create?
      * Answer: Mileage will vary depending upon your application. *

      - * The short answer for our application is that we started with 10 pre-split - * regions / server and watched our data growth over time. It's better to err on - * the side of too little regions and rolling split later. + * The short answer for our application is that we started with 10 pre-split regions / server and + * watched our data growth over time. It's better to err on the side of too little regions and + * rolling split later. *

      - * The more complicated answer is that this depends upon the largest storefile - * in your region. With a growing data size, this will get larger over time. You - * want the largest region to be just big enough that the - * {@link org.apache.hadoop.hbase.regionserver.HStore} compact - * selection algorithm only compacts it due to a timed major. If you don't, your - * cluster can be prone to compaction storms as the algorithm decides to run - * major compactions on a large series of regions all at once. Note that - * compaction storms are due to the uniform data growth, not the manual split + * The more complicated answer is that this depends upon the largest storefile in your region. With + * a growing data size, this will get larger over time. You want the largest region to be just big + * enough that the {@link org.apache.hadoop.hbase.regionserver.HStore} compact selection algorithm + * only compacts it due to a timed major. If you don't, your cluster can be prone to compaction + * storms as the algorithm decides to run major compactions on a large series of regions all at + * once. Note that compaction storms are due to the uniform data growth, not the manual split * decision. *

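The Q&A above points at HConstants.HREGION_MAX_FILESIZE and HConstants.MAJOR_COMPACTION_PERIOD as the knobs for a manual-split setup. A minimal sketch of setting them programmatically; the 100 GB and 7 day values are only the illustrative suggestions from the text above, not defaults:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;

  public class SplitConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Region size that triggers an automatic split ("hbase.hregion.max.filesize").
      conf.setLong(HConstants.HREGION_MAX_FILESIZE, 100L * 1024 * 1024 * 1024); // 100 GB, as suggested above
      // Interval between timed major compactions, in milliseconds ("hbase.hregion.majorcompaction").
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 7L * 24 * 60 * 60 * 1000); // 7 days, illustrative
      System.out.println(conf.get(HConstants.HREGION_MAX_FILESIZE));
    }
  }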
      - * If you pre-split your regions too thin, you can increase the major compaction - * interval by configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size - * grows too large, use this script to perform a network IO safe rolling split - * of all regions. + * If you pre-split your regions too thin, you can increase the major compaction interval by + * configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size grows too large, use this + * script to perform a network IO safe rolling split of all regions. */ @InterfaceAudience.Private public class RegionSplitter { private static final Logger LOG = LoggerFactory.getLogger(RegionSplitter.class); /** - * A generic interface for the RegionSplitter code to use for all it's - * functionality. Note that the original authors of this code use - * {@link HexStringSplit} to partition their table and set it as default, but - * provided this for your custom algorithm. To use, create a new derived class + * A generic interface for the RegionSplitter code to use for all it's functionality. Note that + * the original authors of this code use {@link HexStringSplit} to partition their table and set + * it as default, but provided this for your custom algorithm. To use, create a new derived class * from this interface and call {@link RegionSplitter#createPresplitTable} or - * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the - * argument splitClassName giving the name of your class. + * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the argument + * splitClassName giving the name of your class. */ public interface SplitAlgorithm { /** - * Split a pre-existing region into 2 regions. - * - * @param start - * first row (inclusive) - * @param end - * last row (exclusive) + * Split a pre-existing region into 2 regions. n * first row (inclusive) n * last row + * (exclusive) * @return the split row to use */ byte[] split(byte[] start, byte[] end); /** - * Split an entire table. - * - * @param numRegions - * number of regions to split the table into - * - * @throws RuntimeException - * user input is validated at this time. may throw a runtime - * exception in response to a parse failure - * @return array of split keys for the initial regions of the table. The - * length of the returned array should be numRegions-1. + * Split an entire table. n * number of regions to split the table into n * user input is + * validated at this time. may throw a runtime exception in response to a parse failure + * @return array of split keys for the initial regions of the table. The length of the returned + * array should be numRegions-1. */ byte[][] split(int numRegions); /** - * Some MapReduce jobs may want to run multiple mappers per region, - * this is intended for such usecase. - * - * @param start first row (inclusive) - * @param end last row (exclusive) + * Some MapReduce jobs may want to run multiple mappers per region, this is intended for such + * usecase. + * @param start first row (inclusive) + * @param end last row (exclusive) * @param numSplits number of splits to generate * @param inclusive whether start and end are returned as split points */ byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive); /** - * In HBase, the first row is represented by an empty byte array. This might - * cause problems with your split algorithm or row printing. All your APIs - * will be passed firstRow() instead of empty array. - * + * In HBase, the first row is represented by an empty byte array. 
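The SplitAlgorithm contract reformatted in this hunk can be exercised with the built-in HexStringSplit mentioned in the interface javadoc. A brief sketch, assuming the public HexStringSplit class and only the methods shown here (split(int) returns numRegions - 1 boundaries; split(start, end) bisects a single region):

  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.util.RegionSplitter;
  import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;

  public class SplitKeysSketch {
    public static void main(String[] args) {
      SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
      byte[][] splits = algo.split(8); // 7 boundaries for 8 initial regions
      for (byte[] split : splits) {
        System.out.println(algo.rowToStr(split));
      }
      // A single region can also be bisected between its first and last row.
      byte[] mid = algo.split(algo.firstRow(), algo.lastRow());
      System.out.println(Bytes.toStringBinary(mid));
    }
  }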
This might cause problems with + * your split algorithm or row printing. All your APIs will be passed firstRow() instead of + * empty array. * @return your representation of your first row */ byte[] firstRow(); /** - * In HBase, the last row is represented by an empty byte array. This might - * cause problems with your split algorithm or row printing. All your APIs - * will be passed firstRow() instead of empty array. - * + * In HBase, the last row is represented by an empty byte array. This might cause problems with + * your split algorithm or row printing. All your APIs will be passed firstRow() instead of + * empty array. * @return your representation of your last row */ byte[] lastRow(); /** - * In HBase, the last row is represented by an empty byte array. Set this - * value to help the split code understand how to evenly divide the first - * region. - * - * @param userInput - * raw user input (may throw RuntimeException on parse failure) + * In HBase, the last row is represented by an empty byte array. Set this value to help the + * split code understand how to evenly divide the first region. n * raw user input (may throw + * RuntimeException on parse failure) */ void setFirstRow(String userInput); /** - * In HBase, the last row is represented by an empty byte array. Set this - * value to help the split code understand how to evenly divide the last - * region. Note that this last row is inclusive for all rows sharing the - * same prefix. - * - * @param userInput - * raw user input (may throw RuntimeException on parse failure) + * In HBase, the last row is represented by an empty byte array. Set this value to help the + * split code understand how to evenly divide the last region. Note that this last row is + * inclusive for all rows sharing the same prefix. n * raw user input (may throw + * RuntimeException on parse failure) */ void setLastRow(String userInput); /** - * @param input - * user or file input for row + * n * user or file input for row * @return byte array representation of this row for HBase */ byte[] strToRow(String input); /** - * @param row - * byte array representing a row in HBase + * n * byte array representing a row in HBase * @return String to use for debug & file printing */ String rowToStr(byte[] row); @@ -262,72 +234,51 @@ public interface SplitAlgorithm { * The main function for the RegionSplitter application. Common uses: *

      *

        - *
      • create a table named 'myTable' with 60 pre-split regions containing 2 - * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII: + *
      • create a table named 'myTable' with 60 pre-split regions containing 2 column families + * 'test' & 'rs', assuming the keys are hex-encoded ASCII: *
          - *
        • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs - * myTable HexStringSplit + *
        • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs myTable + * HexStringSplit *
        - *
      • create a table named 'myTable' with 50 pre-split regions, - * assuming the keys are decimal-encoded ASCII: + *
      • create a table named 'myTable' with 50 pre-split regions, assuming the keys are + * decimal-encoded ASCII: *
          - *
        • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 - * myTable DecimalStringSplit + *
        • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 myTable DecimalStringSplit *
        - *
      • perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 - * outstanding splits at a time, assuming keys are uniformly distributed - * bytes: + *
      • perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 outstanding splits at + * a time, assuming keys are uniformly distributed bytes: *
          - *
        • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable - * UniformSplit + *
        • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable UniformSplit *
        *
      - * - * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, - * DecimalStringSplit, and UniformSplit. These are different strategies for - * choosing region boundaries. See their source code for details. - * - * @param args - * Usage: RegionSplitter <TABLE> <SPLITALGORITHM> - * <-c <# regions> -f <family:family:...> | -r - * [-o <# outstanding splits>]> - * [-D <conf.param=value>] - * @throws IOException - * HBase IO problem - * @throws InterruptedException - * user requested exit - * @throws ParseException - * problem parsing user input + * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, DecimalStringSplit, + * and UniformSplit. These are different strategies for choosing region boundaries. See their + * source code for details. n * Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c + * <# regions> -f <family:family:...> | -r [-o <# outstanding splits>]> [-D + * <conf.param=value>] n * HBase IO problem n * user requested exit n * problem parsing user + * input */ @SuppressWarnings("static-access") - public static void main(String[] args) throws IOException, - InterruptedException, ParseException { + public static void main(String[] args) throws IOException, InterruptedException, ParseException { Configuration conf = HBaseConfiguration.create(); // parse user input Options opt = new Options(); opt.addOption(OptionBuilder.withArgName("property=value").hasArg() - .withDescription("Override HBase Configuration Settings").create("D")); + .withDescription("Override HBase Configuration Settings").create("D")); opt.addOption(OptionBuilder.withArgName("region count").hasArg() - .withDescription( - "Create a new table with a pre-split number of regions") - .create("c")); + .withDescription("Create a new table with a pre-split number of regions").create("c")); opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg() - .withDescription( - "Column Families to create with new table. Required with -c") - .create("f")); + .withDescription("Column Families to create with new table. Required with -c").create("f")); opt.addOption("h", false, "Print this usage help"); opt.addOption("r", false, "Perform a rolling split of an existing region"); - opt.addOption(OptionBuilder.withArgName("count").hasArg().withDescription( - "Max outstanding splits that have unfinished major compactions") - .create("o")); - opt.addOption(null, "firstrow", true, - "First Row in Table for Split Algorithm"); - opt.addOption(null, "lastrow", true, - "Last Row in Table for Split Algorithm"); - opt.addOption(null, "risky", false, - "Skip verification steps to complete quickly. " - + "STRONGLY DISCOURAGED for production systems. "); + opt.addOption(OptionBuilder.withArgName("count").hasArg() + .withDescription("Max outstanding splits that have unfinished major compactions") + .create("o")); + opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm"); + opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm"); + opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. " + + "STRONGLY DISCOURAGED for production systems. "); CommandLine cmd = new GnuParser().parse(opt, args); if (cmd.hasOption("D")) { @@ -351,13 +302,13 @@ public static void main(String[] args) throws IOException, boolean oneOperOnly = createTable ^ rollingSplit; if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) { - new HelpFormatter().printHelp("bin/hbase regionsplitter
    \n"+ - "SPLITALGORITHM is the java class name of a class implementing " + - "SplitAlgorithm, or one of the special strings HexStringSplit or " + - "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " + - "HexStringSplit treats keys as hexadecimal ASCII, and " + - "DecimalStringSplit treats keys as decimal ASCII, and " + - "UniformSplit treats keys as arbitrary bytes.", opt); + new HelpFormatter().printHelp("bin/hbase regionsplitter
    \n" + + "SPLITALGORITHM is the java class name of a class implementing " + + "SplitAlgorithm, or one of the special strings HexStringSplit or " + + "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " + + "HexStringSplit treats keys as hexadecimal ASCII, and " + + "DecimalStringSplit treats keys as decimal ASCII, and " + + "UniformSplit treats keys as arbitrary bytes.", opt); return; } TableName tableName = TableName.valueOf(cmd.getArgs()[0]); @@ -385,15 +336,14 @@ public static void main(String[] args) throws IOException, } static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, - String[] columnFamilies, Configuration conf) - throws IOException, InterruptedException { + String[] columnFamilies, Configuration conf) throws IOException, InterruptedException { final int splitCount = conf.getInt("split.count", 0); Preconditions.checkArgument(splitCount > 1, "Split count must be > 1"); Preconditions.checkArgument(columnFamilies.length > 0, - "Must specify at least one column family. "); + "Must specify at least one column family. "); LOG.debug("Creating table " + tableName + " with " + columnFamilies.length - + " column families. Presplitting to " + splitCount + " regions"); + + " column families. Presplitting to " + splitCount + " regions"); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (String cf : columnFamilies) { @@ -427,22 +377,21 @@ static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, } /** - * Alternative getCurrentNrHRS which is no longer available. - * @param connection - * @return Rough count of regionservers out on cluster. + * Alternative getCurrentNrHRS which is no longer available. n * @return Rough count of + * regionservers out on cluster. * @throws IOException if a remote or network exception occurs */ private static int getRegionServerCount(final Connection connection) throws IOException { try (Admin admin = connection.getAdmin()) { Collection servers = admin.getRegionServers(); - return servers == null || servers.isEmpty()? 0: servers.size(); + return servers == null || servers.isEmpty() ? 0 : servers.size(); } } - private static byte [] readFile(final FileSystem fs, final Path path) throws IOException { + private static byte[] readFile(final FileSystem fs, final Path path) throws IOException { FSDataInputStream tmpIn = fs.open(path); try { - byte [] rawData = new byte[tmpIn.available()]; + byte[] rawData = new byte[tmpIn.available()]; tmpIn.readFully(rawData); return rawData; } finally { @@ -451,7 +400,7 @@ private static int getRegionServerCount(final Connection connection) throws IOEx } static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final int minOS = conf.getInt("split.outstanding", 2); try (Connection connection = ConnectionFactory.createConnection(conf)) { // Max outstanding splits. default == 50% of servers @@ -475,9 +424,8 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // requests to the same RS can stall the outstanding split queue. // To fix, group the regions into an RS pool and round-robin through it LOG.debug("Bucketing regions by regionserver..."); - TreeMap>> daughterRegions = - Maps.newTreeMap(); - // Get a regionLocator. Need it in below. + TreeMap>> daughterRegions = Maps.newTreeMap(); + // Get a regionLocator. Need it in below. 
try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { for (Pair dr : tmpRegionSet) { ServerName rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getServerName(); @@ -505,7 +453,7 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // Get ServerName to region count mapping final TreeMap rsSizes = Maps.newTreeMap(); List hrls = regionLocator.getAllRegionLocations(); - for (HRegionLocation hrl: hrls) { + for (HRegionLocation hrl : hrls) { ServerName sn = hrl.getServerName(); if (rsSizes.containsKey(sn)) { rsSizes.put(sn, rsSizes.get(sn) + 1); @@ -516,8 +464,8 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // Round-robin through the ServerName list. Choose the lightest-loaded servers // first to keep the master from load-balancing regions as we split. - for (Map.Entry>> daughterRegion : - daughterRegions.entrySet()) { + for (Map.Entry>> daughterRegion : daughterRegions.entrySet()) { Pair dr = null; ServerName rsLoc = daughterRegion.getKey(); LinkedList> regionList = daughterRegion.getValue(); @@ -534,8 +482,8 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // if this region moved locations ServerName newRs = regionLoc.getServerName(); if (newRs.compareTo(rsLoc) != 0) { - LOG.debug("Region with " + splitAlgo.rowToStr(split) - + " moved to " + newRs + ". Relocating..."); + LOG.debug("Region with " + splitAlgo.rowToStr(split) + " moved to " + newRs + + ". Relocating..."); // relocate it, don't use it right now if (!daughterRegions.containsKey(newRs)) { LinkedList> entry = Lists.newLinkedList(); @@ -550,15 +498,15 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur byte[] sk = regionLoc.getRegion().getStartKey(); if (sk.length != 0) { if (Bytes.equals(split, sk)) { - LOG.debug("Region already split on " - + splitAlgo.rowToStr(split) + ". Skipping this region..."); + LOG.debug("Region already split on " + splitAlgo.rowToStr(split) + + ". Skipping this region..."); ++splitCount; dr = null; continue; } byte[] start = dr.getFirst(); - Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo - .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); + Preconditions.checkArgument(Bytes.equals(start, sk), + splitAlgo.rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); } // passed all checks! found a good region @@ -567,8 +515,7 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur if (regionList.isEmpty()) { daughterRegions.remove(rsLoc); } - if (dr == null) - continue; + if (dr == null) continue; // we have a good region, time to split! byte[] split = dr.getSecond(); @@ -600,14 +547,13 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // mark each finished region as successfully split. for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " " + + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; if (splitCount % 10 == 0) { - long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) - / splitCount; - LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount - + ". Avg Time / Split = " + long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) / splitCount; + LOG.debug( + "STATUS UPDATE: " + splitCount + " / " + origCount + ". 
Avg Time / Split = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); } } @@ -616,15 +562,15 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur if (conf.getBoolean("split.verify", true)) { while (!outstanding.isEmpty()) { LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); - LinkedList> finished = splitScan(outstanding, - connection, tableName, splitAlgo); + LinkedList> finished = + splitScan(outstanding, connection, tableName, splitAlgo); if (finished.isEmpty()) { Thread.sleep(30 * 1000); } else { outstanding.removeAll(finished); for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " " + + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; } LOG.debug("Finally " + finished.size() + " outstanding splits finished"); @@ -634,12 +580,11 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur LOG.debug("All regions have been successfully split!"); } finally { long tDiff = EnvironmentEdgeManager.currentTime() - startTime; - LOG.debug("TOTAL TIME = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); LOG.debug("Splits = " + splitCount); if (0 < splitCount) { LOG.debug("Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); + + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); } } } finally { @@ -651,16 +596,15 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur } /** - * @throws IOException if the specified SplitAlgorithm class couldn't be - * instantiated + * @throws IOException if the specified SplitAlgorithm class couldn't be instantiated */ - public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, - String splitClassName) throws IOException { + public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, String splitClassName) + throws IOException { Class splitClass; // For split algorithms builtin to RegionSplitter, the user can specify // their simple class name instead of a fully qualified class name. 
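For illustration, a minimal sketch of resolving a split algorithm by name through the method above, assuming only the newSplitAlgoInstance API exactly as it appears in this hunk; "com.example.MyCustomSplit" is a hypothetical placeholder that would only resolve if such a class were on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;

public class SplitAlgoResolutionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Built-in algorithms may be referenced by their simple class name.
    SplitAlgorithm hex = RegionSplitter.newSplitAlgoInstance(conf, "HexStringSplit");
    System.out.println(hex);
    // A user-supplied algorithm needs its fully qualified class name
    // ("com.example.MyCustomSplit" is made up for this sketch).
    SplitAlgorithm custom =
      RegionSplitter.newSplitAlgoInstance(conf, "com.example.MyCustomSplit");
    System.out.println(custom);
  }
}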
- if(splitClassName.equals(HexStringSplit.class.getSimpleName())) { + if (splitClassName.equals(HexStringSplit.class.getSimpleName())) { splitClass = HexStringSplit.class; } else if (splitClassName.equals(DecimalStringSplit.class.getSimpleName())) { splitClass = DecimalStringSplit.class; @@ -672,12 +616,11 @@ public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, } catch (ClassNotFoundException e) { throw new IOException("Couldn't load split class " + splitClassName, e); } - if(splitClass == null) { + if (splitClass == null) { throw new IOException("Failed loading split class " + splitClassName); } - if(!SplitAlgorithm.class.isAssignableFrom(splitClass)) { - throw new IOException( - "Specified split class doesn't implement SplitAlgorithm"); + if (!SplitAlgorithm.class.isAssignableFrom(splitClass)) { + throw new IOException("Specified split class doesn't implement SplitAlgorithm"); } } try { @@ -687,12 +630,9 @@ public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, } } - static LinkedList> splitScan( - LinkedList> regionList, - final Connection connection, - final TableName tableName, - SplitAlgorithm splitAlgo) - throws IOException, InterruptedException { + static LinkedList> splitScan(LinkedList> regionList, + final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo) + throws IOException, InterruptedException { LinkedList> finished = Lists.newLinkedList(); LinkedList> logicalSplitting = Lists.newLinkedList(); LinkedList> physicalSplitting = Lists.newLinkedList(); @@ -736,11 +676,10 @@ static LinkedList> splitScan( check.add(regionLocator.getRegionLocation(split).getRegion()); for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) { byte[] sk = hri.getStartKey(); - if (sk.length == 0) - sk = splitAlgo.firstRow(); + if (sk.length == 0) sk = splitAlgo.firstRow(); - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( - connection.getConfiguration(), fs, tableDir, hri, true); + HRegionFileSystem regionFs = HRegionFileSystem + .openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true); // Check every Column Family for that region -- check does not have references. boolean refFound = false; @@ -767,18 +706,15 @@ static LinkedList> splitScan( } } - LOG.debug("Split Scan: " + finished.size() + " finished / " - + logicalSplitting.size() + " split wait / " - + physicalSplitting.size() + " reference wait"); + LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + + " split wait / " + physicalSplitting.size() + " reference wait"); return finished; } } /** - * @param conf - * @param tableName - * @return A Pair where first item is table dir and second is the split file. + * nn * @return A Pair where first item is table dir and second is the split file. 
* @throws IOException if a remote or network exception occurs */ private static Pair getTableDirAndSplitFile(final Configuration conf, @@ -790,8 +726,7 @@ private static Pair getTableDirAndSplitFile(final Configuration conf } static LinkedList> getSplits(final Connection connection, - TableName tableName, SplitAlgorithm splitAlgo) - throws IOException { + TableName tableName, SplitAlgorithm splitAlgo) throws IOException { Pair tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName); Path tableDir = tableDirAndSplitFile.getFirst(); @@ -814,13 +749,11 @@ static LinkedList> getSplits(final Connection connection, tmp = regionLocator.getStartEndKeys(); } Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length, - "Start and End rows should be equivalent"); + "Start and End rows should be equivalent"); for (int i = 0; i < tmp.getFirst().length; ++i) { byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i]; - if (start.length == 0) - start = splitAlgo.firstRow(); - if (end.length == 0) - end = splitAlgo.lastRow(); + if (start.length == 0) start = splitAlgo.firstRow(); + if (end.length == 0) end = splitAlgo.lastRow(); rows.add(Pair.newPair(start, end)); } LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split."); @@ -835,10 +768,9 @@ static LinkedList> getSplits(final Connection connection, String startStr = splitAlgo.rowToStr(r.getFirst()); String splitStr = splitAlgo.rowToStr(splitPoint); daughterRegions.add(Pair.newPair(startStr, splitStr)); - LOG.debug("Will Split [" + startStr + " , " - + splitAlgo.rowToStr(r.getSecond()) + ") at " + splitStr); - tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr - + "\n"); + LOG.debug("Will Split [" + startStr + " , " + splitAlgo.rowToStr(r.getSecond()) + ") at " + + splitStr); + tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr + "\n"); } tmpOut.close(); fs.rename(tmpFile, splitFile); @@ -866,10 +798,8 @@ static LinkedList> getSplits(final Connection connection, daughterRegions.add(r); } else { LOG.debug("Removing: " + r); - Preconditions.checkArgument(cmd[0].equals("-"), - "Unknown option: " + cmd[0]); - Preconditions.checkState(daughterRegions.contains(r), - "Missing row: " + r); + Preconditions.checkArgument(cmd[0].equals("-"), "Unknown option: " + cmd[0]); + Preconditions.checkState(daughterRegions.contains(r), "Missing row: " + r); daughterRegions.remove(r); } } @@ -877,22 +807,18 @@ static LinkedList> getSplits(final Connection connection, } LinkedList> ret = Lists.newLinkedList(); for (Pair r : daughterRegions) { - ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo - .strToRow(r.getSecond()))); + ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo.strToRow(r.getSecond()))); } return ret; } /** - * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region - * boundaries. The format of a HexStringSplit region boundary is the ASCII - * representation of an MD5 checksum, or any other uniformly distributed - * hexadecimal value. Row are hex-encoded long values in the range - * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the - * same order lexicographically as if they were binary. - * - * Since this split algorithm uses hex strings as keys, it is easy to read & - * write in the shell but takes up more space and may be non-intuitive. + * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region boundaries. 
The + * format of a HexStringSplit region boundary is the ASCII representation of an MD5 checksum, or + * any other uniformly distributed hexadecimal value. Row are hex-encoded long values in the range + * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the same order + * lexicographically as if they were binary. Since this split algorithm uses hex strings as keys, + * it is easy to read & write in the shell but takes up more space and may be non-intuitive. */ public static class HexStringSplit extends NumberStringSplit { final static String DEFAULT_MIN_HEX = "00000000"; @@ -906,11 +832,10 @@ public HexStringSplit() { } /** - * The format of a DecimalStringSplit region boundary is the ASCII representation of - * reversed sequential number, or any other uniformly distributed decimal value. - * Row are decimal-encoded long values in the range - * "00000000" => "99999999" and are left-padded with zeros to keep the - * same order lexicographically as if they were binary. + * The format of a DecimalStringSplit region boundary is the ASCII representation of reversed + * sequential number, or any other uniformly distributed decimal value. Row are decimal-encoded + * long values in the range "00000000" => "99999999" and are left-padded with zeros to + * keep the same order lexicographically as if they were binary. */ public static class DecimalStringSplit extends NumberStringSplit { final static String DEFAULT_MIN_DEC = "00000000"; @@ -952,20 +877,18 @@ public byte[] split(byte[] start, byte[] end) { @Override public byte[][] split(int n) { Preconditions.checkArgument(lastRowInt.compareTo(firstRowInt) > 0, - "last row (%s) is configured less than first row (%s)", lastRow, - firstRow); + "last row (%s) is configured less than first row (%s)", lastRow, firstRow); // +1 to range because the last row is inclusive BigInteger range = lastRowInt.subtract(firstRowInt).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(n)) >= 0, - "split granularity (%s) is greater than the range (%s)", n, range); + "split granularity (%s) is greater than the range (%s)", n, range); BigInteger[] splits = new BigInteger[n - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(n)); for (int i = 1; i < n; i++) { // NOTE: this means the last region gets all the slop. // This is not a big deal if we're assuming n << MAXHEX - splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger - .valueOf(i))); + splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i))); } return convertToBytes(splits); } @@ -976,20 +899,18 @@ public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive BigInteger e = convertToBigInteger(end); Preconditions.checkArgument(e.compareTo(s) > 0, - "last row (%s) is configured less than first row (%s)", rowToStr(end), - end); + "last row (%s) is configured less than first row (%s)", rowToStr(end), end); // +1 to range because the last row is inclusive BigInteger range = e.subtract(s).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(numSplits)) >= 0, - "split granularity (%s) is greater than the range (%s)", numSplits, range); + "split granularity (%s) is greater than the range (%s)", numSplits, range); BigInteger[] splits = new BigInteger[numSplits - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(numSplits)); for (int i = 1; i < numSplits; i++) { // NOTE: this means the last region gets all the slop. 
// This is not a big deal if we're assuming n << MAXHEX - splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger - .valueOf(i))); + splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i))); } if (inclusive) { @@ -1054,7 +975,6 @@ public void setLastRow(byte[] userInput) { /** * Divide 2 numbers in half (for split algorithm) - * * @param a number #1 * @param b number #2 * @return the midpoint of the 2 numbers @@ -1065,7 +985,6 @@ public BigInteger split2(BigInteger a, BigInteger b) { /** * Returns an array of bytes corresponding to an array of BigIntegers - * * @param bigIntegers numbers to convert * @return bytes corresponding to the bigIntegers */ @@ -1079,9 +998,8 @@ public byte[][] convertToBytes(BigInteger[] bigIntegers) { /** * Returns the bytes corresponding to the BigInteger - * * @param bigInteger number to convert - * @param pad padding length + * @param pad padding length * @return byte corresponding to input BigInteger */ public byte[] convertToByte(BigInteger bigInteger, int pad) { @@ -1092,7 +1010,6 @@ public byte[] convertToByte(BigInteger bigInteger, int pad) { /** * Returns the bytes corresponding to the BigInteger - * * @param bigInteger number to convert * @return corresponding bytes */ @@ -1102,35 +1019,32 @@ public byte[] convertToByte(BigInteger bigInteger) { /** * Returns the BigInteger represented by the byte array - * * @param row byte array representing row * @return the corresponding BigInteger */ public BigInteger convertToBigInteger(byte[] row) { - return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) - : BigInteger.ZERO; + return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) : BigInteger.ZERO; } @Override public String toString() { - return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) - + "," + rowToStr(lastRow()) + "]"; + return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + "," + + rowToStr(lastRow()) + "]"; } } /** - * A SplitAlgorithm that divides the space of possible keys evenly. Useful - * when the keys are approximately uniform random bytes (e.g. hashes). Rows - * are raw byte values in the range 00 => FF and are right-padded with - * zeros to keep the same memcmp() order. This is the natural algorithm to use - * for a byte[] environment and saves space, but is not necessarily the + * A SplitAlgorithm that divides the space of possible keys evenly. Useful when the keys are + * approximately uniform random bytes (e.g. hashes). Rows are raw byte values in the range 00 + * => FF and are right-padded with zeros to keep the same memcmp() order. This is the + * natural algorithm to use for a byte[] environment and saves space, but is not necessarily the * easiest for readability. 
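For illustration, a minimal sketch of the boundaries the built-in algorithms described above produce, assuming only the public split(int) method shown in this file; the HexStringSplit values follow from its default 00000000..FFFFFFFF range.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit;
import org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit;

public class SplitBoundarySketch {
  public static void main(String[] args) {
    // HexStringSplit spreads hex-string boundaries evenly over 00000000..FFFFFFFF:
    // for 4 regions the three boundaries are 40000000, 80000000, c0000000.
    for (byte[] boundary : new HexStringSplit().split(4)) {
      System.out.println(Bytes.toString(boundary));
    }
    // UniformSplit spreads raw-byte boundaries evenly between 0x00.. and 0xFF..
    for (byte[] boundary : new UniformSplit().split(4)) {
      System.out.println(Bytes.toStringBinary(boundary));
    }
  }
}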
*/ public static class UniformSplit implements SplitAlgorithm { static final byte xFF = (byte) 0xFF; byte[] firstRowBytes = ArrayUtils.EMPTY_BYTE_ARRAY; - byte[] lastRowBytes = - new byte[] {xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF}; + byte[] lastRowBytes = new byte[] { xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF }; + @Override public byte[] split(byte[] start, byte[] end) { return Bytes.split(start, end, 1)[1]; @@ -1138,20 +1052,17 @@ public byte[] split(byte[] start, byte[] end) { @Override public byte[][] split(int numRegions) { - Preconditions.checkArgument( - Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, - "last row (%s) is configured less than first row (%s)", - Bytes.toStringBinary(lastRowBytes), - Bytes.toStringBinary(firstRowBytes)); - - byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, - numRegions - 1); + Preconditions.checkArgument(Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, + "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(lastRowBytes), + Bytes.toStringBinary(firstRowBytes)); + + byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, numRegions - 1); Preconditions.checkState(splits != null, - "Could not split region with given user input: " + this); + "Could not split region with given user input: " + this); // remove endpoints, which are included in the splits list - return splits == null? null: Arrays.copyOfRange(splits, 1, splits.length - 1); + return splits == null ? null : Arrays.copyOfRange(splits, 1, splits.length - 1); } @Override @@ -1162,16 +1073,13 @@ public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive if (Arrays.equals(end, HConstants.EMPTY_BYTE_ARRAY)) { end = lastRowBytes; } - Preconditions.checkArgument( - Bytes.compareTo(end, start) > 0, - "last row (%s) is configured less than first row (%s)", - Bytes.toStringBinary(end), - Bytes.toStringBinary(start)); - - byte[][] splits = Bytes.split(start, end, true, - numSplits - 1); + Preconditions.checkArgument(Bytes.compareTo(end, start) > 0, + "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(end), + Bytes.toStringBinary(start)); + + byte[][] splits = Bytes.split(start, end, true, numSplits - 1); Preconditions.checkState(splits != null, - "Could not calculate input splits with given user input: " + this); + "Could not calculate input splits with given user input: " + this); if (inclusive) { return splits; } else { @@ -1200,7 +1108,6 @@ public void setLastRow(String userInput) { lastRowBytes = Bytes.toBytesBinary(userInput); } - @Override public void setFirstRow(byte[] userInput) { firstRowBytes = userInput; @@ -1228,8 +1135,8 @@ public String separator() { @Override public String toString() { - return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) - + "," + rowToStr(lastRow()) + "]"; + return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + "," + + rowToStr(lastRow()) + "]"; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java index fb2a95417427..1fd17be600f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.yetus.audience.InterfaceAudience; /** - * This class maintains mean and variation for any sequence of input provided to it. - * It is initialized with number of rolling periods which basically means the number of past - * inputs whose data will be considered to maintain mean and variation. - * It will use O(N) memory to maintain these statistics, where N is number of look up periods it - * was initialized with. - * If zero is passed during initialization then it will maintain mean and variance from the - * start. It will use O(1) memory only. But note that since it will maintain mean / variance - * from the start the statistics may behave like constants and may ignore short trends. - * All operations are O(1) except the initialization which is O(N). + * This class maintains mean and variation for any sequence of input provided to it. It is + * initialized with number of rolling periods which basically means the number of past inputs whose + * data will be considered to maintain mean and variation. It will use O(N) memory to maintain these + * statistics, where N is number of look up periods it was initialized with. If zero is passed + * during initialization then it will maintain mean and variance from the start. It will use O(1) + * memory only. But note that since it will maintain mean / variance from the start the statistics + * may behave like constants and may ignore short trends. All operations are O(1) except the + * initialization which is O(N). */ @InterfaceAudience.Private public class RollingStatCalculator { @@ -41,11 +38,10 @@ public class RollingStatCalculator { private int rollingPeriod; private int currentIndexPosition; // to be used only if we have non-zero rolling period - private long [] dataValues; + private long[] dataValues; /** - * Creates a RollingStatCalculator with given number of rolling periods. - * @param rollingPeriod + * Creates a RollingStatCalculator with given number of rolling periods. 
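For illustration, a minimal sketch of the rolling-window behaviour described in the RollingStatCalculator javadoc above, assuming only the insertDataValue/getMean/getDeviation methods shown in this hunk.

import org.apache.hadoop.hbase.util.RollingStatCalculator;

public class RollingStatSketch {
  public static void main(String[] args) {
    // Keep statistics over the last 3 data points only (O(N) memory, N = 3).
    RollingStatCalculator stats = new RollingStatCalculator(3);
    for (long v : new long[] { 10, 20, 30, 40 }) {
      stats.insertDataValue(v);
    }
    // The first value (10) has rolled out of the window, so the mean is (20+30+40)/3 = 30.
    System.out.println("mean=" + stats.getMean() + " stdDev=" + stats.getDeviation());
  }
}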
n */ public RollingStatCalculator(int rollingPeriod) { this.rollingPeriod = rollingPeriod; @@ -57,32 +53,29 @@ public RollingStatCalculator(int rollingPeriod) { } /** - * Inserts given data value to array of data values to be considered for statistics calculation - * @param data + * Inserts given data value to array of data values to be considered for statistics calculation n */ public void insertDataValue(long data) { // if current number of data points already equals rolling period and rolling period is // non-zero then remove one data and update the statistics - if(numberOfDataValues >= rollingPeriod && rollingPeriod > 0) { + if (numberOfDataValues >= rollingPeriod && rollingPeriod > 0) { this.removeData(dataValues[currentIndexPosition]); } numberOfDataValues++; - currentSum = currentSum + (double)data; - currentSqrSum = currentSqrSum + ((double)data * data); - if (rollingPeriod >0) - { + currentSum = currentSum + (double) data; + currentSqrSum = currentSqrSum + ((double) data * data); + if (rollingPeriod > 0) { dataValues[currentIndexPosition] = data; currentIndexPosition = (currentIndexPosition + 1) % rollingPeriod; } } /** - * Update the statistics after removing the given data value - * @param data + * Update the statistics after removing the given data value n */ private void removeData(long data) { - currentSum = currentSum - (double)data; - currentSqrSum = currentSqrSum - ((double)data * data); + currentSum = currentSum - (double) data; + currentSqrSum = currentSqrSum - ((double) data * data); numberOfDataValues--; } @@ -90,25 +83,24 @@ private void removeData(long data) { * @return mean of the data values that are in the current list of data values */ public double getMean() { - return this.currentSum / (double)numberOfDataValues; + return this.currentSum / (double) numberOfDataValues; } /** * @return deviation of the data values that are in the current list of data values */ public double getDeviation() { - double variance = (currentSqrSum - (currentSum*currentSum)/(double)(numberOfDataValues))/ - numberOfDataValues; + double variance = (currentSqrSum - (currentSum * currentSum) / (double) (numberOfDataValues)) + / numberOfDataValues; return Math.sqrt(variance); } /** - * @param size - * @return an array of given size initialized with zeros + * n * @return an array of given size initialized with zeros */ - private long [] fillWithZeros(int size) { - long [] zeros = new long [size]; - for (int i=0; iIf enabled, you can also exclude environment variables containing - * certain substrings by setting {@code "hbase.envvars.logging.skipwords"} - * to comma separated list of such substrings. + * Logs information about the currently running JVM process including the environment variables. + * Logging of env vars can be disabled by setting {@code "hbase.envvars.logging.disabled"} to + * {@code "true"}. + *

    + * If enabled, you can also exclude environment variables containing certain substrings by setting + * {@code "hbase.envvars.logging.skipwords"} to comma separated list of such substrings. */ public static void logProcessInfo(Configuration conf) { logHBaseConfigs(conf); @@ -124,16 +119,14 @@ public static void logProcessInfo(Configuration conf) { } } - nextEnv: - for (Entry entry : System.getenv().entrySet()) { + nextEnv: for (Entry entry : System.getenv().entrySet()) { String key = entry.getKey().toLowerCase(Locale.ROOT); String value = entry.getValue().toLowerCase(Locale.ROOT); // exclude variables which may contain skip words - for(String skipWord : skipWords) { - if (key.contains(skipWord) || value.contains(skipWord)) - continue nextEnv; + for (String skipWord : skipWords) { + if (key.contains(skipWord) || value.contains(skipWord)) continue nextEnv; } - LOG.info("env:"+entry); + LOG.info("env:" + entry); } } @@ -142,10 +135,9 @@ public static void logProcessInfo(Configuration conf) { } /** - * Parse and run the given command line. This will exit the JVM with - * the exit code returned from run(). - * If return code is 0, wait for atmost 30 seconds for all non-daemon threads to quit, - * otherwise exit the jvm + * Parse and run the given command line. This will exit the JVM with the exit code returned from + * run(). If return code is 0, wait for atmost 30 seconds for all non-daemon threads + * to quit, otherwise exit the jvm */ public void doMain(String args[]) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index 1844be641a07..bb37aa159bf0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -39,15 +38,15 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { /** - * Whether asynchronous WAL replication to the secondary region replicas is enabled or not. - * If this is enabled, a replication peer named "region_replica_replication" will be created - * which will tail the logs and replicate the mutatations to region replicas for tables that - * have region replication > 1. If this is enabled once, disabling this replication also - * requires disabling the replication peer using shell or {@link Admin} java class. - * Replication to secondary region replicas works over standard inter-cluster replication.· + * Whether asynchronous WAL replication to the secondary region replicas is enabled or not. If + * this is enabled, a replication peer named "region_replica_replication" will be created which + * will tail the logs and replicate the mutatations to region replicas for tables that have region + * replication > 1. If this is enabled once, disabling this replication also requires disabling + * the replication peer using shell or {@link Admin} java class. 
Replication to secondary region + * replicas works over standard inter-cluster replication.· */ - public static final String REGION_REPLICA_REPLICATION_CONF_KEY - = "hbase.region.replica.replication.enabled"; + public static final String REGION_REPLICA_REPLICATION_CONF_KEY = + "hbase.region.replica.replication.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION = false; /** @@ -59,19 +58,18 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { /** * Same as for {@link #REGION_REPLICA_REPLICATION_CONF_KEY} but for catalog replication. */ - public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY - = "hbase.region.replica.replication.catalog.enabled"; + public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY = + "hbase.region.replica.replication.catalog.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION_CATALOG = false; - /** * Enables or disables refreshing store files of secondary region replicas when the memory is * above the global memstore lower limit. Refreshing the store files means that we will do a file * list of the primary regions store files, and pick up new files. Also depending on the store * files, we can drop some memstore contents which will free up memory. */ - public static final String REGION_REPLICA_STORE_FILE_REFRESH - = "hbase.region.replica.storefile.refresh"; + public static final String REGION_REPLICA_STORE_FILE_REFRESH = + "hbase.region.replica.storefile.refresh"; private static final boolean DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH = true; /** @@ -79,8 +77,8 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * region. Default value assumes that for doing the file refresh, the biggest secondary should be * 4 times bigger than the biggest primary. */ - public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER - = "hbase.region.replica.storefile.refresh.memstore.multiplier"; + public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = + "hbase.region.replica.storefile.refresh.memstore.multiplier"; private static final double DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = 4; /** @@ -100,15 +98,13 @@ public static RegionInfo getRegionInfoForFs(RegionInfo regionInfo) { * @return whether the replica is read only */ public static boolean isReadOnly(HRegion region) { - return region.getTableDescriptor().isReadOnly() - || !isDefaultReplica(region.getRegionInfo()); + return region.getTableDescriptor().isReadOnly() || !isDefaultReplica(region.getRegionInfo()); } /** - * Returns whether to replay the recovered edits to flush the results. - * Currently secondary region replicas do not replay the edits, since it would - * cause flushes which might affect the primary region. Primary regions even opened - * in read only mode should replay the edits. + * Returns whether to replay the recovered edits to flush the results. Currently secondary region + * replicas do not replay the edits, since it would cause flushes which might affect the primary + * region. Primary regions even opened in read only mode should replay the edits. * @param region the HRegion object * @return whether recovered edits should be replayed. */ @@ -117,14 +113,14 @@ public static boolean shouldReplayRecoveredEdits(HRegion region) { } /** - * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the - * files of the primary region, so an HFileLink is used to construct the StoreFileInfo. 
This - * way ensures that the secondary will be able to continue reading the store files even if - * they are moved to archive after compaction + * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the files of the + * primary region, so an HFileLink is used to construct the StoreFileInfo. This way ensures that + * the secondary will be able to continue reading the store files even if they are moved to + * archive after compaction */ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, - RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) - throws IOException { + RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) + throws IOException { // if this is a primary region, just return the StoreFileInfo constructed from path if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) { @@ -133,9 +129,8 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, // else create a store file link. The link file does not exists on filesystem though. if (HFileLink.isHFileLink(path) || StoreFileInfo.isHFile(path)) { - HFileLink link = HFileLink - .build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, - path.getName()); + HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), + regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link); } else if (StoreFileInfo.isReference(path)) { Reference reference = Reference.read(fs, path); @@ -146,9 +141,8 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference, link); } else { // Reference - HFileLink link = HFileLink - .build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, - path.getName()); + HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), + regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference); } } else { @@ -158,11 +152,11 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, /** * @return True if Region Read Replica is enabled for tn (whether hbase:meta or - * user-space tables). + * user-space tables). */ public static boolean isRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { - return isMetaRegionReplicaReplicationEnabled(conf, tn) || - isRegionReplicaReplicationEnabled(conf); + return isMetaRegionReplicaReplicationEnabled(conf, tn) + || isRegionReplicaReplicationEnabled(conf); } /** @@ -176,9 +170,8 @@ private static boolean isRegionReplicaReplicationEnabled(Configuration conf) { * @return True if hbase:meta Region Read Replica is enabled. 
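For illustration, a minimal sketch of flipping the two replication switches documented above; the configuration keys are taken verbatim from this hunk, while the table name "t1" is invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

public class ReplicaReplicationConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Async WAL replication to secondary replicas of user-space tables.
    conf.setBoolean("hbase.region.replica.replication.enabled", true);
    // Separate switch for hbase:meta replicas.
    conf.setBoolean("hbase.region.replica.replication.catalog.enabled", true);

    System.out.println(ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(
      conf, TableName.valueOf("t1")));                     // true: user-space switch is on
    System.out.println(ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(
      conf, TableName.META_TABLE_NAME));                   // true: catalog switch is on
  }
}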
*/ public static boolean isMetaRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { - return TableName.isMetaTableName(tn) && - conf.getBoolean(REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, - DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); + return TableName.isMetaTableName(tn) && conf.getBoolean( + REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java index 17da6812fe68..2be277d8d96c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,7 @@ /** * This class provides ShutdownHookManager shims for HBase to interact with the Hadoop 1.0.x and the - * Hadoop 2.0+ series. - * - * NOTE: No testing done against 0.22.x, or 0.21.x. + * Hadoop 2.0+ series. NOTE: No testing done against 0.22.x, or 0.21.x. */ @InterfaceAudience.Private abstract public class ShutdownHookManager { @@ -75,10 +73,8 @@ private static class ShutdownHookManagerV2 extends ShutdownHookManager { public void addShutdownHook(Thread shutdownHookThread, int priority) { try { Methods.call(shutdownHookManagerClass, - Methods.call(shutdownHookManagerClass, null, "get", null, null), - "addShutdownHook", - new Class[] { Runnable.class, int.class }, - new Object[] { shutdownHookThread, priority }); + Methods.call(shutdownHookManagerClass, null, "get", null, null), "addShutdownHook", + new Class[] { Runnable.class, int.class }, new Object[] { shutdownHookThread, priority }); } catch (Exception ex) { throw new RuntimeException("we could not use ShutdownHookManager.addShutdownHook", ex); } @@ -87,12 +83,9 @@ public void addShutdownHook(Thread shutdownHookThread, int priority) { @Override public boolean removeShutdownHook(Runnable shutdownHook) { try { - return (Boolean) - Methods.call(shutdownHookManagerClass, - Methods.call(shutdownHookManagerClass, null, "get", null, null), - "removeShutdownHook", - new Class[] { Runnable.class }, - new Object[] { shutdownHook }); + return (Boolean) Methods.call(shutdownHookManagerClass, + Methods.call(shutdownHookManagerClass, null, "get", null, null), "removeShutdownHook", + new Class[] { Runnable.class }, new Object[] { shutdownHook }); } catch (Exception ex) { throw new RuntimeException("we could not use ShutdownHookManager", ex); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java index f896e550a169..637ec5cc4b50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.util.ArrayList; @@ -29,35 +28,31 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Simple sorted list implementation that uses {@link java.util.ArrayList} as - * the underlying collection so we can support RandomAccess. 
All mutations - * create a new copy of the ArrayList instance, so can be - * expensive. This class is only intended for use on small, very rarely - * written collections that expect highly concurrent reads. + * Simple sorted list implementation that uses {@link java.util.ArrayList} as the underlying + * collection so we can support RandomAccess. All mutations create a new copy of the + * ArrayList instance, so can be expensive. This class is only intended for use on + * small, very rarely written collections that expect highly concurrent reads. *

    - * Read operations are performed on a reference to the internal list at the - * time of invocation, so will not see any mutations to the collection during - * their operation. Iterating over list elements manually using the - * RandomAccess pattern involves multiple operations. For this to be safe get - * a reference to the internal list first using get(). + * Read operations are performed on a reference to the internal list at the time of invocation, so + * will not see any mutations to the collection during their operation. Iterating over list elements + * manually using the RandomAccess pattern involves multiple operations. For this to be safe get a + * reference to the internal list first using get(). *

    - * If constructed with a {@link java.util.Comparator}, the list will be sorted - * using the comparator. Adding or changing an element using an index will - * trigger a resort. + * If constructed with a {@link java.util.Comparator}, the list will be sorted using the comparator. + * Adding or changing an element using an index will trigger a resort. *

    * Iterators are read-only. They cannot be used to remove elements. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UG_SYNC_SET_UNSYNC_GET", - justification="TODO: synchronization in here needs review!!!") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UG_SYNC_SET_UNSYNC_GET", + justification = "TODO: synchronization in here needs review!!!") @InterfaceAudience.Private public class SortedList implements List, RandomAccess { private volatile List list; private final Comparator comparator; /** - * Constructs an empty list with the default initial capacity that will be - * sorted using the given comparator. - * + * Constructs an empty list with the default initial capacity that will be sorted using the given + * comparator. * @param comparator the comparator */ public SortedList(Comparator comparator) { @@ -66,11 +61,9 @@ public SortedList(Comparator comparator) { } /** - * Constructs a list containing the elements of the given collection, in the - * order returned by the collection's iterator, that will be sorted with the - * given comparator. - * - * @param c the collection + * Constructs a list containing the elements of the given collection, in the order returned by the + * collection's iterator, that will be sorted with the given comparator. + * @param c the collection * @param comparator the comparator */ public SortedList(Collection c, Comparator comparator) { @@ -79,10 +72,9 @@ public SortedList(Collection c, Comparator comparator) { } /** - * Returns a reference to the unmodifiable list currently backing the SortedList. - * Changes to the SortedList will not be reflected in this list. Use this - * method to get a reference for iterating over using the RandomAccess - * pattern. + * Returns a reference to the unmodifiable list currently backing the SortedList. Changes to the + * SortedList will not be reflected in this list. Use this method to get a reference for iterating + * over using the RandomAccess pattern. */ public List get() { // FindBugs: UG_SYNC_SET_UNSYNC_GET complaint. Fix!! return list; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java index efd3da3a88d0..7b41331abeb9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,8 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.Comparator; import java.util.concurrent.BlockingQueue; import java.util.concurrent.PriorityBlockingQueue; @@ -27,17 +24,16 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.yetus.audience.InterfaceAudience; /** - * This queue allows a ThreadPoolExecutor to steal jobs from another ThreadPoolExecutor. - * This queue also acts as the factory for creating the PriorityBlockingQueue to be used in the - * steal-from ThreadPoolExecutor. The behavior of this queue is the same as a normal - * PriorityBlockingQueue except the take/poll(long,TimeUnit) methods would also check whether there - * are jobs in the steal-from queue if this q ueue is empty. 
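For illustration, a minimal sketch of the read pattern the SortedList javadoc above prescribes: take the backing list once via get(), then index into that stable reference so concurrent mutations (which swap in a new copy) cannot affect the iteration. It assumes only the constructor and get() shown in this hunk.

import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.hbase.util.SortedList;

public class SortedListReadSketch {
  public static void main(String[] args) {
    SortedList<Integer> sorted = new SortedList<>(Comparator.naturalOrder());
    sorted.add(3);
    sorted.add(1);
    sorted.add(2);

    List<Integer> snapshot = sorted.get();   // stable, unmodifiable view for this read
    for (int i = 0; i < snapshot.size(); i++) {
      System.out.println(snapshot.get(i));   // prints 1, 2, 3
    }
  }
}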
- * - * Note the workers in ThreadPoolExecutor must be pre-started so that they can steal job from the - * other queue, otherwise the worker will only be started after there are jobs submitted to main - * queue. + * This queue allows a ThreadPoolExecutor to steal jobs from another ThreadPoolExecutor. This queue + * also acts as the factory for creating the PriorityBlockingQueue to be used in the steal-from + * ThreadPoolExecutor. The behavior of this queue is the same as a normal PriorityBlockingQueue + * except the take/poll(long,TimeUnit) methods would also check whether there are jobs in the + * steal-from queue if this q ueue is empty. Note the workers in ThreadPoolExecutor must be + * pre-started so that they can steal job from the other queue, otherwise the worker will only be + * started after there are jobs submitted to main queue. */ @InterfaceAudience.Private public class StealJobQueue extends PriorityBlockingQueue { @@ -54,7 +50,7 @@ public StealJobQueue(Comparator comparator) { } public StealJobQueue(int initCapacity, int stealFromQueueInitCapacity, - Comparator comparator) { + Comparator comparator) { super(initCapacity, comparator); this.stealFromQueue = new PriorityBlockingQueue(stealFromQueueInitCapacity, comparator) { @@ -92,7 +88,6 @@ public boolean offer(T t) { } } - @Override public T take() throws InterruptedException { lock.lockInterruptibly(); @@ -124,8 +119,7 @@ public T poll(long timeout, TimeUnit unit) throws InterruptedException { retVal = stealFromQueue.poll(); } if (retVal == null) { - if (nanos <= 0) - return null; + if (nanos <= 0) return null; nanos = notEmpty.awaitNanos(nanos); } else { return retVal; @@ -136,4 +130,3 @@ public T poll(long timeout, TimeUnit unit) throws InterruptedException { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index 5a28187b8245..6683b8734a88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -50,11 +49,11 @@ public final class TableDescriptorChecker { public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks"; public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true; - //should we check the compression codec type at master side, default true, HBASE-6370 + // should we check the compression codec type at master side, default true, HBASE-6370 public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression"; public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true; - //should we check encryption settings at master side, default true + // should we check encryption settings at master side, default true public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption"; public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true; @@ -62,14 +61,12 @@ private TableDescriptorChecker() { } /** - * Checks whether the table conforms to some sane limits, and configured - * values (compression, etc) work. Throws an exception if something is wrong. + * Checks whether the table conforms to some sane limits, and configured values (compression, etc) + * work. Throws an exception if something is wrong. */ public static void sanityCheck(final Configuration c, final TableDescriptor td) - throws IOException { - CompoundConfiguration conf = new CompoundConfiguration() - .add(c) - .addBytesMap(td.getValues()); + throws IOException { + CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues()); // Setting this to true logs the warning instead of throwing exception boolean logWarn = false; @@ -85,14 +82,13 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check - long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? - conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : - Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); + long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null + ? 
conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) + : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { - String message = - "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + - maxFileSize + ") is too small, which might cause over splitting into unmanageable " + - "number of regions."; + String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + + maxFileSize + ") is too small, which might cause over splitting into unmanageable " + + "number of regions."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -100,13 +96,13 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in // hbase-site.xml, use flushSizeLowerLimit instead to skip this check - long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? - conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : - Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); + long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null + ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) + : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { - String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + - "\"hbase.hregion.memstore.flush.size\" (" + flushSize + - ") is too small, which might cause" + " very frequent flushing."; + String message = + "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + + flushSize + ") is too small, which might cause" + " very frequent flushing."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -167,25 +163,25 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check blockSize if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) { - String message = "Block size for column family " + hcd.getNameAsString() + - " must be between 1K and 16MB."; + String message = "Block size for column family " + hcd.getNameAsString() + + " must be between 1K and 16MB."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check versions if (hcd.getMinVersions() < 0) { String message = - "Min versions for column family " + hcd.getNameAsString() + " must be positive."; + "Min versions for column family " + hcd.getNameAsString() + " must be positive."; warnOrThrowExceptionForFailure(logWarn, message, null); } // max versions already being checked // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor - // does not throw IllegalArgumentException + // does not throw IllegalArgumentException // check minVersions <= maxVerions if (hcd.getMinVersions() > hcd.getMaxVersions()) { - String message = "Min versions for column family " + hcd.getNameAsString() + - " must be less than the Max versions."; + String message = "Min versions for column family " + hcd.getNameAsString() + + " must be less than the Max versions."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -197,8 +193,8 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check data replication factor, it 
can be 0(default value) when user has not explicitly // set the value, in this case we use default replication factor set in the file system. if (hcd.getDFSReplication() < 0) { - String message = "HFile Replication for column family " + hcd.getNameAsString() + - " must be greater than zero."; + String message = "HFile Replication for column family " + hcd.getNameAsString() + + " must be greater than zero."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -215,9 +211,8 @@ private static void checkReplicationScope(final ColumnFamilyDescriptor cfd) thro // check replication scope WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(cfd.getScope()); if (scop == null) { - String message = - "Replication scope for column family " + cfd.getNameAsString() + " is " + cfd.getScope() + - " which is invalid."; + String message = "Replication scope for column family " + cfd.getNameAsString() + " is " + + cfd.getScope() + " which is invalid."; LOG.error(message); throw new DoNotRetryIOException(message); @@ -225,13 +220,13 @@ private static void checkReplicationScope(final ColumnFamilyDescriptor cfd) thro } private static void checkCompactionPolicy(Configuration conf, TableDescriptor td) - throws IOException { + throws IOException { // FIFO compaction has some requirements // Actually FCP ignores periodic major compactions String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); if (className == null) { className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, - ExploringCompactionPolicy.class.getName()); + ExploringCompactionPolicy.class.getName()); } int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT; @@ -244,7 +239,7 @@ private static void checkCompactionPolicy(Configuration conf, TableDescriptor td for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { String compactionPolicy = - hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); + hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); if (compactionPolicy == null) { compactionPolicy = className; } @@ -273,8 +268,8 @@ private static void checkCompactionPolicy(Configuration conf, TableDescriptor td } if (blockingFileCount < 1000) { message = - "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + - " is below recommended minimum of 1000 for column family " + hcd.getNameAsString(); + "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString(); throw new IOException(message); } } @@ -299,24 +294,24 @@ public static void checkCompression(final TableDescriptor td) throws IOException } public static void checkEncryption(final Configuration conf, final TableDescriptor td) - throws IOException { + throws IOException { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey()); } } public static void checkClassLoading(final Configuration conf, final TableDescriptor td) - throws IOException { + throws IOException { RegionSplitPolicy.getSplitPolicyClass(td, conf); RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td); } // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled. 
private static void warnOrThrowExceptionForFailure(boolean logWarn, String message, - Exception cause) throws IOException { + Exception cause) throws IOException { if (!logWarn) { - throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + - " to false at conf or table descriptor if you want to bypass sanity checks", cause); + throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + + " to false at conf or table descriptor if you want to bypass sanity checks", cause); } LOG.warn(message); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java index 3070fb37277d..72f874fe7a2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,8 @@ public final class YammerHistogramUtils { // not for public consumption - private YammerHistogramUtils() {} + private YammerHistogramUtils() { + } /** * Used formatting doubles so only two places after decimal point. @@ -38,13 +38,12 @@ private YammerHistogramUtils() {} private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00"); /** - * Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are - * not public in 2.2.0, so we use reflection to find them. + * Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are not public + * in 2.2.0, so we use reflection to find them. */ public static Histogram newHistogram(Reservoir sample) { try { - Constructor ctor = - Histogram.class.getDeclaredConstructor(Reservoir.class); + Constructor ctor = Histogram.class.getDeclaredConstructor(Reservoir.class); ctor.setAccessible(true); return (Histogram) ctor.newInstance(sample); } catch (Exception e) { @@ -55,44 +54,41 @@ public static Histogram newHistogram(Reservoir sample) { /** @return an abbreviated summary of {@code hist}. */ public static String getShortHistogramReport(final Histogram hist) { Snapshot sn = hist.getSnapshot(); - return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + - ", min=" + DOUBLE_FORMAT.format(sn.getMin()) + - ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + - ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + - ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) + - ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()); + return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min=" + + DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + + ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 95th=" + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th=" + + DOUBLE_FORMAT.format(sn.get99thPercentile()); } /** @return a summary of {@code hist}. 
*/ public static String getHistogramReport(final Histogram hist) { Snapshot sn = hist.getSnapshot(); - return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + - ", min=" + DOUBLE_FORMAT.format(sn.getMin()) + - ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + - ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + - ", 50th=" + DOUBLE_FORMAT.format(sn.getMedian()) + - ", 75th=" + DOUBLE_FORMAT.format(sn.get75thPercentile()) + - ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) + - ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()) + - ", 99.9th=" + DOUBLE_FORMAT.format(sn.get999thPercentile()) + - ", 99.99th=" + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + - ", 99.999th=" + DOUBLE_FORMAT.format(sn.getValue(0.99999)); + return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min=" + + DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + + ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 50th=" + + DOUBLE_FORMAT.format(sn.getMedian()) + ", 75th=" + + DOUBLE_FORMAT.format(sn.get75thPercentile()) + ", 95th=" + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th=" + + DOUBLE_FORMAT.format(sn.get99thPercentile()) + ", 99.9th=" + + DOUBLE_FORMAT.format(sn.get999thPercentile()) + ", 99.99th=" + + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + ", 99.999th=" + + DOUBLE_FORMAT.format(sn.getValue(0.99999)); } /** @return pretty summary of {@code hist}. */ public static String getPrettyHistogramReport(final Histogram h) { Snapshot sn = h.getSnapshot(); - return - "Mean = " + DOUBLE_FORMAT.format(sn.getMean()) + "\n" + - "Min = " + DOUBLE_FORMAT.format(sn.getMin()) + "\n" + - "Max = " + DOUBLE_FORMAT.format(sn.getMax()) + "\n" + - "StdDev = " + DOUBLE_FORMAT.format(sn.getStdDev()) + "\n" + - "50th = " + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" + - "75th = " + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" + - "95th = " + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" + - "99th = " + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" + - "99.9th = " + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" + - "99.99th = " + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" + - "99.999th = " + DOUBLE_FORMAT.format(sn.getValue(0.99999)); + return "Mean = " + DOUBLE_FORMAT.format(sn.getMean()) + "\n" + "Min = " + + DOUBLE_FORMAT.format(sn.getMin()) + "\n" + "Max = " + + DOUBLE_FORMAT.format(sn.getMax()) + "\n" + "StdDev = " + + DOUBLE_FORMAT.format(sn.getStdDev()) + "\n" + "50th = " + + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" + "75th = " + + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" + "95th = " + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" + "99th = " + + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" + "99.9th = " + + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" + "99.99th = " + + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" + "99.999th = " + + DOUBLE_FORMAT.format(sn.getValue(0.99999)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index 78ef55ca2c5b..173c202e2d34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -35,6 +32,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; + /** * Utlity method to migrate zookeeper data across HBase versions. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. @@ -45,43 +45,40 @@ public class ZKDataMigrator { private static final Logger LOG = LoggerFactory.getLogger(ZKDataMigrator.class); // Shutdown constructor. - private ZKDataMigrator() {} + private ZKDataMigrator() { + } /** - * Method for table states migration. - * Used when upgrading from pre-2.0 to 2.0 - * Reading state from zk, applying them to internal state - * and delete. - * Used by master to clean migration from zk based states to - * table descriptor based states. + * Method for table states migration. Used when upgrading from pre-2.0 to 2.0 Reading state from + * zk, applying them to internal state and delete. Used by master to clean migration from zk based + * states to table descriptor based states. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. */ @Deprecated public static Map queryForTableStates(ZKWatcher zkw) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { Map rv = new HashMap<>(); List children = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().tableZNode); - if (children == null) - return rv; - for (String child: children) { + if (children == null) return rv; + for (String child : children) { TableName tableName = TableName.valueOf(child); ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName); TableState.State newState = TableState.State.ENABLED; if (state != null) { switch (state) { - case ENABLED: - newState = TableState.State.ENABLED; - break; - case DISABLED: - newState = TableState.State.DISABLED; - break; - case DISABLING: - newState = TableState.State.DISABLING; - break; - case ENABLING: - newState = TableState.State.ENABLING; - break; - default: + case ENABLED: + newState = TableState.State.ENABLED; + break; + case DISABLED: + newState = TableState.State.DISABLED; + break; + case DISABLING: + newState = TableState.State.DISABLING; + break; + case ENABLING: + newState = TableState.State.ENABLING; + break; + default: } } rv.put(tableName, newState); @@ -91,26 +88,23 @@ public static Map queryForTableStates(ZKWatcher zkw /** * Gets table state from ZK. - * @param zkw ZKWatcher instance to use + * @param zkw ZKWatcher instance to use * @param tableName table we're checking * @return Null or - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} - * found in znode. - * @throws KeeperException - * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} + * found in znode. 
n * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. */ @Deprecated - private static ZooKeeperProtos.DeprecatedTableState.State getTableState( - final ZKWatcher zkw, final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, - tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); + private static ZooKeeperProtos.DeprecatedTableState.State getTableState(final ZKWatcher zkw, + final TableName tableName) throws KeeperException, InterruptedException { + String znode = + ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, tableName.getNameAsString()); + byte[] data = ZKUtil.getData(zkw, znode); if (data == null || data.length <= 0) return null; try { ProtobufUtil.expectPBMagicPrefix(data); ZooKeeperProtos.DeprecatedTableState.Builder builder = - ZooKeeperProtos.DeprecatedTableState.newBuilder(); + ZooKeeperProtos.DeprecatedTableState.newBuilder(); int magicLen = ProtobufUtil.lengthOfPBMagic(); ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); return builder.getState(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java index 9be182d245f7..ee8517739e70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -126,9 +126,9 @@ Optional getLargestQueueFromServersNotCompacting() { lock.readLock().lock(); try { return compactionQueues.entrySet().stream() - .filter(entry -> !compactingServers.contains(entry.getKey())) - .max(Map.Entry.comparingByValue( - (o1, o2) -> Integer.compare(o1.size(), o2.size()))).map(Map.Entry::getKey); + .filter(entry -> !compactingServers.contains(entry.getKey())) + .max(Map.Entry.comparingByValue((o1, o2) -> Integer.compare(o1.size(), o2.size()))) + .map(Map.Entry::getKey); } finally { lock.readLock().unlock(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java index 2112b97c741f..31aded84109c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,16 +52,14 @@ class MajorCompactionRequest { this.region = region; } - MajorCompactionRequest(Connection connection, RegionInfo region, - Set stores) { + MajorCompactionRequest(Connection connection, RegionInfo region, Set stores) { this(connection, region); this.stores = stores; } static Optional newRequest(Connection connection, RegionInfo info, - Set stores, long timestamp) throws IOException { - MajorCompactionRequest request = - new MajorCompactionRequest(connection, info, stores); + Set stores, long timestamp) throws IOException { + MajorCompactionRequest request = new MajorCompactionRequest(connection, info, stores); return request.createRequest(connection, stores, timestamp); } @@ -77,8 +75,8 @@ void setStores(Set stores) { this.stores = stores; } - Optional createRequest(Connection connection, - Set stores, long timestamp) throws IOException { + Optional createRequest(Connection connection, Set stores, + long timestamp) throws IOException { Set familiesToCompact = getStoresRequiringCompaction(stores, timestamp); MajorCompactionRequest request = null; if (!familiesToCompact.isEmpty()) { @@ -88,7 +86,7 @@ Optional createRequest(Connection connection, } Set getStoresRequiringCompaction(Set requestedStores, long timestamp) - throws IOException { + throws IOException { HRegionFileSystem fileSystem = getFileSystem(); Set familiesToCompact = Sets.newHashSet(); for (String family : requestedStores) { @@ -100,37 +98,36 @@ Set getStoresRequiringCompaction(Set requestedStores, long times } boolean shouldCFBeCompacted(HRegionFileSystem fileSystem, String family, long ts) - throws IOException { + throws IOException { // do we have any store files? Collection storeFiles = fileSystem.getStoreFiles(family); if (storeFiles == null) { - LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName(), " has no store files"); + LOG.info("Excluding store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName(), " has no store files"); return false; } // check for reference files if (fileSystem.hasReferences(family) && familyHasReferenceFile(fileSystem, family, ts)) { LOG.info("Including store: " + family + " with: " + storeFiles.size() - + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); + + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); return true; } // check store file timestamps boolean includeStore = this.shouldIncludeStore(fileSystem, family, storeFiles, ts); if (!includeStore) { - LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName() + " already compacted"); + LOG.info("Excluding store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName() + " already compacted"); } return includeStore; } protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family, - Collection storeFiles, long ts) throws IOException { + Collection storeFiles, long ts) throws IOException { for (StoreFileInfo storeFile : storeFiles) { if (storeFile.getModificationTime() < ts) { LOG.info("Including store: " + family + " with: " + storeFiles.size() - + " files for compaction for region: " - + fileSystem.getRegionInfo().getEncodedName()); + + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); return true; } } @@ -138,14 +135,14 @@ 
protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family } protected boolean familyHasReferenceFile(HRegionFileSystem fileSystem, String family, long ts) - throws IOException { + throws IOException { List referenceFiles = - getReferenceFilePaths(fileSystem.getFileSystem(), fileSystem.getStoreDir(family)); + getReferenceFilePaths(fileSystem.getFileSystem(), fileSystem.getStoreDir(family)); for (Path referenceFile : referenceFiles) { FileStatus status = fileSystem.getFileSystem().getFileLinkStatus(referenceFile); if (status.getModificationTime() < ts) { - LOG.info("Including store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName() + " (reference store files)"); + LOG.info("Including store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName() + " (reference store files)"); return true; } } @@ -153,17 +150,16 @@ protected boolean familyHasReferenceFile(HRegionFileSystem fileSystem, String fa } - List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) - throws IOException { + List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) throws IOException { return FSUtils.getReferenceFilePaths(fileSystem, familyDir); } HRegionFileSystem getFileSystem() throws IOException { try (Admin admin = connection.getAdmin()) { return HRegionFileSystem.openRegionFromFileSystem(admin.getConfiguration(), - CommonFSUtils.getCurrentFileSystem(admin.getConfiguration()), - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(admin.getConfiguration()), - region.getTable()), region, true); + CommonFSUtils.getCurrentFileSystem(admin.getConfiguration()), CommonFSUtils.getTableDir( + CommonFSUtils.getRootDir(admin.getConfiguration()), region.getTable()), + region, true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java index a12fa71080ce..c84c01dbad2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -49,13 +48,13 @@ public class MajorCompactionTTLRequest extends MajorCompactionRequest { } static Optional newRequest(Connection connection, RegionInfo info, - TableDescriptor htd) throws IOException { + TableDescriptor htd) throws IOException { MajorCompactionTTLRequest request = new MajorCompactionTTLRequest(connection, info); return request.createRequest(connection, htd); } private Optional createRequest(Connection connection, TableDescriptor htd) - throws IOException { + throws IOException { Map familiesToCompact = getStoresRequiringCompaction(htd); MajorCompactionRequest request = null; if (!familiesToCompact.isEmpty()) { @@ -88,15 +87,14 @@ private long getColFamilyCutoffTime(ColumnFamilyDescriptor colDesc) { @Override protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family, - Collection storeFiles, long ts) throws IOException { + Collection storeFiles, long ts) throws IOException { for (StoreFileInfo storeFile : storeFiles) { // Lets only compact when all files are older than TTL if (storeFile.getModificationTime() >= ts) { LOG.info("There is atleast one file in store: " + family + " file: " + storeFile.getPath() - + " with timestamp " + storeFile.getModificationTime() - + " for region: " + fileSystem.getRegionInfo().getEncodedName() - + " older than TTL: " + ts); + + " with timestamp " + storeFile.getModificationTime() + " for region: " + + fileSystem.getRegionInfo().getEncodedName() + " older than TTL: " + ts); return false; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java index b8c8626d8189..a987bef33408 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -86,7 +87,7 @@ public class MajorCompactor extends Configured implements Tool { } public MajorCompactor(Configuration conf, TableName tableName, Set storesToCompact, - int concurrency, long timestamp, long sleepForMs) throws IOException { + int concurrency, long timestamp, long sleepForMs) throws IOException { this.connection = ConnectionFactory.createConnection(conf); this.tableName = tableName; this.timestamp = timestamp; @@ -104,7 +105,7 @@ public void compactAllRegions() throws Exception { Thread.sleep(sleepForMs); } Optional serverToProcess = - clusterCompactionQueues.getLargestQueueFromServersNotCompacting(); + clusterCompactionQueues.getLargestQueueFromServersNotCompacting(); if (serverToProcess.isPresent() && clusterCompactionQueues.hasWorkItems()) { ServerName serverName = serverToProcess.get(); // check to see if the region has moved... if so we have to enqueue it again with @@ -112,18 +113,18 @@ public void compactAllRegions() throws Exception { MajorCompactionRequest request = clusterCompactionQueues.reserveForCompaction(serverName); ServerName currentServer = connection.getRegionLocator(tableName) - .getRegionLocation(request.getRegion().getStartKey()).getServerName(); + .getRegionLocation(request.getRegion().getStartKey()).getServerName(); if (!currentServer.equals(serverName)) { // add it back to the queue with the correct server it should be picked up in the future. LOG.info("Server changed for region: " + request.getRegion().getEncodedName() + " from: " - + serverName + " to: " + currentServer + " re-queuing request"); + + serverName + " to: " + currentServer + " re-queuing request"); clusterCompactionQueues.addToCompactionQueue(currentServer, request); clusterCompactionQueues.releaseCompaction(serverName); } else { LOG.info("Firing off compaction request for server: " + serverName + ", " + request - + " total queue size left: " + clusterCompactionQueues - .getCompactionRequestsLeftToFinish()); + + " total queue size left: " + + clusterCompactionQueues.getCompactionRequestsLeftToFinish()); futures.add(executor.submit(new Compact(serverName, request))); } } else { @@ -143,11 +144,10 @@ public void shutdown() throws Exception { executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); if (!ERRORS.isEmpty()) { - StringBuilder builder = - new StringBuilder().append("Major compaction failed, there were: ").append(ERRORS.size()) - .append(" regions / stores that failed compacting\n") - .append("Failed compaction requests\n").append("--------------------------\n") - .append(Joiner.on("\n").join(ERRORS)); + StringBuilder builder = new StringBuilder().append("Major compaction failed, there were: ") + .append(ERRORS.size()).append(" regions / stores that failed compacting\n") + .append("Failed compaction requests\n").append("--------------------------\n") + .append(Joiner.on("\n").join(ERRORS)); LOG.error(builder.toString()); } if (connection != null) { @@ -160,25 +160,25 @@ public void shutdown() throws Exception { void initializeWorkQueues() throws IOException { if (storesToCompact.isEmpty()) { connection.getTable(tableName).getDescriptor().getColumnFamilyNames() - .forEach(a -> storesToCompact.add(Bytes.toString(a))); + .forEach(a -> storesToCompact.add(Bytes.toString(a))); LOG.info("No family specified, will execute for all families"); } LOG.info( - "Initializing compaction queues for table: " + tableName + " with cf: " + storesToCompact); + "Initializing compaction 
queues for table: " + tableName + " with cf: " + storesToCompact); Map> snRegionMap = getServerRegionsMap(); /* - * If numservers is specified, stop inspecting regions beyond the numservers, it will serve - * to throttle and won't end up scanning all the regions in the event there are not many - * regions to compact based on the criteria. + * If numservers is specified, stop inspecting regions beyond the numservers, it will serve to + * throttle and won't end up scanning all the regions in the event there are not many regions to + * compact based on the criteria. */ for (ServerName sn : getServersToCompact(snRegionMap.keySet())) { List regions = snRegionMap.get(sn); LOG.debug("Table: " + tableName + " Server: " + sn + " No of regions: " + regions.size()); /* - * If the tool is run periodically, then we could shuffle the regions and provide - * some random order to select regions. Helps if numregions is specified. + * If the tool is run periodically, then we could shuffle the regions and provide some random + * order to select regions. Helps if numregions is specified. */ Collections.shuffle(regions); int regionsToCompact = numRegions; @@ -201,12 +201,12 @@ void initializeWorkQueues() throws IOException { } protected Optional getMajorCompactionRequest(RegionInfo hri) - throws IOException { + throws IOException { return MajorCompactionRequest.newRequest(connection, hri, storesToCompact, timestamp); } private Collection getServersToCompact(Set snSet) { - if(numServers < 0 || snSet.size() <= numServers) { + if (numServers < 0 || snSet.size() <= numServers) { return snSet; } else { @@ -219,7 +219,7 @@ private Collection getServersToCompact(Set snSet) { private Map> getServerRegionsMap() throws IOException { Map> snRegionMap = Maps.newHashMap(); List regionLocations = - connection.getRegionLocator(tableName).getAllRegionLocations(); + connection.getRegionLocator(tableName).getAllRegionLocations(); for (HRegionLocation regionLocation : regionLocations) { ServerName sn = regionLocation.getServerName(); RegionInfo hri = regionLocation.getRegion(); @@ -253,7 +253,8 @@ class Compact implements Runnable { this.request = request; } - @Override public void run() { + @Override + public void run() { try { compactAndWait(request); } catch (NotServingRegionException e) { @@ -290,15 +291,15 @@ void compactAndWait(MajorCompactionRequest request) throws Exception { if (!skipWait) { while (isCompacting(request)) { Thread.sleep(sleepForMs); - LOG.debug("Waiting for compaction to complete for region: " + request.getRegion() - .getEncodedName()); + LOG.debug("Waiting for compaction to complete for region: " + + request.getRegion().getEncodedName()); } } } finally { if (!skipWait) { // Make sure to wait for the CompactedFileDischarger chore to do its work int waitForArchive = connection.getConfiguration() - .getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000); + .getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000); Thread.sleep(waitForArchive); // check if compaction completed successfully, otherwise put that request back in the // proper queue @@ -308,52 +309,50 @@ void compactAndWait(MajorCompactionRequest request) throws Exception { // the new regionserver doesn't pick it up because its accounted for in the WAL replay, // thus you have more store files on the filesystem than the regionserver knows about. 
boolean regionHasNotMoved = connection.getRegionLocator(tableName) - .getRegionLocation(request.getRegion().getStartKey()).getServerName() - .equals(serverName); + .getRegionLocation(request.getRegion().getStartKey()).getServerName() + .equals(serverName); if (regionHasNotMoved) { LOG.error( - "Not all store files were compacted, this may be due to the regionserver not " - + "being aware of all store files. Will not reattempt compacting, " - + request); + "Not all store files were compacted, this may be due to the regionserver not " + + "being aware of all store files. Will not reattempt compacting, " + request); ERRORS.add(request); } else { request.setStores(storesRequiringCompaction); clusterCompactionQueues.addToCompactionQueue(serverName, request); LOG.info("Compaction failed for the following stores: " + storesRequiringCompaction - + " region: " + request.getRegion().getEncodedName()); + + " region: " + request.getRegion().getEncodedName()); } } else { LOG.info("Compaction complete for region: " + request.getRegion().getEncodedName() - + " -> cf(s): " + request.getStores()); + + " -> cf(s): " + request.getStores()); } } } } private void compactRegionOnServer(MajorCompactionRequest request, Admin admin, String store) - throws IOException { - admin.majorCompactRegion(request.getRegion().getEncodedNameAsBytes(), - Bytes.toBytes(store)); + throws IOException { + admin.majorCompactRegion(request.getRegion().getEncodedNameAsBytes(), Bytes.toBytes(store)); } } private boolean isCompacting(MajorCompactionRequest request) throws Exception { CompactionState compactionState = connection.getAdmin() - .getCompactionStateForRegion(request.getRegion().getEncodedNameAsBytes()); - return compactionState.equals(CompactionState.MAJOR) || compactionState - .equals(CompactionState.MAJOR_AND_MINOR); + .getCompactionStateForRegion(request.getRegion().getEncodedNameAsBytes()); + return compactionState.equals(CompactionState.MAJOR) + || compactionState.equals(CompactionState.MAJOR_AND_MINOR); } private void addNewRegions() { try { List locations = - connection.getRegionLocator(tableName).getAllRegionLocations(); + connection.getRegionLocator(tableName).getAllRegionLocations(); for (HRegionLocation location : locations) { if (location.getRegion().getRegionId() > timestamp) { Optional compactionRequest = MajorCompactionRequest - .newRequest(connection, location.getRegion(), storesToCompact, timestamp); + .newRequest(connection, location.getRegion(), storesToCompact, timestamp); compactionRequest.ifPresent(request -> clusterCompactionQueues - .addToCompactionQueue(location.getServerName(), request)); + .addToCompactionQueue(location.getServerName(), request)); } } } catch (IOException e) { @@ -362,7 +361,7 @@ private void addNewRegions() { } protected Set getStoresRequiringCompaction(MajorCompactionRequest request) - throws IOException { + throws IOException { return request.getStoresRequiringCompaction(storesToCompact, timestamp); } @@ -370,104 +369,48 @@ protected Options getCommonOptions() { Options options = new Options(); options.addOption( - Option.builder("servers") - .required() - .desc("Concurrent servers compacting") - .hasArg() - .build() - ); - options.addOption( - Option.builder("minModTime"). 
- desc("Compact if store files have modification time < minModTime") - .hasArg() - .build() - ); - options.addOption( - Option.builder("zk") - .optionalArg(true) - .desc("zk quorum") - .hasArg() - .build() - ); - options.addOption( - Option.builder("rootDir") - .optionalArg(true) - .desc("hbase.rootDir") - .hasArg() - .build() - ); - options.addOption( - Option.builder("sleep") - .desc("Time to sleepForMs (ms) for checking compaction status per region and available " - + "work queues: default 30s") - .hasArg() - .build() - ); + Option.builder("servers").required().desc("Concurrent servers compacting").hasArg().build()); + options.addOption(Option.builder("minModTime") + .desc("Compact if store files have modification time < minModTime").hasArg().build()); + options.addOption(Option.builder("zk").optionalArg(true).desc("zk quorum").hasArg().build()); options.addOption( - Option.builder("retries") - .desc("Max # of retries for a compaction request," + " defaults to 3") - .hasArg() - .build() - ); - options.addOption( - Option.builder("dryRun") - .desc("Dry run, will just output a list of regions that require compaction based on " - + "parameters passed") - .hasArg(false) - .build() - ); - - options.addOption( - Option.builder("skipWait") - .desc("Skip waiting after triggering compaction.") - .hasArg(false) - .build() - ); - - options.addOption( - Option.builder("numservers") - .optionalArg(true) - .desc("Number of servers to compact in this run, defaults to all") - .hasArg() - .build() - ); - - options.addOption( - Option.builder("numregions") - .optionalArg(true) - .desc("Number of regions to compact per server, defaults to all") - .hasArg() - .build() - ); + Option.builder("rootDir").optionalArg(true).desc("hbase.rootDir").hasArg().build()); + options.addOption(Option.builder("sleep") + .desc("Time to sleepForMs (ms) for checking compaction status per region and available " + + "work queues: default 30s") + .hasArg().build()); + options.addOption(Option.builder("retries") + .desc("Max # of retries for a compaction request," + " defaults to 3").hasArg().build()); + options.addOption(Option.builder("dryRun") + .desc("Dry run, will just output a list of regions that require compaction based on " + + "parameters passed") + .hasArg(false).build()); + + options.addOption(Option.builder("skipWait").desc("Skip waiting after triggering compaction.") + .hasArg(false).build()); + + options.addOption(Option.builder("numservers").optionalArg(true) + .desc("Number of servers to compact in this run, defaults to all").hasArg().build()); + + options.addOption(Option.builder("numregions").optionalArg(true) + .desc("Number of regions to compact per server, defaults to all").hasArg().build()); return options; } @Override public int run(String[] args) throws Exception { Options options = getCommonOptions(); - options.addOption( - Option.builder("table") - .required() - .desc("table name") - .hasArg() - .build() - ); - options.addOption( - Option.builder("cf") - .optionalArg(true) - .desc("column families: comma separated eg: a,b,c") - .hasArg() - .build() - ); + options.addOption(Option.builder("table").required().desc("table name").hasArg().build()); + options.addOption(Option.builder("cf").optionalArg(true) + .desc("column families: comma separated eg: a,b,c").hasArg().build()); final CommandLineParser cmdLineParser = new DefaultParser(); CommandLine commandLine = null; try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to 
parse command-line arguments " + Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ -485,11 +428,10 @@ public int run(String[] args) throws Exception { Configuration configuration = getConf(); int concurrency = Integer.parseInt(commandLine.getOptionValue("servers")); - long minModTime = Long.parseLong( - commandLine.getOptionValue("minModTime", - String.valueOf(EnvironmentEdgeManager.currentTime()))); + long minModTime = Long.parseLong(commandLine.getOptionValue("minModTime", + String.valueOf(EnvironmentEdgeManager.currentTime()))); String quorum = - commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM)); + commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM)); String rootDir = commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR)); long sleep = Long.parseLong(commandLine.getOptionValue("sleep", Long.toString(30000))); @@ -499,9 +441,8 @@ public int run(String[] args) throws Exception { configuration.set(HConstants.HBASE_DIR, rootDir); configuration.set(HConstants.ZOOKEEPER_QUORUM, quorum); - MajorCompactor compactor = - new MajorCompactor(configuration, TableName.valueOf(tableName), families, concurrency, - minModTime, sleep); + MajorCompactor compactor = new MajorCompactor(configuration, TableName.valueOf(tableName), + families, concurrency, minModTime, sleep); compactor.setNumServers(numServers); compactor.setNumRegions(numRegions); compactor.setSkipWait(commandLine.hasOption("skipWait")); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java index c6ea5af7e138..c21595ad22ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -53,13 +52,13 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MajorCompactorTTL extends MajorCompactor { - private static final Logger LOG = LoggerFactory.getLogger(MajorCompactorTTL .class); + private static final Logger LOG = LoggerFactory.getLogger(MajorCompactorTTL.class); private TableDescriptor htd; @InterfaceAudience.Private public MajorCompactorTTL(Configuration conf, TableDescriptor htd, int concurrency, - long sleepForMs) throws IOException { + long sleepForMs) throws IOException { this.connection = ConnectionFactory.createConnection(conf); this.htd = htd; this.tableName = htd.getTableName(); @@ -75,19 +74,18 @@ protected MajorCompactorTTL() { @Override protected Optional getMajorCompactionRequest(RegionInfo hri) - throws IOException { + throws IOException { return MajorCompactionTTLRequest.newRequest(connection, hri, htd); } @Override protected Set getStoresRequiringCompaction(MajorCompactionRequest request) - throws IOException { - return ((MajorCompactionTTLRequest)request).getStoresRequiringCompaction(htd).keySet(); + throws IOException { + return ((MajorCompactionTTLRequest) request).getStoresRequiringCompaction(htd).keySet(); } - public int compactRegionsTTLOnTable(Configuration conf, String table, int concurrency, - long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) - throws Exception { + public int compactRegionsTTLOnTable(Configuration conf, String table, int concurrency, long sleep, + int numServers, int numRegions, boolean dryRun, boolean skipWait) throws Exception { Connection conn = ConnectionFactory.createConnection(conf); TableName tableName = TableName.valueOf(table); @@ -124,13 +122,7 @@ private boolean doesAnyColFamilyHaveTTL(TableDescriptor htd) { private Options getOptions() { Options options = getCommonOptions(); - options.addOption( - Option.builder("table") - .required() - .desc("table name") - .hasArg() - .build() - ); + options.addOption(Option.builder("table").required().desc("table name").hasArg().build()); return options; } @@ -144,9 +136,8 @@ public int run(String[] args) throws Exception { try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ -165,7 +156,7 @@ public int run(String[] args) throws Exception { boolean skipWait = commandLine.hasOption("skipWait"); return compactRegionsTTLOnTable(HBaseConfiguration.create(), table, concurrency, sleep, - numServers, numRegions, dryRun, skipWait); + numServers, numRegions, dryRun, skipWait); } public static void main(String[] args) throws Exception { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java index 6a6c530c3b64..99bbb2af544d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,14 +50,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * This class marches through all of the region's hfiles and verifies that - * they are all valid files. One just needs to instantiate the class, use - * checkTables(List<Path>) and then retrieve the corrupted hfiles (and - * quarantined files if in quarantining mode) - * - * The implementation currently parallelizes at the regionDir level. + * This class marches through all of the region's hfiles and verifies that they are all valid files. + * One just needs to instantiate the class, use checkTables(List<Path>) and then retrieve the + * corrupted hfiles (and quarantined files if in quarantining mode). The implementation currently + * parallelizes at the regionDir level. */ @InterfaceAudience.Private public class HFileCorruptionChecker { @@ -79,8 +76,8 @@ public class HFileCorruptionChecker { final AtomicInteger hfilesChecked = new AtomicInteger(); final AtomicInteger mobFilesChecked = new AtomicInteger(); - public HFileCorruptionChecker(Configuration conf, ExecutorService executor, - boolean quarantine) throws IOException { + public HFileCorruptionChecker(Configuration conf, ExecutorService executor, boolean quarantine) + throws IOException { this.conf = conf; this.fs = FileSystem.get(conf); this.cacheConf = CacheConfig.DISABLED; @@ -89,12 +86,8 @@ public HFileCorruptionChecker(Configuration conf, ExecutorService executor, } /** - * Checks a path to see if it is a valid hfile. - * - * @param p - * full Path to an HFile - * @throws IOException - * This is a connectivity related exception + * Checks a path to see if it is a valid hfile. + * @param p full Path to an HFile + * @throws IOException This is a + * connectivity related exception */ protected void checkHFile(Path p) throws IOException { HFile.Reader r = null; @@ -107,7 +100,7 @@ protected void checkHFile(Path p) throws IOException { Path dest = createQuarantinePath(p); LOG.warn("Quarantining corrupt HFile " + p + " into " + dest); boolean success = fs.mkdirs(dest.getParent()); - success = success ? fs.rename(p, dest): false; + success = success ? fs.rename(p, dest) : false; if (!success) { failures.add(p); } else { @@ -127,12 +120,8 @@ protected void checkHFile(Path p) throws IOException { } /** - * Given a path, generates a new path to where we move a corrupted hfile (bad - * trailer, no trailer). - * - * @param hFile - * Path to a corrupt hfile (assumes that it is HBASE_DIR/ table - * /region/cf/file) + * Given a path, generates a new path to where we move a corrupted hfile (bad trailer, no + * trailer). + * @param hFile Path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file) * @return path to where corrupted files are stored. This should be * HBASE_DIR/.corrupt/table/region/cf/file. */ @@ -155,11 +144,7 @@ Path createQuarantinePath(Path hFile) throws IOException { } /** - * Check all files in a column family dir. - * - * @param cfDir - * column family directory - * @throws IOException + * Check all files in a column family dir. + * @param cfDir column family directory + * @throws IOException */ protected void checkColFamDir(Path cfDir) throws IOException { FileStatus[] statuses = null; @@ -167,8 +152,8 @@ protected void checkColFamDir(Path cfDir) throws IOException { statuses = fs.listStatus(cfDir); // use same filter as scanner. } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Colfam Directory " + cfDir + - " does not exist.
Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Colfam Directory " + cfDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(cfDir); return; } @@ -176,8 +161,8 @@ protected void checkColFamDir(Path cfDir) throws IOException { List hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.isEmpty() && !fs.exists(cfDir)) { - LOG.warn("Colfam Directory " + cfDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Colfam Directory " + cfDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(cfDir); return; } @@ -191,11 +176,7 @@ protected void checkColFamDir(Path cfDir) throws IOException { } /** - * Check all files in a mob column family dir. - * - * @param cfDir - * mob column family directory - * @throws IOException + * Check all files in a mob column family dir. + * @param cfDir mob column family directory + * @throws IOException */ protected void checkMobColFamDir(Path cfDir) throws IOException { FileStatus[] statuses = null; @@ -203,8 +184,8 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { statuses = fs.listStatus(cfDir); // use same filter as scanner. } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Mob colfam Directory " + cfDir + - " does not exist. Likely the table is deleted. Skipping."); + LOG.warn("Mob colfam Directory " + cfDir + + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(cfDir); return; } @@ -212,8 +193,8 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { List hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.isEmpty() && !fs.exists(cfDir)) { - LOG.warn("Mob colfam Directory " + cfDir + - " does not exist. Likely the table is deleted. Skipping."); + LOG.warn("Mob colfam Directory " + cfDir + + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(cfDir); return; } @@ -227,12 +208,8 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { } /** - * Checks a path to see if it is a valid mob file. - * - * @param p - * full Path to a mob file. - * @throws IOException - * This is a connectivity related exception + * Checks a path to see if it is a valid mob file. + * @param p full Path to a mob file. + * @throws IOException This is a + * connectivity related exception */ protected void checkMobFile(Path p) throws IOException { HFile.Reader r = null; @@ -245,7 +222,7 @@ protected void checkMobFile(Path p) throws IOException { Path dest = createQuarantinePath(p); LOG.warn("Quarantining corrupt mob file " + p + " into " + dest); boolean success = fs.mkdirs(dest.getParent()); - success = success ? fs.rename(p, dest): false; + success = success ? fs.rename(p, dest) : false; if (!success) { failureMobFiles.add(p); } else { @@ -266,8 +243,7 @@ protected void checkMobFile(Path p) throws IOException { /** * Checks all the mob files of a table.
- * @param regionDir The mob region directory - * @throws IOException + * @param regionDir The mob region directory + * @throws IOException */ private void checkMobRegionDir(Path regionDir) throws IOException { if (!fs.exists(regionDir)) { @@ -278,16 +254,16 @@ private void checkMobRegionDir(Path regionDir) throws IOException { hfs = fs.listStatus(regionDir, new FamilyDirFilter(fs)); } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Mob directory " + regionDir - + " does not exist. Likely the table is deleted. Skipping."); + LOG.warn( + "Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(regionDir); return; } // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.length == 0 && !fs.exists(regionDir)) { - LOG.warn("Mob directory " + regionDir - + " does not exist. Likely the table is deleted. Skipping."); + LOG.warn( + "Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(regionDir); return; } @@ -301,11 +277,7 @@ private void checkMobRegionDir(Path regionDir) throws IOException { } /** - * Check all column families in a region dir. - * - * @param regionDir - * region directory - * @throws IOException + * Check all column families in a region dir. + * @param regionDir region directory + * @throws IOException */ protected void checkRegionDir(Path regionDir) throws IOException { FileStatus[] statuses = null; @@ -313,8 +285,8 @@ protected void checkRegionDir(Path regionDir) throws IOException { statuses = fs.listStatus(regionDir); } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Region Directory " + regionDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Region Directory " + regionDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(regionDir); return; } @@ -322,8 +294,8 @@ protected void checkRegionDir(Path regionDir) throws IOException { List cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (cfs.isEmpty() && !fs.exists(regionDir)) { - LOG.warn("Region Directory " + regionDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Region Directory " + regionDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(regionDir); return; } @@ -337,24 +309,22 @@ protected void checkRegionDir(Path regionDir) throws IOException { } /** - * Check all the regiondirs in the specified tableDir - * - * @param tableDir - * path to a table - * @throws IOException + * Check all the regiondirs in the specified tableDir + * @param tableDir path to a table + * @throws IOException */ void checkTableDir(Path tableDir) throws IOException { - List rds = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); + List rds = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { if (!fs.exists(tableDir)) { - LOG.warn("Table Directory " + tableDir + - " does not exist. Likely due to concurrent delete. Skipping."); + LOG.warn("Table Directory " + tableDir + + " does not exist. Likely due to concurrent delete. Skipping."); missing.add(tableDir); } return; } - LOG.info("Checking Table Directory {}.
Number of entries (including mob) = {}", tableDir, rds.size() + 1); + LOG.info("Checking Table Directory {}. Number of entries (including mob) = {}", tableDir, + rds.size() + 1); // Parallelize check at the region dir level List rdcs = new ArrayList<>(rds.size() + 1); @@ -382,8 +352,8 @@ void checkTableDir(Path tableDir) throws IOException { try { f.get(); } catch (ExecutionException e) { - LOG.warn("Failed to quarantine an HFile in regiondir " - + rdcs.get(i).regionDir, e.getCause()); + LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir, + e.getCause()); // rethrow IOExceptions if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); @@ -407,8 +377,8 @@ void checkTableDir(Path tableDir) throws IOException { } /** - * An individual work item for parallelized regiondir processing. This is - * intentionally an inner class so it can use the shared error sets and fs. + * An individual work item for parallelized regiondir processing. This is intentionally an inner + * class so it can use the shared error sets and fs. */ private class RegionDirChecker implements Callable { final Path regionDir; @@ -425,8 +395,8 @@ public Void call() throws IOException { } /** - * An individual work item for parallelized mob dir processing. This is - * intentionally an inner class so it can use the shared error sets and fs. + * An individual work item for parallelized mob dir processing. This is intentionally an inner + * class so it can use the shared error sets and fs. */ private class MobRegionDirChecker extends RegionDirChecker { @@ -490,8 +460,8 @@ public Collection getQuarantined() { } /** - * @return the set of paths that were missing. Likely due to deletion/moves from - * compaction or flushes. + * @return the set of paths that were missing. Likely due to deletion/moves from compaction or + * flushes. */ public Collection getMissing() { return new HashSet<>(missing); @@ -526,16 +496,15 @@ public Collection getQuarantinedMobFiles() { } /** - * @return the set of paths that were missing. Likely due to table deletion or - * deletion/moves from compaction. + * @return the set of paths that were missing. Likely due to table deletion or deletion/moves from + * compaction. */ public Collection getMissedMobFiles() { return new HashSet<>(missedMobFiles); } /** - * Print a human readable summary of hfile quarantining operations. - * @param out + * Print a human readable summary of hfile quarantining operations. + * @param out the error reporter to print the summary to */ public void report(HbckErrorReporter out) { out.print("Checked " + hfilesChecked.get() + " hfile for corruption"); @@ -556,8 +525,7 @@ public void report(HbckErrorReporter out) { } String initialState = (corrupted.isEmpty()) ? "OK" : "CORRUPTED"; - String fixedState = (corrupted.size() == quarantined.size()) ? "OK" - : "CORRUPTED"; + String fixedState = (corrupted.size() == quarantined.size()) ? "OK" : "CORRUPTED"; // print mob-related report out.print("Checked " + mobFilesChecked.get() + " Mob files for corruption"); @@ -577,8 +545,8 @@ public void report(HbckErrorReporter out) { out.print(" " + mq); } String initialMobState = (corruptedMobFiles.isEmpty()) ? "OK" : "CORRUPTED"; - String fixedMobState = (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? "OK" - : "CORRUPTED"; + String fixedMobState = + (corruptedMobFiles.size() == quarantinedMobFiles.size()) ?
"OK" : "CORRUPTED"; if (inQuarantineMode) { out.print("Summary: " + initialState + " => " + fixedState); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java index 7203fd103bb4..234daef85b51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public ReplicationChecker(Configuration conf, ZKWatcher zkw, HbckErrorReporter e public boolean hasUnDeletedQueues() { return errorReporter.getErrorList() - .contains(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE); + .contains(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE); } private Map> getUnDeletedQueues() throws ReplicationException { @@ -73,8 +73,8 @@ private Map> getUnDeletedQueues() throws ReplicationExc if (!peerIds.contains(queueInfo.getPeerId())) { undeletedQueues.computeIfAbsent(replicator, key -> new ArrayList<>()).add(queueId); LOG.debug( - "Undeleted replication queue for removed peer found: " + - "[removedPeerId={}, replicator={}, queueId={}]", + "Undeleted replication queue for removed peer found: " + + "[removedPeerId={}, replicator={}, queueId={}]", queueInfo.getPeerId(), replicator, queueId); } } @@ -100,17 +100,17 @@ public void checkUnDeletedQueues() throws ReplicationException { undeletedQueueIds.forEach((replicator, queueIds) -> { queueIds.forEach(queueId -> { ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId); - String msg = "Undeleted replication queue for removed peer found: " + - String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(), + String msg = "Undeleted replication queue for removed peer found: " + + String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(), replicator, queueId); errorReporter.reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg); }); }); undeletedHFileRefsPeerIds = getUndeletedHFileRefsPeers(); - undeletedHFileRefsPeerIds.stream().map( - peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found") - .forEach(msg -> errorReporter - .reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg)); + undeletedHFileRefsPeerIds.stream() + .map(peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found") + .forEach(msg -> errorReporter + .reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg)); } public void fixUnDeletedQueues() throws ReplicationException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java index e4b4a814e2d0..db8b0fdbf509 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util.hbck; import java.io.IOException; @@ -25,10 +24,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This interface provides callbacks for handling particular table integrity - * invariant violations. This could probably be boiled down to handling holes - * and handling overlaps but currently preserves the older more specific error - * condition codes. + * This interface provides callbacks for handling particular table integrity invariant violations. + * This could probably be boiled down to handling holes and handling overlaps but currently + * preserves the older more specific error condition codes. */ @InterfaceAudience.Private public interface TableIntegrityErrorHandler { @@ -41,66 +39,56 @@ public interface TableIntegrityErrorHandler { void setTableInfo(HbckTableInfo ti); /** - * Callback for handling case where a Table has a first region that does not - * have an empty start key. - * - * @param hi An HbckRegionInfo of the second region in a table. This should have - * a non-empty startkey, and can be used to fabricate a first region that - * has an empty start key. + * Callback for handling case where a Table has a first region that does not have an empty start + * key. + * @param hi An HbckRegionInfo of the second region in a table. This should have a non-empty + * startkey, and can be used to fabricate a first region that has an empty start key. */ void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException; /** - * Callback for handling case where a Table has a last region that does not - * have an empty end key. - * - * @param curEndKey The end key of the current last region. There should be a new region - * with start key as this and an empty end key. + * Callback for handling case where a Table has a last region that does not have an empty end key. + * @param curEndKey The end key of the current last region. There should be a new region with + * start key as this and an empty end key. */ void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException; /** * Callback for handling a region that has the same start and end key. - * * @param hi An HbckRegionInfo for a degenerate key. */ void handleDegenerateRegion(HbckRegionInfo hi) throws IOException; /** - * Callback for handling two regions that have the same start key. This is - * a specific case of a region overlap. + * Callback for handling two regions that have the same start key. This is a specific case of a + * region overlap. * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** - * Callback for handling two regions that have the same regionID - * a specific case of a split + * Callback for handling two regions that have the same regionID a specific case of a split * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ void handleSplit(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** - * Callback for handling two reigons that overlap in some arbitrary way. - * This is a specific case of region overlap, and called for each possible - * pair. If two regions have the same start key, the handleDuplicateStartKeys - * method is called. 
+ * Callback for handling two reigons that overlap in some arbitrary way. This is a specific case + * of region overlap, and called for each possible pair. If two regions have the same start key, + * the handleDuplicateStartKeys method is called. * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ - void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException; + void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** * Callback for handling a region hole between two keys. * @param holeStartKey key at the beginning of the region hole - * @param holeEndKey key at the end of the region hole - + * @param holeEndKey key at the end of the region hole */ - void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey) - throws IOException; + void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey) throws IOException; /** * Callback for handling an group of regions that overlap. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java index f39c623aa460..39b07820ef4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,12 +24,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Simple implementation of TableIntegrityErrorHandler. Can be used as a base - * class. + * Simple implementation of TableIntegrityErrorHandler. Can be used as a base class. 
*/ @InterfaceAudience.Private -abstract public class TableIntegrityErrorHandlerImpl implements - TableIntegrityErrorHandler { +abstract public class TableIntegrityErrorHandlerImpl implements TableIntegrityErrorHandler { HbckTableInfo ti; /** @@ -73,8 +71,7 @@ public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException { * {@inheritDoc} */ @Override - public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException { } /** @@ -82,23 +79,21 @@ public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) */ @Override public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + throws IOException { } /** * {@inheritDoc} */ @Override - public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) - throws IOException { + public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException { } /** * {@inheritDoc} */ @Override - public void handleOverlapGroup(Collection overlap) - throws IOException { + public void handleOverlapGroup(Collection overlap) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 75605e604c82..5b642d98fd3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -72,10 +72,10 @@ public abstract class AbstractFSWALProvider> implemen // Only public so classes back in regionserver.wal can access public interface Reader extends WAL.Reader { /** - * @param fs File system. + * @param fs File system. * @param path Path. - * @param c Configuration. - * @param s Input stream that may have been pre-opened by the caller; may be null. + * @param c Configuration. + * @param s Input stream that may have been pre-opened by the caller; may be null. */ void init(FileSystem fs, Path path, Configuration c, FSDataInputStream s) throws IOException; } @@ -97,14 +97,14 @@ public interface Reader extends WAL.Reader { private final ReadWriteLock walCreateLock = new ReentrantReadWriteLock(); /** - * @param factory factory that made us, identity used for FS layout. may not be null - * @param conf may not be null + * @param factory factory that made us, identity used for FS layout. may not be null + * @param conf may not be null * @param providerId differentiate between providers from one factory, used for FS layout. may be - * null + * null */ @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -296,16 +296,16 @@ public static boolean validateWALFilename(String filename) { * with as part of their name, usually the suffix. Sometimes there will be an extra suffix as when * it is a WAL for the meta table. For example, WALs might look like this * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the - * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a - * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have - * no timestamp on it. 
For example the recovered.edits files are WALs but are named in ascending - * order. Here is an example: 0000000000000016310. Allow for this. + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication + * WAL which adds a '.syncrep' suffix. Check for these. File also may have no timestamp on it. For + * example the recovered.edits files are WALs but are named in ascending order. Here is an + * example: 0000000000000016310. Allow for this. * @param name Name of the WAL file. * @return Timestamp or {@link #NO_TIMESTAMP}. */ public static long getTimestamp(String name) { Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name); - return matcher.matches() ? Long.parseLong(matcher.group(2)): NO_TIMESTAMP; + return matcher.matches() ? Long.parseLong(matcher.group(2)) : NO_TIMESTAMP; } /** @@ -348,7 +348,7 @@ public static String getWALArchiveDirectoryName(Configuration conf, final String * this log file otherwise. */ public static ServerName getServerNameFromWALDirectoryName(Configuration conf, String path) - throws IOException { + throws IOException { if (path == null || path.length() <= HConstants.HREGION_LOGDIR_NAME.length()) { return null; } @@ -439,8 +439,8 @@ public static boolean isMetaFile(String p) { } /** - * Comparator used to compare WAL files together based on their start time. - * Just compares start times and nothing else. + * Comparator used to compare WAL files together based on their start time. Just compares start + * times and nothing else. */ public static class WALStartTimeComparator implements Comparator { @Override @@ -449,10 +449,9 @@ public int compare(Path o1, Path o2) { } /** - * Split a path to get the start time - * For example: 10.20.20.171%3A60020.1277499063250 - * Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL - * which adds a '.syncrep' suffix. Check. + * Split a path to get the start time For example: 10.20.20.171%3A60020.1277499063250 Could also + * be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL which adds a + * '.syncrep' suffix. Check. 
* @param p path to split * @return start time */ @@ -461,8 +460,6 @@ public static long getTS(Path p) { } } - - public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); @@ -492,9 +489,8 @@ public static Path findArchivedLog(Path path, Configuration conf) throws IOExcep ServerName serverName = getServerNameFromWALDirectoryName(path); // Try finding the log in separate old log dir - oldLogDir = - new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME) - .append(Path.SEPARATOR).append(serverName.getServerName()).toString()); + oldLogDir = new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME) + .append(Path.SEPARATOR).append(serverName.getServerName()).toString()); archivedLogLocation = new Path(oldLogDir, path.getName()); if (fs.exists(archivedLogLocation)) { LOG.info("Log " + path + " was moved to " + archivedLogLocation); @@ -511,7 +507,7 @@ public static Path findArchivedLog(Path path, Configuration conf) throws IOExcep * @return WAL Reader instance */ public static org.apache.hadoop.hbase.wal.WAL.Reader openReader(Path path, Configuration conf) - throws IOException { + throws IOException { long retryInterval = 2000; // 2 sec int maxAttempts = 30; int attempt = 0; @@ -586,6 +582,7 @@ private static String getWALNameGroupFromWALName(String name, int group) { throw new IllegalArgumentException(name + " is not a valid wal file name"); } } + /** * Get prefix of the log from its name, assuming WAL name in format of * log_prefix.filenumber.log_suffix diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java index 0da082a4caf9..1b559dcce201 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -48,7 +47,7 @@ abstract class AbstractRecoveredEditsOutputSink extends OutputSink { private final ConcurrentMap regionMaximumEditLogSeqNum = new ConcurrentHashMap<>(); public AbstractRecoveredEditsOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(controller, entryBuffers, numWriters); this.walSplitter = walSplitter; } @@ -57,14 +56,14 @@ public AbstractRecoveredEditsOutputSink(WALSplitter walSplitter, * @return a writer that wraps a {@link WALProvider.Writer} and its Path. Caller should close. 
*/ protected RecoveredEditsWriter createRecoveredEditsWriter(TableName tableName, byte[] region, - long seqId) throws IOException { + long seqId) throws IOException { Path regionEditsPath = getRegionSplitEditsPath(tableName, region, seqId, walSplitter.getFileBeingSplit().getPath().getName(), walSplitter.getTmpDirName(), walSplitter.conf); if (walSplitter.walFS.exists(regionEditsPath)) { - LOG.warn("Found old edits file. It could be the " + - "result of a previous failed split attempt. Deleting " + regionEditsPath + ", length=" + - walSplitter.walFS.getFileStatus(regionEditsPath).getLen()); + LOG.warn("Found old edits file. It could be the " + + "result of a previous failed split attempt. Deleting " + regionEditsPath + ", length=" + + walSplitter.walFS.getFileStatus(regionEditsPath).getLen()); if (!walSplitter.walFS.delete(regionEditsPath, false)) { LOG.warn("Failed delete of old {}", regionEditsPath); } @@ -77,7 +76,7 @@ protected RecoveredEditsWriter createRecoveredEditsWriter(TableName tableName, b } protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, - List thrown) throws IOException { + List thrown) throws IOException { try { editsWriter.writer.close(); } catch (IOException ioe) { @@ -88,14 +87,16 @@ protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, return null; } final String msg = "Closed recovered edits writer path=" + editsWriter.path + " (wrote " - + editsWriter.editsWritten + " edits, skipped " + editsWriter.editsSkipped + " edits in " + ( - editsWriter.nanosSpent / 1000 / 1000) + " ms)"; + + editsWriter.editsWritten + " edits, skipped " + editsWriter.editsSkipped + " edits in " + + (editsWriter.nanosSpent / 1000 / 1000) + " ms)"; LOG.info(msg); updateStatusWithMsg(msg); if (editsWriter.editsWritten == 0) { // just remove the empty recovered.edits file - if (walSplitter.walFS.exists(editsWriter.path) - && !walSplitter.walFS.delete(editsWriter.path, false)) { + if ( + walSplitter.walFS.exists(editsWriter.path) + && !walSplitter.walFS.delete(editsWriter.path, false) + ) { final String errorMsg = "Failed deleting empty " + editsWriter.path; LOG.warn(errorMsg); updateStatusWithMsg(errorMsg); @@ -125,8 +126,7 @@ protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, updateStatusWithMsg(renameEditMsg); } } catch (IOException ioe) { - final String errorMsg = "Could not rename recovered edits " + editsWriter.path - + " to " + dst; + final String errorMsg = "Could not rename recovered edits " + editsWriter.path + " to " + dst; LOG.error(errorMsg, ioe); updateStatusWithMsg(errorMsg); thrown.add(ioe); @@ -173,17 +173,17 @@ private void deleteOneWithFewerEntries(RecoveredEditsWriter editsWriter, Path ds e); } if (editsWriter.minLogSeqNum < dstMinLogSeqNum) { - LOG.warn("Found existing old edits file. It could be the result of a previous failed" + - " split attempt or we have duplicated wal entries. Deleting " + dst + ", length=" + - walSplitter.walFS.getFileStatus(dst).getLen()); + LOG.warn("Found existing old edits file. It could be the result of a previous failed" + + " split attempt or we have duplicated wal entries. Deleting " + dst + ", length=" + + walSplitter.walFS.getFileStatus(dst).getLen()); if (!walSplitter.walFS.delete(dst, false)) { LOG.warn("Failed deleting of old {}", dst); throw new IOException("Failed deleting of old " + dst); } } else { - LOG.warn( - "Found existing old edits file and we have less entries. 
Deleting " + editsWriter.path + - ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen()); + LOG + .warn("Found existing old edits file and we have less entries. Deleting " + editsWriter.path + + ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen()); if (!walSplitter.walFS.delete(editsWriter.path, false)) { LOG.warn("Failed deleting of {}", editsWriter.path); throw new IOException("Failed deleting of " + editsWriter.path); @@ -252,7 +252,7 @@ void writeRegionEntries(List entries) throws IOException { } private void logAndThrowWriterAppendFailure(WAL.Entry logEntry, IOException e) - throws IOException { + throws IOException { e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e; final String errorMsg = "Failed to write log entry " + logEntry.toString() + " to log"; LOG.error(HBaseMarkers.FATAL, errorMsg, e); @@ -262,7 +262,7 @@ private void logAndThrowWriterAppendFailure(WAL.Entry logEntry, IOException e) private void filterCellByStore(WAL.Entry logEntry) { Map maxSeqIdInStores = walSplitter.getRegionMaxSeqIdInStores() - .get(Bytes.toString(logEntry.getKey().getEncodedRegionName())); + .get(Bytes.toString(logEntry.getKey().getEncodedRegionName())); if (MapUtils.isEmpty(maxSeqIdInStores)) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index 672b41e26057..031648dba82a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,14 +48,13 @@ * NOTE: This class extends Thread rather than Chore because the sleep time can be interrupted when * there is something to do, rather than the Chore sleep time which is invariant. *
    - * The {@link #scheduleFlush(String, List)} is abstract here, - * as sometimes we hold a region without a region server but we still want to roll its WAL. + * The {@link #scheduleFlush(String, List)} is abstract here, as sometimes we hold a region without + * a region server but we still want to roll its WAL. *
    * TODO: change to a pool of threads */ @InterfaceAudience.Private -public abstract class AbstractWALRoller extends Thread - implements Closeable { +public abstract class AbstractWALRoller extends Thread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(AbstractWALRoller.class); protected static final String WAL_ROLL_PERIOD_KEY = "hbase.regionserver.logroll.period"; @@ -67,9 +66,8 @@ public abstract class AbstractWALRoller extends Thread public static final long DEFAULT_WAL_ROLL_WAIT_TIMEOUT = 30000; /** - * Configure for the max count of log rolling retry. - * The real retry count is also limited by the timeout of log rolling - * via {@link #WAL_ROLL_WAIT_TIMEOUT} + * Configure for the max count of log rolling retry. The real retry count is also limited by the + * timeout of log rolling via {@link #WAL_ROLL_WAIT_TIMEOUT} */ protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; @@ -190,15 +188,15 @@ public void run() { } } try { - for (Iterator> iter = wals.entrySet().iterator(); - iter.hasNext();) { + for (Iterator> iter = wals.entrySet().iterator(); iter + .hasNext();) { Entry entry = iter.next(); WAL wal = entry.getKey(); RollController controller = entry.getValue(); if (controller.isRollRequested()) { // WAL roll requested, fall through LOG.debug("WAL {} roll requested", wal); - } else if (controller.needsPeriodicRoll(now)){ + } else if (controller.needsPeriodicRoll(now)) { // Time for periodic roll, fall through LOG.debug("WAL {} roll period {} ms elapsed", wal, this.rollPeriod); } else { @@ -223,7 +221,7 @@ public void run() { if (waitingTime < rollWaitTimeout && nAttempts < maxRollRetry) { nAttempts++; LOG.warn("Retry to roll log, nAttempts={}, waiting time={}ms, sleeping 1s to retry," - + " last exception", nAttempts, waitingTime, ioe); + + " last exception", nAttempts, waitingTime, ioe); sleep(1000); } else { LOG.error("Roll wal failed and waiting timeout, will not retry", ioe); @@ -256,7 +254,7 @@ protected void afterWALArchive(Path oldPath, Path newPath) { /** * @param encodedRegionName Encoded name of region to flush. - * @param families stores of region to flush. + * @param families stores of region to flush. */ protected abstract void scheduleFlush(String encodedRegionName, List families); @@ -271,8 +269,7 @@ private boolean isWaiting() { public boolean walRollFinished() { // TODO add a status field of roll in RollController return wals.values().stream() - .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) - && isWaiting(); + .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) && isWaiting(); } /** @@ -291,8 +288,8 @@ public void close() { } /** - * Independently control the roll of each wal. When use multiwal, - * can avoid all wal roll together. see HBASE-24665 for detail + * Independently control the roll of each wal. When use multiwal, can avoid all wal roll together. + * see HBASE-24665 for detail */ protected class RollController { private final WAL wal; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 06729e2356a2..2fe0402ca4df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,12 +54,12 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider { // Only public so classes back in regionserver.wal can access public interface AsyncWriter extends WALProvider.AsyncWriter { /** - * @throws IOException if something goes wrong initializing an output stream + * @throws IOException if something goes wrong initializing an output stream * @throws StreamLacksCapabilityException if the given FileSystem can't provide streams that - * meet the needs of the given Writer implementation. + * meet the needs of the given Writer implementation. */ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long blocksize, - StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; + StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; } private EventLoopGroup eventLoopGroup; @@ -87,33 +87,33 @@ protected void doInit(Configuration conf) throws IOException { * Public because of AsyncFSWAL. Should be package-private */ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, Path path, - boolean overwritable, EventLoopGroup eventLoopGroup, - Class channelClass) throws IOException { + boolean overwritable, EventLoopGroup eventLoopGroup, Class channelClass) + throws IOException { return createAsyncWriter(conf, fs, path, overwritable, WALUtil.getWALBlockSize(conf, fs, path), - eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, path.getName())); + eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, path.getName())); } /** * Public because of AsyncFSWAL. Should be package-private */ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, Path path, - boolean overwritable, long blocksize, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) throws IOException { + boolean overwritable, long blocksize, EventLoopGroup eventLoopGroup, + Class channelClass, StreamSlowMonitor monitor) throws IOException { // Configuration already does caching for the Class lookup. - Class logWriterClass = conf.getClass( - WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncWriter.class); + Class logWriterClass = + conf.getClass(WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncWriter.class); try { AsyncWriter writer = logWriterClass.getConstructor(EventLoopGroup.class, Class.class) - .newInstance(eventLoopGroup, channelClass); + .newInstance(eventLoopGroup, channelClass); writer.init(fs, path, conf, overwritable, blocksize, monitor); return writer; } catch (Exception e) { if (e instanceof CommonFSUtils.StreamLacksCapabilityException) { - LOG.error("The RegionServer async write ahead log provider " + - "relies on the ability to call " + e.getMessage() + " for proper operation during " + - "component failures, but the current FileSystem does not support doing so. Please " + - "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + - "it points to a FileSystem mount that has suitable capabilities for output streams."); + LOG.error("The RegionServer async write ahead log provider " + + "relies on the ability to call " + e.getMessage() + " for proper operation during " + + "component failures, but the current FileSystem does not support doing so. 
Please " + + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + + "it points to a FileSystem mount that has suitable capabilities for output streams."); } else { LOG.debug("Error instantiating log writer.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java index ed3c8b7f3e2a..7db007e1d2eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,10 @@ /** * Used for {@link BoundedRecoveredEditsOutputSink}. The core part of limiting opening writers is it - * doesn't return chunk only if the heap size is over maxHeapUsage. Thus it doesn't need to create - * a writer for each region during splitting. The returned {@link EntryBuffers.RegionEntryBuffer} - * will be write to recovered edits file and close the writer immediately. - * See {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more - * details. + * doesn't return chunk only if the heap size is over maxHeapUsage. Thus it doesn't need to create a + * writer for each region during splitting. The returned {@link EntryBuffers.RegionEntryBuffer} will + * be write to recovered edits file and close the writer immediately. See + * {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more details. */ @InterfaceAudience.Private public class BoundedEntryBuffers extends EntryBuffers { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java index bafcee339e7d..cf531354e440 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,18 +21,17 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy; +import org.apache.yetus.audience.InterfaceAudience; /** * A WAL grouping strategy that limits the number of wal groups to * "hbase.wal.regiongrouping.numgroups". 
*/ @InterfaceAudience.Private -public class BoundedGroupingStrategy implements RegionGroupingStrategy{ +public class BoundedGroupingStrategy implements RegionGroupingStrategy { static final String NUM_REGION_GROUPS = "hbase.wal.regiongrouping.numgroups"; static final int DEFAULT_NUM_REGION_GROUPS = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java index e2aa478075c3..150521b7957f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MultipleIOException; @@ -36,14 +35,13 @@ import org.slf4j.LoggerFactory; /** - * Class that manages the output streams from the log splitting process. - * Every region may have many recovered edits file. But the opening writers is bounded. - * Bounded means the output streams will be no more than the size of threadpool. + * Class that manages the output streams from the log splitting process. Every region may have many + * recovered edits file. But the opening writers is bounded. Bounded means the output streams will + * be no more than the size of threadpool. */ @InterfaceAudience.Private class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { - private static final Logger LOG = - LoggerFactory.getLogger(BoundedRecoveredEditsOutputSink.class); + private static final Logger LOG = LoggerFactory.getLogger(BoundedRecoveredEditsOutputSink.class); // Since the splitting process may create multiple output files, we need a map // to track the output count of each region. @@ -52,22 +50,20 @@ class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { private final AtomicInteger openingWritersNum = new AtomicInteger(0); public BoundedRecoveredEditsOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(walSplitter, controller, entryBuffers, numWriters); } @Override - public void append(EntryBuffers.RegionEntryBuffer buffer) - throws IOException { + public void append(EntryBuffers.RegionEntryBuffer buffer) throws IOException { List entries = buffer.entryBuffer; if (entries.isEmpty()) { LOG.warn("got an empty buffer, skipping"); return; } // The key point is create a new writer, write edits then close writer. 
- RecoveredEditsWriter writer = - createRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName, - entries.get(0).getKey().getSequenceId()); + RecoveredEditsWriter writer = createRecoveredEditsWriter(buffer.tableName, + buffer.encodedRegionName, entries.get(0).getKey().getSequenceId()); if (writer != null) { openingWritersNum.incrementAndGet(); writer.writeRegionEntries(entries); @@ -96,7 +92,6 @@ public List close() throws IOException { /** * Write out the remaining RegionEntryBuffers and close the writers. - * * @return true when there is no error. */ private boolean writeRemainingEntryBuffers() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 50bc5fe62fb8..05f3d46d2313 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.wal; import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.HashMap; @@ -49,10 +50,10 @@ import org.slf4j.LoggerFactory; /** - * A WALSplitter sink that outputs {@link org.apache.hadoop.hbase.io.hfile.HFile}s. - * Runs with a bounded number of HFile writers at any one time rather than let the count run up. + * A WALSplitter sink that outputs {@link org.apache.hadoop.hbase.io.hfile.HFile}s. Runs with a + * bounded number of HFile writers at any one time rather than let the count run up. * @see BoundedRecoveredEditsOutputSink for a sink implementation that writes intermediate - * recovered.edits files. + * recovered.edits files. */ @InterfaceAudience.Private public class BoundedRecoveredHFilesOutputSink extends OutputSink { @@ -90,10 +91,10 @@ void append(RegionEntryBuffer buffer) throws IOException { String familyName = Bytes.toString(CellUtil.cloneFamily(cell)); // comparator need to be specified for meta familyCells - .computeIfAbsent(familyName, - key -> new CellSet( - isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)) - .add(cell); + .computeIfAbsent(familyName, + key -> new CellSet( + isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)) + .add(cell); familySeqIds.compute(familyName, (k, v) -> v == null ? seqId : Math.max(v, seqId)); } } @@ -136,7 +137,6 @@ public List close() throws IOException { /** * Write out the remaining RegionEntryBuffers and close the writers. - * * @return true when there is no error. */ private boolean writeRemainingEntryBuffers() throws IOException { @@ -188,21 +188,21 @@ boolean keepRegionEvent(Entry entry) { } /** - * @return Returns a base HFile without compressions or encodings; good enough for recovery - * given hfile has metadata on how it was written. + * @return Returns a base HFile without compressions or encodings; good enough for recovery given + * hfile has metadata on how it was written. 
*/ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String regionName, - long seqId, String familyName, boolean isMetaTable) throws IOException { + long seqId, String familyName, boolean isMetaTable) throws IOException { Path outputDir = WALSplitUtil.tryCreateRecoveredHFilesDir(walSplitter.rootFS, walSplitter.conf, tableName, regionName, familyName); StoreFileWriter.Builder writerBuilder = - new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) - .withOutputDir(outputDir); - HFileContext hFileContext = new HFileContextBuilder(). - withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). - withCellComparator(isMetaTable? - MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); + new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) + .withOutputDir(outputDir); + HFileContext hFileContext = + new HFileContextBuilder().withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)).withCellComparator( + isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR) + .build(); return writerBuilder.withFileContext(hFileContext).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 6e5a0538296c..4700ecdea8e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; @@ -44,10 +43,8 @@ // imports for things that haven't moved from regionserver.wal yet. /** - * No-op implementation of {@link WALProvider} used when the WAL is disabled. - * - * Should only be used when severe data loss is acceptable. - * + * No-op implementation of {@link WALProvider} used when the WAL is disabled. Should only be used + * when severe data loss is acceptable. 
*/ @InterfaceAudience.Private class DisabledWALProvider implements WALProvider { @@ -58,7 +55,7 @@ class DisabledWALProvider implements WALProvider { @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (null != disabled) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -97,11 +94,11 @@ private static class DisabledWAL implements WAL { protected final AtomicBoolean closed = new AtomicBoolean(false); public DisabledWAL(final Path path, final Configuration conf, - final List listeners) { + final List listeners) { this.coprocessorHost = new WALCoprocessorHost(this, conf); this.path = path; if (null != listeners) { - for(WALActionsListener listener : listeners) { + for (WALActionsListener listener : listeners) { registerWALActionsListener(listener); } } @@ -148,7 +145,7 @@ public Map> rollWriter(boolean force) { @Override public void shutdown() { - if(closed.compareAndSet(false, true)) { + if (closed.compareAndSet(false, true)) { if (!this.listeners.isEmpty()) { for (WALActionsListener listener : this.listeners) { listener.logCloseRequested(); @@ -168,13 +165,12 @@ public long appendData(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IO } @Override - public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) - throws IOException { + public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException { return append(info, key, edits, false); } private long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException { + throws IOException { WriteEntry writeEntry = key.getMvcc().begin(); if (!edits.isReplay()) { for (Cell cell : edits.getCells()) { @@ -197,8 +193,10 @@ private long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMe } @Override - public void updateStore(byte[] encodedRegionName, byte[] familyName, - Long sequenceid, boolean onlyIfGreater) { return; } + public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, + boolean onlyIfGreater) { + return; + } @Override public void sync() { @@ -215,8 +213,8 @@ public void sync(long txid) { } @Override - public Long startCacheFlush(final byte[] encodedRegionName, Map - flushedFamilyNamesToSeq) { + public Long startCacheFlush(final byte[] encodedRegionName, + Map flushedFamilyNamesToSeq) { return startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq.keySet()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java index 1ff311d9d714..17a93ea4b996 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; @@ -132,10 +131,9 @@ synchronized boolean isRegionCurrentlyWriting(byte[] region) { } /** - * A buffer of some number of edits for a given region. 
- * This accumulates edits and also provides a memory optimization in order to - * share a single byte array instance for the table and region name. - * Also tracks memory usage of the accumulated edits. + * A buffer of some number of edits for a given region. This accumulates edits and also provides a + * memory optimization in order to share a single byte array instance for the table and region + * name. Also tracks memory usage of the accumulated edits. */ static class RegionEntryBuffer implements HeapSize { private long heapInBuffer = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index f5c39c0edf27..5d63ac2cf458 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,21 +44,22 @@ public class FSHLogProvider extends AbstractFSWALProvider { // Only public so classes back in regionserver.wal can access public interface Writer extends WALProvider.Writer { /** - * @throws IOException if something goes wrong initializing an output stream + * @throws IOException if something goes wrong initializing an output stream * @throws StreamLacksCapabilityException if the given FileSystem can't provide streams that - * meet the needs of the given Writer implementation. + * meet the needs of the given Writer implementation. */ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long blocksize, - StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; + StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; } /** * Public because of FSHLog. Should be package-private * @param overwritable if the created writer can overwrite. For recovered edits, it is true and - * for WAL it is false. Thus we can distinguish WAL and recovered edits by this. + * for WAL it is false. Thus we can distinguish WAL and recovered edits by + * this. */ public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, - final boolean overwritable) throws IOException { + final boolean overwritable) throws IOException { return createWriter(conf, fs, path, overwritable, WALUtil.getWALBlockSize(conf, fs, path, overwritable)); } @@ -68,25 +68,24 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, * Public because of FSHLog. Should be package-private */ public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, - final boolean overwritable, long blocksize) throws IOException { + final boolean overwritable, long blocksize) throws IOException { // Configuration already does caching for the Class lookup. 
Class logWriterClass = - conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, - Writer.class); + conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, Writer.class); Writer writer = null; try { writer = logWriterClass.getDeclaredConstructor().newInstance(); FileSystem rootFs = FileSystem.get(path.toUri(), conf); writer.init(rootFs, path, conf, overwritable, blocksize, - StreamSlowMonitor.create(conf, path.getName())); + StreamSlowMonitor.create(conf, path.getName())); return writer; - } catch (Exception e) { + } catch (Exception e) { if (e instanceof CommonFSUtils.StreamLacksCapabilityException) { - LOG.error("The RegionServer write ahead log provider for FileSystem implementations " + - "relies on the ability to call " + e.getMessage() + " for proper operation during " + - "component failures, but the current FileSystem does not support doing so. Please " + - "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + - "it points to a FileSystem mount that has suitable capabilities for output streams."); + LOG.error("The RegionServer write ahead log provider for FileSystem implementations " + + "relies on the ability to call " + e.getMessage() + " for proper operation during " + + "component failures, but the current FileSystem does not support doing so. Please " + + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + + "it points to a FileSystem mount that has suitable capabilities for output streams."); } else { LOG.debug("Error instantiating log writer.", e); } @@ -97,9 +96,9 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, @Override protected FSHLog createWAL() throws IOException { return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, - CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), - getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null); + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), + getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, + META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java index 3022a25fdb11..c718fb961725 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy; +import org.apache.yetus.audience.InterfaceAudience; /** - * A WAL grouping strategy based on namespace. - * Notice: the wal-group mapping might change if we support dynamic namespace updating later, - * and special attention needed if we support feature like group-based replication. + * A WAL grouping strategy based on namespace. 
Notice: the wal-group mapping might change if we + * support dynamic namespace updating later, and special attention needed if we support feature like + * group-based replication. */ @InterfaceAudience.Private public class NamespaceGroupingStrategy implements RegionGroupingStrategy { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java index 7f33eda9e652..d5c48bfff8e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,14 +42,14 @@ public final class NettyAsyncFSWALConfigHelper { private static final String CONFIG_NAME = "global-event-loop"; - private static final Map>> EVENT_LOOP_CONFIG_MAP = - new HashMap<>(); + private static final Map>> EVENT_LOOP_CONFIG_MAP = new HashMap<>(); /** * Set the EventLoopGroup and channel class for {@code AsyncFSWALProvider}. */ public static void setEventLoopConfig(Configuration conf, EventLoopGroup group, - Class channelClass) { + Class channelClass) { Preconditions.checkNotNull(group, "group is null"); Preconditions.checkNotNull(channelClass, "channel class is null"); conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME); @@ -68,5 +68,6 @@ static Pair> getEventLoopConfig(Configu return EVENT_LOOP_CONFIG_MAP.get(name); } - private NettyAsyncFSWALConfigHelper() {} + private NettyAsyncFSWALConfigHelper() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java index d995b979ea3b..cdd571b4cc3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,17 +27,16 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * The following class is an abstraction class to provide a common interface to support different @@ -72,7 +71,7 @@ abstract class OutputSink { protected final CompletionService closeCompletionService; public OutputSink(WALSplitter.PipelineController controller, EntryBuffers entryBuffers, - int numWriters) { + int numWriters) { this.numThreads = numWriters; this.controller = controller; this.entryBuffers = entryBuffers; @@ -103,7 +102,6 @@ void startWriterThreads() throws IOException { /** * Wait for writer threads to dump all info to the sink - * * @return true when there is no error */ boolean finishWriterThreads() throws IOException { @@ -183,7 +181,7 @@ public static class WriterThread extends Thread { private OutputSink outputSink = null; WriterThread(WALSplitter.PipelineController controller, EntryBuffers entryBuffers, - OutputSink sink, int i) { + OutputSink sink, int i) { super(Thread.currentThread().getName() + "-Writer-" + i); this.controller = controller; this.entryBuffers = entryBuffers; @@ -191,7 +189,7 @@ public static class WriterThread extends Thread { } @Override - public void run() { + public void run() { try { doRun(); } catch (Throwable t) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java index 645af60efcb4..d8cf1371c341 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java @@ -28,21 +28,20 @@ import java.util.concurrent.Future; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MultipleIOException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Class that manages the output streams from the log splitting process. - * Every region only has one recovered edits file PER split WAL (if we split - * multiple WALs during a log-splitting session, on open, a Region may - * have multiple recovered.edits files to replay -- one per split WAL). - * @see BoundedRecoveredEditsOutputSink which is like this class but imposes upper bound on - * the number of writers active at one time (makes for better throughput). + * Class that manages the output streams from the log splitting process. 
Every region only has one + * recovered edits file PER split WAL (if we split multiple WALs during a log-splitting session, on + * open, a Region may have multiple recovered.edits files to replay -- one per split WAL). + * @see BoundedRecoveredEditsOutputSink which is like this class but imposes upper bound on the + * number of writers active at one time (makes for better throughput). */ @InterfaceAudience.Private class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { @@ -50,21 +49,19 @@ class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { private ConcurrentMap writers = new ConcurrentHashMap<>(); public RecoveredEditsOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(walSplitter, controller, entryBuffers, numWriters); } @Override - public void append(EntryBuffers.RegionEntryBuffer buffer) - throws IOException { + public void append(EntryBuffers.RegionEntryBuffer buffer) throws IOException { List entries = buffer.entryBuffer; if (entries.isEmpty()) { LOG.warn("got an empty buffer, skipping"); return; } - RecoveredEditsWriter writer = - getRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName, - entries.get(0).getKey().getSequenceId()); + RecoveredEditsWriter writer = getRecoveredEditsWriter(buffer.tableName, + buffer.encodedRegionName, entries.get(0).getKey().getSequenceId()); if (writer != null) { writer.writeRegionEntries(entries); } @@ -76,7 +73,7 @@ public void append(EntryBuffers.RegionEntryBuffer buffer) * @return null if this region shouldn't output any logs */ private RecoveredEditsWriter getRecoveredEditsWriter(TableName tableName, byte[] region, - long seqId) throws IOException { + long seqId) throws IOException { RecoveredEditsWriter ret = writers.get(Bytes.toString(region)); if (ret != null) { return ret; @@ -103,7 +100,6 @@ public List close() throws IOException { /** * Close all of the output streams. - * * @return true when there is no error. */ private boolean closeWriters() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 20d043b6ae26..4e4748be3a0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,13 +27,11 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; -// imports for classes still in regionserver.wal import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.KeyLocker; @@ -43,18 +40,15 @@ import org.slf4j.LoggerFactory; /** - * A WAL Provider that returns a WAL per group of regions. 
- * - * This provider follows the decorator pattern and mainly holds the logic for WAL grouping. - * WAL creation/roll/close is delegated to {@link #DELEGATE_PROVIDER} - * - * Region grouping is handled via {@link RegionGroupingStrategy} and can be configured via the - * property "hbase.wal.regiongrouping.strategy". Current strategy choices are + * A WAL Provider that returns a WAL per group of regions. This provider follows the decorator + * pattern and mainly holds the logic for WAL grouping. WAL creation/roll/close is delegated to + * {@link #DELEGATE_PROVIDER} Region grouping is handled via {@link RegionGroupingStrategy} and can + * be configured via the property "hbase.wal.regiongrouping.strategy". Current strategy choices are *

<ul>
- * <li>defaultStrategy : Whatever strategy this version of HBase picks. currently
- * "bounded".</li>
- * <li>identity : each region belongs to its own group.</li>
- * <li>bounded : bounded number of groups and region evenly assigned to each group.</li>
+ * <li>defaultStrategy : Whatever strategy this version of HBase picks. currently
+ * "bounded".</li>
+ * <li>identity : each region belongs to its own group.</li>
+ * <li>bounded : bounded number of groups and region evenly assigned to each group.</li>
* </ul>
    * Optionally, a FQCN to a custom implementation may be given. */ @@ -72,6 +66,7 @@ public static interface RegionGroupingStrategy { * Given an identifier and a namespace, pick a group. */ String group(final byte[] identifier, byte[] namespace); + void init(Configuration config, String providerId); } @@ -85,17 +80,18 @@ static enum Strategies { namespace(NamespaceGroupingStrategy.class); final Class clazz; + Strategies(Class clazz) { this.clazz = clazz; } } /** - * instantiate a strategy from a config property. - * requires conf to have already been set (as well as anything the provider might need to read). + * instantiate a strategy from a config property. requires conf to have already been set (as well + * as anything the provider might need to read). */ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, - final String defaultValue) throws IOException { + final String defaultValue) throws IOException { Class clazz; try { clazz = Strategies.valueOf(conf.get(key, defaultValue)).clazz; @@ -111,8 +107,8 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, result.init(conf, providerId); return result; } catch (Exception e) { - LOG.error("couldn't set up region grouping strategy, check config key " + - REGION_GROUPING_STRATEGY); + LOG.error( + "couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY); LOG.debug("Exception details for failure to load region grouping strategy.", e); throw new IOException("couldn't set up region grouping strategy", e); } @@ -123,8 +119,8 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, /** delegate provider for WAL creation/roll/close, but not support multiwal */ public static final String DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider"; - public static final String DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider - .name(); + public static final String DEFAULT_DELEGATE_PROVIDER = + WALFactory.Providers.defaultProvider.name(); private static final String META_WAL_GROUP_NAME = "meta"; @@ -143,7 +139,7 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -228,7 +224,7 @@ public WAL getWAL(RegionInfo region) throws IOException { public void shutdown() throws IOException { // save the last exception and rethrow IOException failure = null; - for (WALProvider provider: cached.values()) { + for (WALProvider provider : cached.values()) { try { provider.shutdown(); } catch (IOException e) { @@ -266,7 +262,9 @@ public void close() throws IOException { static class IdentityGroupingStrategy implements RegionGroupingStrategy { @Override - public void init(Configuration config, String providerId) {} + public void init(Configuration config, String providerId) { + } + @Override public String group(final byte[] identifier, final byte[] namespace) { return Bytes.toString(identifier); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index f57ec31c531a..e94d87aa698c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -108,7 +108,7 @@ public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -144,8 +144,7 @@ private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOEx throw new IllegalArgumentException("No valid constructor provided for class " + clazz); } constructor.setAccessible(true); - return (DualAsyncFSWAL) constructor.newInstance( - CommonFSUtils.getWALFileSystem(conf), + return (DualAsyncFSWAL) constructor.newInstance(CommonFSUtils.getWALFileSystem(conf), ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir), CommonFSUtils.getWALRootDir(conf), ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId), @@ -196,7 +195,7 @@ public WAL getWAL(RegionInfo region) throws IOException { } WAL wal = null; Optional> peerIdAndRemoteWALDir = - peerInfoProvider.getPeerIdAndRemoteWALDir(region.getTable()); + peerInfoProvider.getPeerIdAndRemoteWALDir(region.getTable()); if (peerIdAndRemoteWALDir.isPresent()) { Pair pair = peerIdAndRemoteWALDir.get(); wal = getWAL(pair.getFirst(), pair.getSecond()); @@ -284,7 +283,7 @@ public void addWALActionsListener(WALActionsListener listener) { @Override public void peerSyncReplicationStateChange(String peerId, SyncReplicationState from, - SyncReplicationState to, int stage) { + SyncReplicationState to, int stage) { if (from == SyncReplicationState.ACTIVE) { if (stage == 0) { Lock lock = createLock.acquireLock(peerId); @@ -306,7 +305,7 @@ public void peerSyncReplicationStateChange(String peerId, SyncReplicationState f } private static class DefaultSyncReplicationPeerInfoProvider - implements SyncReplicationPeerInfoProvider { + implements SyncReplicationPeerInfoProvider { @Override public Optional> getPeerIdAndRemoteWALDir(TableName table) { @@ -315,7 +314,7 @@ public Optional> getPeerIdAndRemoteWALDir(TableName table) @Override public boolean checkState(TableName table, - BiPredicate checker) { + BiPredicate checker) { return false; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 2a434a73b672..fb1f17a7b579 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -33,10 +33,9 @@ /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides - * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). - * - * Note that some internals, such as log rolling and performance evaluation tools, will use - * WAL.equals to determine if they have already seen a given WAL. + * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). Note that some + * internals, such as log rolling and performance evaluation tools, will use WAL.equals to determine + * if they have already seen a given WAL. 
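Tying the RegionGroupingProvider javadoc above back to configuration: the property names below are the ones quoted in that javadoc, while the chosen values are only an illustrative sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MultiWalConfigSketch {
  public static Configuration groupedWalConf() {
    Configuration conf = HBaseConfiguration.create();
    // One WAL per group of regions instead of a single WAL for the whole region server.
    conf.set("hbase.wal.provider", "multiwal");
    // How regions map to groups: defaultStrategy, identity, bounded, or a custom FQCN.
    conf.set("hbase.wal.regiongrouping.strategy", "bounded");
    // Provider that actually creates/rolls/closes each grouped WAL.
    conf.set("hbase.wal.regiongrouping.delegate.provider", "defaultProvider");
    return conf;
  }
}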
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -54,43 +53,36 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * Roll the log writer. That is, start writing log messages to a new file. - * *

    - * The implementation is synchronized in order to make sure there's one rollWriter - * running at any given time. - * - * @return If lots of logs, flush the stores of returned regions so next time through we - * can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link RegionInfo#getEncodedName()} + * The implementation is synchronized in order to make sure there's one rollWriter running at any + * given time. + * @return If lots of logs, flush the stores of returned regions so next time through we can clean + * logs. Returns null if nothing to flush. Names are actual region names as returned by + * {@link RegionInfo#getEncodedName()} */ Map> rollWriter() throws FailedLogCloseException, IOException; /** * Roll the log writer. That is, start writing log messages to a new file. - * *

    - * The implementation is synchronized in order to make sure there's one rollWriter - * running at any given time. - * - * @param force - * If true, force creation of a new writer even if no entries have - * been written to the current writer - * @return If lots of logs, flush the stores of returned regions so next time through we - * can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link RegionInfo#getEncodedName()} + * The implementation is synchronized in order to make sure there's one rollWriter running at any + * given time. n * If true, force creation of a new writer even if no entries have been written to + * the current writer + * @return If lots of logs, flush the stores of returned regions so next time through we can clean + * logs. Returns null if nothing to flush. Names are actual region names as returned by + * {@link RegionInfo#getEncodedName()} */ Map> rollWriter(boolean force) throws IOException; /** - * Stop accepting new writes. If we have unsynced writes still in buffer, sync them. - * Extant edits are left in place in backing storage to be replayed later. + * Stop accepting new writes. If we have unsynced writes still in buffer, sync them. Extant edits + * are left in place in backing storage to be replayed later. */ void shutdown() throws IOException; /** - * Caller no longer needs any edits from this WAL. Implementers are free to reclaim - * underlying resources after this call; i.e. filesystem based WALs can archive or - * delete files. + * Caller no longer needs any edits from this WAL. Implementers are free to reclaim underlying + * resources after this call; i.e. filesystem based WALs can archive or delete files. */ @Override void close() throws IOException; @@ -102,10 +94,10 @@ public interface WAL extends Closeable, WALFileLengthProvider { * The WAL is not flushed/sync'd after this transaction completes BUT on return this edit must * have its region edit/sequence id assigned else it messes up our unification of mvcc and * sequenceid. On return key will have the region edit/sequence id filled in. - * @param info the regioninfo associated with append - * @param key Modified by this call; we add to it this edits region edit/sequence id. + * @param info the regioninfo associated with append + * @param key Modified by this call; we add to it this edits region edit/sequence id. * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit - * sequence id that is after all currently appended edits. + * sequence id that is after all currently appended edits. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. * @see #appendMarker(RegionInfo, WALKeyImpl, WALEdit) @@ -113,19 +105,19 @@ public interface WAL extends Closeable, WALFileLengthProvider { long appendData(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException; /** - * Append an operational 'meta' event marker edit to the WAL. A marker meta edit could - * be a FlushDescriptor, a compaction marker, or a region event marker; e.g. region open - * or region close. The difference between a 'marker' append and a 'data' append as in - * {@link #appendData(RegionInfo, WALKeyImpl, WALEdit)}is that a marker will not have - * transitioned through the memstore. + * Append an operational 'meta' event marker edit to the WAL. A marker meta edit could be a + * FlushDescriptor, a compaction marker, or a region event marker; e.g. region open or region + * close. 
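A hedged sketch of the rollWriter/sync contract described in the javadoc above; wal stands for any WAL implementation and the actual flushing of the returned regions is deliberately elided.

import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;

public class WalRollSketch {
  static void rollAndSync(WAL wal) throws IOException {
    // Force a new writer even if nothing has been appended to the current one.
    Map<byte[], List<byte[]>> regionsToFlush = wal.rollWriter(true);
    if (regionsToFlush != null) {
      // Old WAL files can only be cleaned once these regions have been flushed.
      regionsToFlush.keySet().forEach(region -> System.out.println(Bytes.toString(region)));
    }
    wal.sync(true); // ask for an hsync-style guarantee rather than a plain hflush
  }
}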
The difference between a 'marker' append and a 'data' append as in + * {@link #appendData(RegionInfo, WALKeyImpl, WALEdit)}is that a marker will not have transitioned + * through the memstore. *

    * The WAL is not flushed/sync'd after this transaction completes BUT on return this edit must * have its region edit/sequence id assigned else it messes up our unification of mvcc and * sequenceid. On return key will have the region edit/sequence id filled in. - * @param info the regioninfo associated with append - * @param key Modified by this call; we add to it this edits region edit/sequence id. + * @param info the regioninfo associated with append + * @param key Modified by this call; we add to it this edits region edit/sequence id. * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit - * sequence id that is after all currently appended edits. + * sequence id that is after all currently appended edits. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. * @see #appendData(RegionInfo, WALKeyImpl, WALEdit) @@ -133,12 +125,11 @@ public interface WAL extends Closeable, WALFileLengthProvider { long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException; /** - * updates the seuence number of a specific store. - * depending on the flag: replaces current seq number if the given seq id is bigger, - * or even if it is lower than existing one + * updates the seuence number of a specific store. depending on the flag: replaces current seq + * number if the given seq id is bigger, or even if it is lower than existing one */ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, - boolean onlyIfGreater); + boolean onlyIfGreater); /** * Sync what we have in the WAL. @@ -153,35 +144,35 @@ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, /** * @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush - * vs hsync. + * vs hsync. */ default void sync(boolean forceSync) throws IOException { sync(); } /** - * @param txid Transaction id to sync to. + * @param txid Transaction id to sync to. * @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush - * vs hsync. + * vs hsync. */ default void sync(long txid, boolean forceSync) throws IOException { sync(txid); } /** - * WAL keeps track of the sequence numbers that are as yet not flushed im memstores - * in order to be able to do accounting to figure which WALs can be let go. This method tells WAL - * that some region is about to flush. The flush can be the whole region or for a column family - * of the region only. - * - *

    Currently, it is expected that the update lock is held for the region; i.e. no - * concurrent appends while we set up cache flush. + * WAL keeps track of the sequence numbers that are as yet not flushed im memstores in order to be + * able to do accounting to figure which WALs can be let go. This method tells WAL that some + * region is about to flush. The flush can be the whole region or for a column family of the + * region only. + *

    + * Currently, it is expected that the update lock is held for the region; i.e. no concurrent + * appends while we set up cache flush. * @param families Families to flush. May be a subset of all families in the region. - * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if - * we are flushing a subset of all families but there are no edits in those families not - * being flushed; in other words, this is effectively same as a flush of all of the region - * though we were passed a subset of regions. Otherwise, it returns the sequence id of the - * oldest/lowest outstanding edit. + * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are + * flushing a subset of all families but there are no edits in those families not being + * flushed; in other words, this is effectively same as a flush of all of the region + * though we were passed a subset of regions. Otherwise, it returns the sequence id of the + * oldest/lowest outstanding edit. * @see #completeCacheFlush(byte[], long) * @see #abortCacheFlush(byte[]) */ @@ -192,17 +183,17 @@ default void sync(long txid, boolean forceSync) throws IOException { /** * Complete the cache flush. * @param encodedRegionName Encoded region name. - * @param maxFlushedSeqId The maxFlushedSeqId for this flush. There is no edit in memory that is - * less that this sequence id. + * @param maxFlushedSeqId The maxFlushedSeqId for this flush. There is no edit in memory that is + * less that this sequence id. * @see #startCacheFlush(byte[], Set) * @see #abortCacheFlush(byte[]) */ void completeCacheFlush(final byte[] encodedRegionName, long maxFlushedSeqId); /** - * Abort a cache flush. Call if the flush fails. Note that the only recovery - * for an aborted flush currently is a restart of the regionserver so the - * snapshot content dropped by the failure gets restored to the memstore. + * Abort a cache flush. Call if the flush fails. Note that the only recovery for an aborted flush + * currently is a restart of the regionserver so the snapshot content dropped by the failure gets + * restored to the memstore. * @param encodedRegionName Encoded region name. */ void abortCacheFlush(byte[] encodedRegionName); @@ -217,7 +208,7 @@ default void sync(long txid, boolean forceSync) throws IOException { * @param encodedRegionName The region to get the number for. * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent. * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal - * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} + * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} */ @Deprecated long getEarliestMemStoreSeqNum(byte[] encodedRegionName); @@ -225,29 +216,31 @@ default void sync(long txid, boolean forceSync) throws IOException { /** * Gets the earliest unflushed sequence id in the memstore for the store. * @param encodedRegionName The region to get the number for. - * @param familyName The family to get the number for. + * @param familyName The family to get the number for. * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent. */ long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName); /** - * Human readable identifying information about the state of this WAL. - * Implementors are encouraged to include information appropriate for debugging. 
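The flush-accounting handshake described above (startCacheFlush / completeCacheFlush / abortCacheFlush) roughly composes as follows; this is a sketch under the assumption that the actual store-file writing happens where the comment indicates.

import java.util.Set;
import org.apache.hadoop.hbase.wal.WAL;

public class WalFlushAccountingSketch {
  static void flushWithAccounting(WAL wal, byte[] encodedRegionName, Set<byte[]> families,
      long maxFlushedSeqId) {
    // Tell the WAL a flush is starting so it can track which edits are still unflushed.
    wal.startCacheFlush(encodedRegionName, families);
    boolean succeeded = false;
    try {
      // ... write the memstore snapshot out to store files here ...
      succeeded = true;
    } finally {
      if (succeeded) {
        wal.completeCacheFlush(encodedRegionName, maxFlushedSeqId);
      } else {
        // The snapshot is restored to the memstore on failure, so undo the accounting too.
        wal.abortCacheFlush(encodedRegionName);
      }
    }
  }
}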
- * Consumers are advised not to rely on the details of the returned String; it does - * not have a defined structure. + * Human readable identifying information about the state of this WAL. Implementors are encouraged + * to include information appropriate for debugging. Consumers are advised not to rely on the + * details of the returned String; it does not have a defined structure. */ @Override String toString(); /** - * When outside clients need to consume persisted WALs, they rely on a provided - * Reader. + * When outside clients need to consume persisted WALs, they rely on a provided Reader. */ interface Reader extends Closeable { Entry next() throws IOException; + Entry next(Entry reuse) throws IOException; + void seek(long pos) throws IOException; + long getPosition() throws IOException; + void reset() throws IOException; } @@ -264,9 +257,8 @@ public Entry() { /** * Constructor for both params - * * @param edit log's edit - * @param key log's key + * @param key log's key */ public Entry(WALKeyImpl key, WALEdit edit) { this.key = key; @@ -274,18 +266,14 @@ public Entry(WALKeyImpl key, WALEdit edit) { } /** - * Gets the edit - * - * @return edit + * Gets the edit n */ public WALEdit getEdit() { return edit; } /** - * Gets the key - * - * @return key + * Gets the key n */ public WALKeyImpl getKey() { return key; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index 61f36fab74af..6794c2d5bd23 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -45,50 +45,52 @@ * Used in HBase's transaction log (WAL) to represent a collection of edits (Cell/KeyValue objects) * that came in as a single transaction. All the edits for a given transaction are written out as a * single record, in PB format, followed (optionally) by Cells written via the WALCellEncoder. - *

<p>This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are
- * classified as private methods, not for use by CPs.</p>
- *
- * <p>
    A particular WALEdit 'type' is the 'meta' type used to mark key operational - * events in the WAL such as compaction, flush, or region open. These meta types do not traverse - * hbase memstores. They are edits made by the hbase system rather than edit data submitted by - * clients. They only show in the WAL. These 'Meta' types have not been formally specified - * (or made into an explicit class type). They evolved organically. HBASE-8457 suggests codifying - * a WALEdit 'type' by adding a type field to WALEdit that gets serialized into the WAL. TODO. - * Would have to work on the consumption-side. Reading WALs on replay we seem to consume - * a Cell-at-a-time rather than by WALEdit. We are already in the below going out of our - * way to figure particular types -- e.g. if a compaction, replay, or close meta Marker -- during - * normal processing so would make sense to do this. Current system is an awkward marking of Cell - * columnfamily as {@link #METAFAMILY} and then setting qualifier based off meta edit type. For - * replay-time where we read Cell-at-a-time, there are utility methods below for figuring - * meta type. See also - * {@link #createBulkLoadEvent(RegionInfo, WALProtos.BulkLoadDescriptor)}, etc., for where we - * create meta WALEdit instances.

- *
- * <p>
    WALEdit will accumulate a Set of all column family names referenced by the Cells - * {@link #add(Cell)}'d. This is an optimization. Usually when loading a WALEdit, we have the - * column family name to-hand.. just shove it into the WALEdit if available. Doing this, we can - * save on a parse of each Cell to figure column family down the line when we go to add the - * WALEdit to the WAL file. See the hand-off in FSWALEntry Constructor. + *

+ * This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are classified as
+ * private methods, not for use by CPs.
+ * </p>
+ * <p>
    + * A particular WALEdit 'type' is the 'meta' type used to mark key operational events in the WAL + * such as compaction, flush, or region open. These meta types do not traverse hbase memstores. They + * are edits made by the hbase system rather than edit data submitted by clients. They only show in + * the WAL. These 'Meta' types have not been formally specified (or made into an explicit class + * type). They evolved organically. HBASE-8457 suggests codifying a WALEdit 'type' by adding a type + * field to WALEdit that gets serialized into the WAL. TODO. Would have to work on the + * consumption-side. Reading WALs on replay we seem to consume a Cell-at-a-time rather than by + * WALEdit. We are already in the below going out of our way to figure particular types -- e.g. if a + * compaction, replay, or close meta Marker -- during normal processing so would make sense to do + * this. Current system is an awkward marking of Cell columnfamily as {@link #METAFAMILY} and then + * setting qualifier based off meta edit type. For replay-time where we read Cell-at-a-time, there + * are utility methods below for figuring meta type. See also + * {@link #createBulkLoadEvent(RegionInfo, WALProtos.BulkLoadDescriptor)}, etc., for where we create + * meta WALEdit instances. + *

+ * <p>
    + * WALEdit will accumulate a Set of all column family names referenced by the Cells + * {@link #add(Cell)}'d. This is an optimization. Usually when loading a WALEdit, we have the column + * family name to-hand.. just shove it into the WALEdit if available. Doing this, we can save on a + * parse of each Cell to figure column family down the line when we go to add the WALEdit to the WAL + * file. See the hand-off in FSWALEntry Constructor. * @see WALKey */ // TODO: Do not expose this class to Coprocessors. It has set methods. A CP might meddle. @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION, - HBaseInterfaceAudience.COPROC }) + HBaseInterfaceAudience.COPROC }) public class WALEdit implements HeapSize { // Below defines are for writing WALEdit 'meta' Cells.. // TODO: Get rid of this system of special 'meta' Cells. See HBASE-8457. It suggests // adding a type to WALEdit itself for use denoting meta Edits and their types. - public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY"); + public static final byte[] METAFAMILY = Bytes.toBytes("METAFAMILY"); /** * @deprecated Since 2.3.0. Not used. */ @Deprecated - public static final byte [] METAROW = Bytes.toBytes("METAROW"); + public static final byte[] METAROW = Bytes.toBytes("METAROW"); /** * @deprecated Since 2.3.0. Make it protected, internal-use only. Use - * {@link #isCompactionMarker(Cell)} + * {@link #isCompactionMarker(Cell)} */ @Deprecated @InterfaceAudience.Private @@ -99,37 +101,36 @@ public class WALEdit implements HeapSize { */ @Deprecated @InterfaceAudience.Private - public static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH"); + public static final byte[] FLUSH = Bytes.toBytes("HBASE::FLUSH"); /** - * Qualifier for region event meta 'Marker' WALEdits start with the - * {@link #REGION_EVENT_PREFIX} prefix ('HBASE::REGION_EVENT::'). After the prefix, - * we note the type of the event which we get from the RegionEventDescriptor protobuf - * instance type (A RegionEventDescriptor protobuf instance is written as the meta Marker - * Cell value). Adding a type suffix means we do not have to deserialize the protobuf to - * figure out what type of event this is.. .just read the qualifier suffix. For example, - * a close region event descriptor will have a qualifier of HBASE::REGION_EVENT::REGION_CLOSE. - * See WAL.proto and the EventType in RegionEventDescriptor protos for all possible - * event types. + * Qualifier for region event meta 'Marker' WALEdits start with the {@link #REGION_EVENT_PREFIX} + * prefix ('HBASE::REGION_EVENT::'). After the prefix, we note the type of the event which we get + * from the RegionEventDescriptor protobuf instance type (A RegionEventDescriptor protobuf + * instance is written as the meta Marker Cell value). Adding a type suffix means we do not have + * to deserialize the protobuf to figure out what type of event this is.. .just read the qualifier + * suffix. For example, a close region event descriptor will have a qualifier of + * HBASE::REGION_EVENT::REGION_CLOSE. See WAL.proto and the EventType in RegionEventDescriptor + * protos for all possible event types. */ private static final String REGION_EVENT_STR = "HBASE::REGION_EVENT"; private static final String REGION_EVENT_PREFIX_STR = REGION_EVENT_STR + "::"; - private static final byte [] REGION_EVENT_PREFIX = Bytes.toBytes(REGION_EVENT_PREFIX_STR); + private static final byte[] REGION_EVENT_PREFIX = Bytes.toBytes(REGION_EVENT_PREFIX_STR); /** * @deprecated Since 2.3.0. Remove. Not for external use. Not used. 
*/ @Deprecated - public static final byte [] REGION_EVENT = Bytes.toBytes(REGION_EVENT_STR); + public static final byte[] REGION_EVENT = Bytes.toBytes(REGION_EVENT_STR); /** * We use this define figuring if we are carrying a close event. */ - private static final byte [] REGION_EVENT_CLOSE = - createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType.REGION_CLOSE); + private static final byte[] REGION_EVENT_CLOSE = + createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType.REGION_CLOSE); @InterfaceAudience.Private - public static final byte [] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); + public static final byte[] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); private final transient boolean replay; @@ -137,10 +138,10 @@ public class WALEdit implements HeapSize { /** * All the Cell families in cells. Updated by {@link #add(Cell)} and - * {@link #add(Map)}. This Set is passed to the FSWALEntry so it does not have - * to recalculate the Set of families in a transaction; makes for a bunch of CPU savings. + * {@link #add(Map)}. This Set is passed to the FSWALEntry so it does not have to recalculate the + * Set of families in a transaction; makes for a bunch of CPU savings. */ - private Set families = null; + private Set families = null; public WALEdit() { this(1, false); @@ -148,7 +149,7 @@ public WALEdit() { /** * @deprecated since 2.0.1 and will be removed in 4.0.0. Use {@link #WALEdit(int, boolean)} - * instead. + * instead. * @see #WALEdit(int, boolean) * @see HBASE-20781 */ @@ -159,7 +160,7 @@ public WALEdit(boolean replay) { /** * @deprecated since 2.0.1 and will be removed in 4.0.0. Use {@link #WALEdit(int, boolean)} - * instead. + * instead. * @see #WALEdit(int, boolean) * @see HBASE-20781 */ @@ -187,7 +188,7 @@ private Set getOrCreateFamilies() { * For use by FSWALEntry ONLY. An optimization. * @return All families in {@link #getCells()}; may be null. */ - public Set getFamilies() { + public Set getFamilies() { return this.families; } @@ -196,7 +197,7 @@ private Set getOrCreateFamilies() { * @deprecated Since 2.3.0. Do not expose. Make protected. */ @Deprecated - public static boolean isMetaEditFamily(final byte [] f) { + public static boolean isMetaEditFamily(final byte[] f) { return Bytes.equals(METAFAMILY, f); } @@ -208,8 +209,8 @@ public static boolean isMetaEditFamily(Cell cell) { } /** - * @return True if this is a meta edit; has one edit only and its columnfamily - * is {@link #METAFAMILY}. + * @return True if this is a meta edit; has one edit only and its columnfamily is + * {@link #METAFAMILY}. */ public boolean isMetaEdit() { return this.families != null && this.families.size() == 1 && this.families.contains(METAFAMILY); @@ -224,7 +225,7 @@ public boolean isReplay() { } @InterfaceAudience.Private - public WALEdit add(Cell cell, byte [] family) { + public WALEdit add(Cell cell, byte[] family) { getOrCreateFamilies().add(family); return addCell(cell); } @@ -249,10 +250,8 @@ public ArrayList getCells() { } /** - * This is not thread safe. - * This will change the WALEdit and shouldn't be used unless you are sure that nothing - * else depends on the contents being immutable. - * + * This is not thread safe. This will change the WALEdit and shouldn't be used unless you are sure + * that nothing else depends on the contents being immutable. * @param cells the list of cells that this WALEdit now contains. */ @InterfaceAudience.Private @@ -264,7 +263,7 @@ public void setCells(ArrayList cells) { /** * Reads WALEdit from cells. 
- * @param cellDecoder Cell decoder. + * @param cellDecoder Cell decoder. * @param expectedCount Expected cell count. * @return Number of KVs read. */ @@ -288,7 +287,7 @@ public long heapSize() { public long estimatedSerializedSizeOf() { long ret = 0; - for (Cell cell: cells) { + for (Cell cell : cells) { ret += PrivateCellUtil.estimatedSerializedSizeOf(cell); } return ret; @@ -314,37 +313,38 @@ public static WALEdit createFlushWALEdit(RegionInfo hri, FlushDescriptor f) { } public static FlushDescriptor getFlushDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumn(cell, METAFAMILY, FLUSH)? - FlushDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumn(cell, METAFAMILY, FLUSH) + ? FlushDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** * @return A meta Marker WALEdit that has a single Cell whose value is the passed in - * regionEventDesc serialized and whose row is this region, - * columnfamily is {@link #METAFAMILY} and qualifier is - * {@link #REGION_EVENT_PREFIX} + {@link RegionEventDescriptor#getEventType()}; - * for example HBASE::REGION_EVENT::REGION_CLOSE. + * regionEventDesc serialized and whose row is this region, columnfamily is + * {@link #METAFAMILY} and qualifier is {@link #REGION_EVENT_PREFIX} + + * {@link RegionEventDescriptor#getEventType()}; for example + * HBASE::REGION_EVENT::REGION_CLOSE. */ public static WALEdit createRegionEventWALEdit(RegionInfo hri, - RegionEventDescriptor regionEventDesc) { + RegionEventDescriptor regionEventDesc) { return createRegionEventWALEdit(getRowForRegion(hri), regionEventDesc); } @InterfaceAudience.Private - public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, - RegionEventDescriptor regionEventDesc) { + public static WALEdit createRegionEventWALEdit(byte[] rowForRegion, + RegionEventDescriptor regionEventDesc) { KeyValue kv = new KeyValue(rowForRegion, METAFAMILY, - createRegionEventDescriptorQualifier(regionEventDesc.getEventType()), - EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray()); + createRegionEventDescriptorQualifier(regionEventDesc.getEventType()), + EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray()); return new WALEdit().add(kv, METAFAMILY); } /** - * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll - * return something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. + * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll return + * something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. */ @InterfaceAudience.Private - public static byte [] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { + public static byte[] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { return Bytes.toBytes(REGION_EVENT_PREFIX_STR + t.toString()); } @@ -353,28 +353,28 @@ public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, * @return True if this is a Marker Edit and it is a RegionClose type. */ public boolean isRegionCloseMarker() { - return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), - REGION_EVENT_CLOSE, 0, REGION_EVENT_CLOSE.length); + return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), REGION_EVENT_CLOSE, + 0, REGION_EVENT_CLOSE.length); } /** - * @return Returns a RegionEventDescriptor made by deserializing the content of the - * passed in cell, IFF the cell is a RegionEventDescriptor - * type WALEdit. 
+ * @return Returns a RegionEventDescriptor made by deserializing the content of the passed in + * cell, IFF the cell is a RegionEventDescriptor type WALEdit. */ public static RegionEventDescriptor getRegionEventDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX)? - RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX) + ? RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** * @return A Marker WALEdit that has c serialized as its value */ public static WALEdit createCompaction(final RegionInfo hri, final CompactionDescriptor c) { - byte [] pbbytes = c.toByteArray(); + byte[] pbbytes = c.toByteArray(); KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, EnvironmentEdgeManager.currentTime(), pbbytes); - return new WALEdit().add(kv, METAFAMILY); //replication scope null so this won't be replicated + return new WALEdit().add(kv, METAFAMILY); // replication scope null so this won't be replicated } public static byte[] getRowForRegion(RegionInfo hri) { @@ -382,7 +382,7 @@ public static byte[] getRowForRegion(RegionInfo hri) { if (startKey.length == 0) { // empty row key is not allowed in mutations because it is both the start key and the end key // we return the smallest byte[] that is bigger (in lex comparison) than byte[0]. - return new byte[] {0}; + return new byte[] { 0 }; } return startKey; } @@ -393,12 +393,11 @@ public static byte[] getRowForRegion(RegionInfo hri) { * @return deserialized CompactionDescriptor or null. */ public static CompactionDescriptor getCompaction(Cell kv) throws IOException { - return isCompactionMarker(kv)? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)): null; + return isCompactionMarker(kv) ? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)) : null; } /** * Returns true if the given cell is a serialized {@link CompactionDescriptor} - * * @see #getCompaction(Cell) */ public static boolean isCompactionMarker(Cell cell) { @@ -407,15 +406,14 @@ public static boolean isCompactionMarker(Cell cell) { /** * Create a bulk loader WALEdit - * * @param hri The RegionInfo for the region in which we are bulk loading * @param bulkLoadDescriptor The descriptor for the Bulk Loader * @return The WALEdit for the BulkLoad */ public static WALEdit createBulkLoadEvent(RegionInfo hri, - WALProtos.BulkLoadDescriptor bulkLoadDescriptor) { + WALProtos.BulkLoadDescriptor bulkLoadDescriptor) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, BULK_LOAD, - EnvironmentEdgeManager.currentTime(), bulkLoadDescriptor.toByteArray()); + EnvironmentEdgeManager.currentTime(), bulkLoadDescriptor.toByteArray()); return new WALEdit().add(kv, METAFAMILY); } @@ -425,21 +423,20 @@ public static WALEdit createBulkLoadEvent(RegionInfo hri, * @return deserialized BulkLoadDescriptor or null. */ public static WALProtos.BulkLoadDescriptor getBulkLoadDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD)? - WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD) + ? WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** - * Append the given map of family->edits to a WALEdit data structure. - * This does not write to the WAL itself. 
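As a small usage sketch for the marker helpers above (isMetaEdit, getCompaction), a replay-side consumer might do something like the following; it is illustrative, not code from this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

public class WalEditMarkerSketch {
  static void inspect(WALEdit edit) throws IOException {
    if (!edit.isMetaEdit()) {
      return; // ordinary client data, nothing special to recognise
    }
    for (Cell cell : edit.getCells()) {
      // getCompaction returns null unless the cell is a serialized compaction descriptor.
      CompactionDescriptor compaction = WALEdit.getCompaction(cell);
      if (compaction != null) {
        System.out.println("compaction marker: " + compaction);
      }
    }
  }
}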
- * Note that as an optimization, we will stamp the Set of column families into the WALEdit - * to save on our having to calculate column families subsequently down in the actual WAL + * Append the given map of family->edits to a WALEdit data structure. This does not write to the + * WAL itself. Note that as an optimization, we will stamp the Set of column families into the + * WALEdit to save on our having to calculate column families subsequently down in the actual WAL * writing. - * * @param familyMap map of family->edits */ public void add(Map> familyMap) { - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { // 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects." int listSize = e.getValue().size(); // Add all Cells first and then at end, add the family rather than call {@link #add(Cell)} @@ -451,7 +448,7 @@ public void add(Map> familyMap) { } } - private void addFamily(byte [] family) { + private void addFamily(byte[] family) { getOrCreateFamilies().add(family); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index cff3154626be..3a019ce84368 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -39,23 +39,19 @@ import org.slf4j.LoggerFactory; /** - * Entry point for users of the Write Ahead Log. - * Acts as the shim between internal use and the particular WALProvider we use to handle wal - * requests. - * - * Configure which provider gets used with the configuration setting "hbase.wal.provider". Available - * implementations: + * Entry point for users of the Write Ahead Log. Acts as the shim between internal use and the + * particular WALProvider we use to handle wal requests. Configure which provider gets used with the + * configuration setting "hbase.wal.provider". Available implementations: *

<ul>
- * <li>defaultProvider : whatever provider is standard for the hbase version. Currently
- * "asyncfs"</li>
- * <li>asyncfs : a provider that will run on top of an implementation of the Hadoop
- * FileSystem interface via an asynchronous client.</li>
- * <li>filesystem : a provider that will run on top of an implementation of the Hadoop
- * FileSystem interface via HDFS's synchronous DFSClient.</li>
- * <li>multiwal : a provider that will use multiple "filesystem" wal instances per region
- * server.</li>
+ * <li>defaultProvider : whatever provider is standard for the hbase version. Currently
+ * "asyncfs"</li>
+ * <li>asyncfs : a provider that will run on top of an implementation of the Hadoop
+ * FileSystem interface via an asynchronous client.</li>
+ * <li>filesystem : a provider that will run on top of an implementation of the Hadoop
+ * FileSystem interface via HDFS's synchronous DFSClient.</li>
+ * <li>multiwal : a provider that will use multiple "filesystem" wal instances per region
+ * server.</li>
* </ul>
    - * * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name. */ @InterfaceAudience.Private @@ -73,6 +69,7 @@ enum Providers { asyncfs(AsyncFSWALProvider.class); final Class clazz; + Providers(Class clazz) { this.clazz = clazz; } @@ -137,8 +134,10 @@ public Class getProviderClass(String key, String defaultV // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as // the default and we can't use it, we want to fall back to FSHLog which we know works on // all versions. - if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class - && !AsyncFSWALProvider.load()) { + if ( + provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class + && !AsyncFSWALProvider.load() + ) { // AsyncFSWAL has better performance in most cases, and also uses less resources, we will // try to use it if possible. It deeply hacks into the internal of DFSClient so will be // easily broken when upgrading hadoop. @@ -170,10 +169,10 @@ static WALProvider createProvider(Class clazz) throws IOE } /** - * @param conf must not be null, will keep a reference to read params in later reader/writer - * instances. + * @param conf must not be null, will keep a reference to read params in later reader/writer + * instances. * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations - * to make a directory + * to make a directory */ public WALFactory(Configuration conf, String factoryId) throws IOException { // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider @@ -182,16 +181,16 @@ public WALFactory(Configuration conf, String factoryId) throws IOException { } /** - * @param conf must not be null, will keep a reference to read params in later reader/writer - * instances. - * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations - * to make a directory - * @param abortable the server associated with this WAL file + * @param conf must not be null, will keep a reference to read params + * in later reader/writer instances. + * @param factoryId a unique identifier for this factory. used i.e. by + * filesystem implementations to make a directory + * @param abortable the server associated with this WAL file * @param enableSyncReplicationWALProvider whether wrap the wal provider to a - * {@link SyncReplicationWALProvider} + * {@link SyncReplicationWALProvider} */ public WALFactory(Configuration conf, String factoryId, Abortable abortable, - boolean enableSyncReplicationWALProvider) throws IOException { + boolean enableSyncReplicationWALProvider) throws IOException { // until we've moved reader/writer construction down into providers, this initialization must // happen prior to provider initialization, in case they need to instantiate a reader/writer. timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); @@ -220,9 +219,8 @@ public WALFactory(Configuration conf, String factoryId, Abortable abortable, } /** - * Shutdown all WALs and clean up any underlying storage. - * Use only when you will not need to replay and edits that have gone to any wals from this - * factory. + * Shutdown all WALs and clean up any underlying storage. Use only when you will not need to + * replay and edits that have gone to any wals from this factory. 
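For orientation, the WALFactory lifecycle described above (construct, hand out WALs, then shutdown or close) looks roughly like this; the factory id and provider choice are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalFactorySketch {
  static void useWal(RegionInfo region) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem"); // or asyncfs, multiwal, or a WALProvider FQCN
    WALFactory factory = new WALFactory(conf, "example-factory");
    try {
      WAL wal = factory.getWAL(region);
      System.out.println("appending through " + wal);
      // ... appendData/sync through the WAL here ...
    } finally {
      // shutdown() keeps WAL files on disk for replay; close() would also clean up the storage.
      factory.shutdown();
    }
  }
}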
*/ public void close() throws IOException { final WALProvider metaProvider = this.metaProvider.get(); @@ -237,9 +235,9 @@ public void close() throws IOException { } /** - * Tell the underlying WAL providers to shut down, but do not clean up underlying storage. - * If you are not ending cleanly and will need to replay edits from this factory's wals, - * use this method if you can as it will try to leave things as tidy as possible. + * Tell the underlying WAL providers to shut down, but do not clean up underlying storage. If you + * are not ending cleanly and will need to replay edits from this factory's wals, use this method + * if you can as it will try to leave things as tidy as possible. */ public void shutdown() throws IOException { IOException exception = null; @@ -247,7 +245,7 @@ public void shutdown() throws IOException { if (null != metaProvider) { try { metaProvider.shutdown(); - } catch(IOException ioe) { + } catch (IOException ioe) { exception = ioe; } } @@ -279,8 +277,8 @@ public WALProvider getMetaProvider() throws IOException { } catch (Throwable t) { // the WAL provider should be an enum. Proceed } - } - if (clz == null){ + } + if (clz == null) { clz = getProviderClass(META_WAL_PROVIDER, conf.get(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); } provider = createProvider(clz); @@ -300,8 +298,10 @@ public WALProvider getMetaProvider() throws IOException { */ public WAL getWAL(RegionInfo region) throws IOException { // Use different WAL for hbase:meta. Instantiates the meta WALProvider if not already up. - if (region != null && region.isMetaRegion() && - region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { + if ( + region != null && region.isMetaRegion() + && region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID + ) { return getMetaProvider().getWAL(region); } else { return provider.getWAL(region); @@ -309,24 +309,24 @@ public WAL getWAL(RegionInfo region) throws IOException { } public Reader createReader(final FileSystem fs, final Path path) throws IOException { - return createReader(fs, path, (CancelableProgressable)null); + return createReader(fs, path, (CancelableProgressable) null); } /** - * Create a reader for the WAL. If you are reading from a file that's being written to and need - * to reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method - * then just seek back to the last known good position. - * @return A WAL reader. Close when done with it. + * Create a reader for the WAL. If you are reading from a file that's being written to and need to + * reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method then just seek + * back to the last known good position. + * @return A WAL reader. Close when done with it. */ - public Reader createReader(final FileSystem fs, final Path path, - CancelableProgressable reporter) throws IOException { + public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter) + throws IOException { return createReader(fs, path, reporter, true); } public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter, - boolean allowCustom) throws IOException { + boolean allowCustom) throws IOException { Class lrClass = - allowCustom ? logReaderClass : ProtobufLogReader.class; + allowCustom ? logReaderClass : ProtobufLogReader.class; try { // A wal file could be under recovery, so it may take several // tries to get it open. 
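A hedged sketch of reading a finished WAL file back through the reader API discussed above; the path and file system handle are assumed to come from the caller.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalReadSketch {
  static void dump(FileSystem fs, Path walFile, Configuration conf) throws IOException {
    try (WAL.Reader reader = WALFactory.createReader(fs, walFile, conf)) {
      for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
        System.out.println(entry.getKey() + " cells=" + entry.getEdit().getCells().size());
      }
    }
  }
}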
Instead of claiming it is corrupted, retry @@ -355,10 +355,11 @@ public Reader createReader(final FileSystem fs, final Path path, CancelableProgr // Only inspect the Exception to consider retry when it's an IOException if (e instanceof IOException) { String msg = e.getMessage(); - if (msg != null - && (msg.contains("Cannot obtain block length") - || msg.contains("Could not obtain the last block") || msg - .matches("Blocklist for [^ ]* has changed.*"))) { + if ( + msg != null && (msg.contains("Cannot obtain block length") + || msg.contains("Could not obtain the last block") + || msg.matches("Blocklist for [^ ]* has changed.*")) + ) { if (++nbAttempt == 1) { LOG.warn("Lease should have recovered. This is not expected. Will retry", e); } @@ -367,7 +368,7 @@ public Reader createReader(final FileSystem fs, final Path path, CancelableProgr } if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { LOG.error("Can't open after " + nbAttempt + " attempts and " - + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); } else { try { Thread.sleep(nbAttempt < 3 ? 500 : 1000); @@ -394,8 +395,7 @@ public Reader createReader(final FileSystem fs, final Path path, CancelableProgr } /** - * Create a writer for the WAL. - * Uses defaults. + * Create a writer for the WAL. Uses defaults. *

    * Should be package-private. public only for tests and * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} @@ -406,12 +406,11 @@ public Writer createWALWriter(final FileSystem fs, final Path path) throws IOExc } /** - * Should be package-private, visible for recovery testing. - * Uses defaults. + * Should be package-private, visible for recovery testing. Uses defaults. * @return an overwritable writer for recovered edits. caller should close. */ public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) - throws IOException { + throws IOException { return FSHLogProvider.createWriter(conf, fs, path, true); } @@ -421,7 +420,7 @@ public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) // For now, first Configuration object wins. Practically this just impacts the reader/writer class private static final AtomicReference singleton = new AtomicReference<>(); private static final String SINGLETON_ID = WALFactory.class.getName(); - + // Public only for FSHLog public static WALFactory getInstance(Configuration configuration) { WALFactory factory = singleton.get(); @@ -443,55 +442,51 @@ public static WALFactory getInstance(Configuration configuration) { } /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. + * Create a reader for the given path, accept custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. * @return a WAL Reader, caller must close. */ public static Reader createReader(final FileSystem fs, final Path path, - final Configuration configuration) throws IOException { + final Configuration configuration) throws IOException { return getInstance(configuration).createReader(fs, path); } /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. + * Create a reader for the given path, accept custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. * @return a WAL Reader, caller must close. */ static Reader createReader(final FileSystem fs, final Path path, - final Configuration configuration, final CancelableProgressable reporter) throws IOException { + final Configuration configuration, final CancelableProgressable reporter) throws IOException { return getInstance(configuration).createReader(fs, path, reporter); } /** - * Create a reader for the given path, ignore custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. - * only public pending move of {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} + * Create a reader for the given path, ignore custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. only public pending move of + * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} * @return a WAL Reader, caller must close. */ public static Reader createReaderIgnoreCustomClass(final FileSystem fs, final Path path, - final Configuration configuration) throws IOException { + final Configuration configuration) throws IOException { return getInstance(configuration).createReader(fs, path, null, false); } /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. + * If you already have a WALFactory, you should favor the instance method. Uses defaults. 
* @return a Writer that will overwrite files. Caller must close. */ static Writer createRecoveredEditsWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { + final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, true); } /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. + * If you already have a WALFactory, you should favor the instance method. Uses defaults. * @return a writer that won't overwrite files. Caller must close. */ public static Writer createWALWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { + final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, false); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java index fdbacbda2779..2c6ae83f63df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java @@ -17,26 +17,24 @@ */ package org.apache.hadoop.hbase.wal; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.regionserver.SequenceId; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; - +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.regionserver.SequenceId; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Key for WAL Entry. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION, - HBaseInterfaceAudience.COPROC}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION, + HBaseInterfaceAudience.COPROC }) public interface WALKey extends SequenceId, Comparable { /** * Unmodifiable empty list of UUIDs. @@ -87,31 +85,30 @@ default long getNonce() { /** * Add a named String value to this WALKey to be persisted into the WAL - * @param attributeKey Name of the attribute + * @param attributeKey Name of the attribute * @param attributeValue Value of the attribute */ void addExtendedAttribute(String attributeKey, byte[] attributeValue); - /** - * Return a named String value injected into the WALKey during processing, such as by a - * coprocessor - * @param attributeKey The key of a key / value pair - */ - default byte[] getExtendedAttribute(String attributeKey){ + /** + * Return a named String value injected into the WALKey during processing, such as by a + * coprocessor + * @param attributeKey The key of a key / value pair + */ + default byte[] getExtendedAttribute(String attributeKey) { return null; } - /** - * Returns a map of all extended attributes injected into this WAL key. - */ + /** + * Returns a map of all extended attributes injected into this WAL key. + */ default Map getExtendedAttributes() { return new HashMap<>(); } + /** - * Produces a string map for this key. Useful for programmatic use and - * manipulation of the data stored in an WALKeyImpl, for example, printing - * as JSON. 
- * + * Produces a string map for this key. Useful for programmatic use and manipulation of the data + * stored in an WALKeyImpl, for example, printing as JSON. * @return a Map containing data from this key */ default Map toStringMap() { @@ -120,8 +117,8 @@ default Map toStringMap() { stringMap.put("region", Bytes.toStringBinary(getEncodedRegionName())); stringMap.put("sequence", getSequenceId()); Map extendedAttributes = getExtendedAttributes(); - if (extendedAttributes != null){ - for (Map.Entry entry : extendedAttributes.entrySet()){ + if (extendedAttributes != null) { + for (Map.Entry entry : extendedAttributes.entrySet()) { stringMap.put(entry.getKey(), Bytes.toStringBinary(entry.getValue())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java index 4c3fc4edc787..3ca1b1589fec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,19 +44,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.ScopeType; /** - * Default implementation of Key for an Entry in the WAL. - * For internal use only though Replication needs to have access. - * - * The log intermingles edits to many tables and rows, so each log entry - * identifies the appropriate table and row. Within a table and row, they're - * also sorted. - * - *
<p>
    Some Transactional edits (START, COMMIT, ABORT) will not have an associated row. - * + * Default implementation of Key for an Entry in the WAL. For internal use only though Replication + * needs to have access. The log intermingles edits to many tables and rows, so each log entry + * identifies the appropriate table and row. Within a table and row, they're also sorted. + *
<p>
    + * Some Transactional edits (START, COMMIT, ABORT) will not have an associated row. */ // TODO: Key and WALEdit are never used separately, or in one-to-many relation, for practical -// purposes. They need to be merged into WALEntry. -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION}) +// purposes. They need to be merged into WALEntry. +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION }) public class WALKeyImpl implements WALKey { public static final WALKeyImpl EMPTY_WALKEYIMPL = new WALKeyImpl(); @@ -65,11 +61,10 @@ public MultiVersionConcurrencyControl getMvcc() { } /** - * Use it to complete mvcc transaction. This WALKeyImpl was part of - * (the transaction is started when you call append; see the comment on FSHLog#append). To - * complete call + * Use it to complete mvcc transaction. This WALKeyImpl was part of (the transaction is started + * when you call append; see the comment on FSHLog#append). To complete call + * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} or * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} - * or {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} * @return A WriteEntry gotten from local WAL subsystem. * @see #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry) */ @@ -84,7 +79,7 @@ public void setWriteEntry(MultiVersionConcurrencyControl.WriteEntry writeEntry) this.sequenceId = writeEntry.getWriteNumber(); } - private byte [] encodedRegionName; + private byte[] encodedRegionName; private TableName tablename; @@ -119,18 +114,18 @@ public void setWriteEntry(MultiVersionConcurrencyControl.WriteEntry writeEntry) private Map extendedAttributes; public WALKeyImpl() { - init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null, null); + init(null, null, 0L, HConstants.LATEST_TIMESTAMP, new ArrayList<>(), HConstants.NO_NONCE, + HConstants.NO_NONCE, null, null, null); } public WALKeyImpl(final NavigableMap replicationScope) { - init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope, null); + init(null, null, 0L, HConstants.LATEST_TIMESTAMP, new ArrayList<>(), HConstants.NO_NONCE, + HConstants.NO_NONCE, null, replicationScope, null); } @InterfaceAudience.Private public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, - final long now, UUID clusterId) { + final long now, UUID clusterId) { List clusterIds = new ArrayList<>(1); clusterIds.add(clusterId); init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE, @@ -139,7 +134,7 @@ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, lon @InterfaceAudience.Private public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, - final long now, UUID clusterId, MultiVersionConcurrencyControl mvcc) { + final long now, UUID clusterId, MultiVersionConcurrencyControl mvcc) { List clusterIds = new ArrayList<>(1); clusterIds.add(clusterId); init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE, @@ -148,90 +143,68 @@ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, lon // TODO: Fix being able to pass in sequenceid. 
public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now) { - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - EMPTY_UUIDS, - HConstants.NO_NONCE, - HConstants.NO_NONCE, - null, null, null); + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, + HConstants.NO_NONCE, null, null, null); } // TODO: Fix being able to pass in sequenceid. public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - final NavigableMap replicationScope) { + final NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, null, replicationScope, null); + HConstants.NO_NONCE, null, replicationScope, null); } public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, mvcc, replicationScope, null); + HConstants.NO_NONCE, mvcc, replicationScope, null); } public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - MultiVersionConcurrencyControl mvcc, - final NavigableMap replicationScope, - Map extendedAttributes) { + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope, + Map extendedAttributes) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, mvcc, replicationScope, extendedAttributes); + HConstants.NO_NONCE, mvcc, replicationScope, extendedAttributes); } - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - final long now, - MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - EMPTY_UUIDS, - HConstants.NO_NONCE, - HConstants.NO_NONCE, - mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, + HConstants.NO_NONCE, mvcc, null, null); } /** - * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. - * Intended for coprocessors to add annotations to a system-generated WALKey - * for persistence to the WAL. - * @param key Key to be copied into this new key + * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. Intended + * for coprocessors to add annotations to a system-generated WALKey for persistence to the WAL. 
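Since the copy constructor described just above exists so coprocessors can attach extended attributes to a system-generated key, a small illustrative sketch may help. It is not part of this patch, and the attribute name and value are hypothetical.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

public class WalKeyAttributeSketch {
  // Wrap an existing, system-generated key with one extra attribute, as a coprocessor might.
  static WALKeyImpl annotate(WALKeyImpl systemKey) {
    Map<String, byte[]> attrs = new HashMap<>();
    attrs.put("origin-cluster", Bytes.toBytes("cluster-a")); // hypothetical attribute
    WALKeyImpl annotated = new WALKeyImpl(systemKey, attrs);
    // The attribute is now visible through the WALKey interface and in toStringMap() output.
    assert annotated.getExtendedAttribute("origin-cluster") != null;
    return annotated;
  }
}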
+ * @param key Key to be copied into this new key * @param extendedAttributes Extra attributes to copy into the new key */ - public WALKeyImpl(WALKeyImpl key, - Map extendedAttributes){ - init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), - key.getWriteTime(), key.getClusterIds(), key.getNonceGroup(), key.getNonce(), - key.getMvcc(), key.getReplicationScopes(), extendedAttributes); + public WALKeyImpl(WALKeyImpl key, Map extendedAttributes) { + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), key.getWriteTime(), + key.getClusterIds(), key.getNonceGroup(), key.getNonce(), key.getMvcc(), + key.getReplicationScopes(), extendedAttributes); } /** - * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the - * parent interface is missing, plus some extended attributes. Intended - * for coprocessors to add annotations to a system-generated WALKey for - * persistence to the WAL. + * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the parent + * interface is missing, plus some extended attributes. Intended for coprocessors to add + * annotations to a system-generated WALKey for persistence to the WAL. */ - public WALKeyImpl(WALKey key, - List clusterIds, - MultiVersionConcurrencyControl mvcc, - final NavigableMap replicationScopes, - Map extendedAttributes){ - init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), - key.getWriteTime(), clusterIds, key.getNonceGroup(), key.getNonce(), - mvcc, replicationScopes, extendedAttributes); + public WALKeyImpl(WALKey key, List clusterIds, MultiVersionConcurrencyControl mvcc, + final NavigableMap replicationScopes, Map extendedAttributes) { + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), key.getWriteTime(), + clusterIds, key.getNonceGroup(), key.getNonce(), mvcc, replicationScopes, extendedAttributes); } + /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - *
<p>
    Used by log splitting and snapshots. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. + *
<p>
    + * Used by log splitting and snapshots. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). + * HRegionInfo#getEncodedNameAsBytes(). * @param tablename - name of table * @param logSeqNum - log sequence number * @param now Time at which this edit was written. @@ -239,23 +212,22 @@ public WALKeyImpl(WALKey key, * @param nonceGroup the nonceGroup * @param nonce the nonce * @param mvcc the mvcc associate the WALKeyImpl - * @param replicationScope the non-default replication scope - * associated with the region's column families + * @param replicationScope the non-default replication scope associated with the region's column + * families */ // TODO: Fix being able to pass in sequenceid. public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, - final long now, List clusterIds, long nonceGroup, long nonce, - MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { + final long now, List clusterIds, long nonceGroup, long nonce, + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc, - replicationScope, null); + replicationScope, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - *
<p>
    Used by log splitting and snapshots. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. + *
<p>
    + * Used by log splitting and snapshots. * @param encodedRegionName Encoded name of the region as returned by * HRegionInfo#getEncodedNameAsBytes(). * @param tablename - name of table @@ -264,117 +236,77 @@ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, lon * @param clusterIds the clusters that have consumed the change(used in Replication) */ // TODO: Fix being able to pass in sequenceid. - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - final long now, - List clusterIds, - long nonceGroup, - long nonce, - MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, - nonce, mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, List clusterIds, long nonceGroup, long nonce, + MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc, null, + null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by * HRegionInfo#getEncodedNameAsBytes(). * @param tablename the tablename * @param now Time at which this edit was written. - * @param clusterIds the clusters that have consumed the change(used in Replication) - * @param nonceGroup - * @param nonce - * @param mvcc mvcc control used to generate sequence numbers and control read/write points + * @param clusterIds the clusters that have consumed the change(used in Replication) nn + * * @param mvcc mvcc control used to generate sequence numbers and + * control read/write points */ - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, - null, null); + null, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename - * @param now Time at which this edit was written. + * HRegionInfo#getEncodedNameAsBytes(). n * @param now Time + * at which this edit was written. 
* @param clusterIds the clusters that have consumed the change(used in Replication) * @param nonceGroup the nonceGroup * @param nonce the nonce - * @param mvcc mvcc control used to generate sequence numbers and control read/write points + * @param mvcc mvcc control used to generate sequence numbers and control read/write + * points * @param replicationScope the non-default replication scope of the column families */ - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, - replicationScope, null); + replicationScope, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename - * @param logSeqNum - * @param nonceGroup - * @param nonce + * HRegionInfo#getEncodedNameAsBytes(). nnnn */ // TODO: Fix being able to pass in sequenceid. - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - long nonceGroup, - long nonce, - final MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, - tablename, - logSeqNum, - EnvironmentEdgeManager.currentTime(), - EMPTY_UUIDS, - nonceGroup, - nonce, - mvcc, null, null); - } - - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope, - Map extendedAttributes){ - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - clusterIds, - nonceGroup, - nonce, - mvcc, replicationScope, extendedAttributes); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + long nonceGroup, long nonce, final MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTime(), EMPTY_UUIDS, + nonceGroup, nonce, mvcc, null, null); + } + + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope, + Map extendedAttributes) { + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, + replicationScope, extendedAttributes); } @InterfaceAudience.Private - protected void init(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - final long now, - List clusterIds, - long nonceGroup, - long nonce, - MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope, - Map extendedAttributes) { + protected void init(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, List clusterIds, long nonceGroup, long nonce, + 
MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope, + Map extendedAttributes) { this.sequenceId = logSeqNum; this.writeTime = now; this.clusterIds = clusterIds; @@ -398,7 +330,7 @@ protected void setSequenceId(long sequenceId) { /** @return encoded region name */ @Override - public byte [] getEncodedRegionName() { + public byte[] getEncodedRegionName() { return encodedRegionName; } @@ -489,27 +421,28 @@ public List getClusterIds() { * returns DEFAULT_CLUSTER_ID (cases where replication is not enabled) */ @Override - public UUID getOriginatingClusterId(){ - return clusterIds.isEmpty()? HConstants.DEFAULT_CLUSTER_ID: clusterIds.get(0); + public UUID getOriginatingClusterId() { + return clusterIds.isEmpty() ? HConstants.DEFAULT_CLUSTER_ID : clusterIds.get(0); } @Override - public void addExtendedAttribute(String attributeKey, byte[] attributeValue){ - if (extendedAttributes == null){ + public void addExtendedAttribute(String attributeKey, byte[] attributeValue) { + if (extendedAttributes == null) { extendedAttributes = new HashMap(); } extendedAttributes.put(attributeKey, attributeValue); } @Override - public byte[] getExtendedAttribute(String attributeKey){ + public byte[] getExtendedAttribute(String attributeKey) { return extendedAttributes != null ? extendedAttributes.get(attributeKey) : null; } @Override - public Map getExtendedAttributes(){ - return extendedAttributes != null ? new HashMap(extendedAttributes) : - new HashMap(); + public Map getExtendedAttributes() { + return extendedAttributes != null + ? new HashMap(extendedAttributes) + : new HashMap(); } @Override @@ -525,7 +458,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { return false; } - return compareTo((WALKey)obj) == 0; + return compareTo((WALKey) obj) == 0; } @Override @@ -544,7 +477,7 @@ public int compareTo(WALKey o) { long otherSid = o.getSequenceId(); if (sid < otherSid) { result = -1; - } else if (sid > otherSid) { + } else if (sid > otherSid) { result = 1; } if (result == 0) { @@ -560,10 +493,9 @@ public int compareTo(WALKey o) { } /** - * Drop this instance's tablename byte array and instead - * hold a reference to the provided tablename. This is not - * meant to be a general purpose setter - it's only used - * to collapse references to conserve memory. + * Drop this instance's tablename byte array and instead hold a reference to the provided + * tablename. This is not meant to be a general purpose setter - it's only used to collapse + * references to conserve memory. */ void internTableName(TableName tablename) { // We should not use this as a setter - only to swap @@ -573,12 +505,11 @@ void internTableName(TableName tablename) { } /** - * Drop this instance's region name byte array and instead - * hold a reference to the provided region name. This is not - * meant to be a general purpose setter - it's only used - * to collapse references to conserve memory. + * Drop this instance's region name byte array and instead hold a reference to the provided region + * name. This is not meant to be a general purpose setter - it's only used to collapse references + * to conserve memory. */ - void internEncodedRegionName(byte []encodedRegionName) { + void internEncodedRegionName(byte[] encodedRegionName) { // We should not use this as a setter - only to swap // in a new reference to the same table name. 
assert Bytes.equals(this.encodedRegionName, encodedRegionName); @@ -586,7 +517,7 @@ void internEncodedRegionName(byte []encodedRegionName) { } public WALProtos.WALKey.Builder getBuilder(WALCellCodec.ByteStringCompressor compressor) - throws IOException { + throws IOException { WALProtos.WALKey.Builder builder = WALProtos.WALKey.newBuilder(); builder.setEncodedRegionName( compressor.compress(this.encodedRegionName, CompressionContext.DictionaryIndex.REGION)); @@ -612,16 +543,16 @@ public WALProtos.WALKey.Builder getBuilder(WALCellCodec.ByteStringCompressor com if (replicationScope != null) { for (Map.Entry e : replicationScope.entrySet()) { ByteString family = - compressor.compress(e.getKey(), CompressionContext.DictionaryIndex.FAMILY); + compressor.compress(e.getKey(), CompressionContext.DictionaryIndex.FAMILY); builder.addScopes(FamilyScope.newBuilder().setFamily(family) - .setScopeType(ScopeType.forNumber(e.getValue()))); + .setScopeType(ScopeType.forNumber(e.getValue()))); } } - if (extendedAttributes != null){ - for (Map.Entry e : extendedAttributes.entrySet()){ - WALProtos.Attribute attr = WALProtos.Attribute.newBuilder(). - setKey(e.getKey()).setValue(compressor.compress(e.getValue(), - CompressionContext.DictionaryIndex.TABLE)).build(); + if (extendedAttributes != null) { + for (Map.Entry e : extendedAttributes.entrySet()) { + WALProtos.Attribute attr = WALProtos.Attribute.newBuilder().setKey(e.getKey()) + .setValue(compressor.compress(e.getValue(), CompressionContext.DictionaryIndex.TABLE)) + .build(); builder.addExtendedAttributes(attr); } } @@ -629,11 +560,11 @@ public WALProtos.WALKey.Builder getBuilder(WALCellCodec.ByteStringCompressor com } public void readFieldsFromPb(WALProtos.WALKey walKey, - WALCellCodec.ByteStringUncompressor uncompressor) throws IOException { + WALCellCodec.ByteStringUncompressor uncompressor) throws IOException { this.encodedRegionName = uncompressor.uncompress(walKey.getEncodedRegionName(), CompressionContext.DictionaryIndex.REGION); byte[] tablenameBytes = - uncompressor.uncompress(walKey.getTableName(), CompressionContext.DictionaryIndex.TABLE); + uncompressor.uncompress(walKey.getTableName(), CompressionContext.DictionaryIndex.TABLE); this.tablename = TableName.valueOf(tablenameBytes); clusterIds.clear(); for (HBaseProtos.UUID clusterId : walKey.getClusterIdsList()) { @@ -650,7 +581,7 @@ public void readFieldsFromPb(WALProtos.WALKey walKey, this.replicationScope = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (FamilyScope scope : walKey.getScopesList()) { byte[] family = - uncompressor.uncompress(scope.getFamily(), CompressionContext.DictionaryIndex.FAMILY); + uncompressor.uncompress(scope.getFamily(), CompressionContext.DictionaryIndex.FAMILY); this.replicationScope.put(family, scope.getScopeType().getNumber()); } } @@ -659,11 +590,11 @@ public void readFieldsFromPb(WALProtos.WALKey walKey, if (walKey.hasOrigSequenceNumber()) { this.origLogSeqNum = walKey.getOrigSequenceNumber(); } - if (walKey.getExtendedAttributesCount() > 0){ + if (walKey.getExtendedAttributesCount() > 0) { this.extendedAttributes = new HashMap<>(walKey.getExtendedAttributesCount()); - for (WALProtos.Attribute attr : walKey.getExtendedAttributesList()){ + for (WALProtos.Attribute attr : walKey.getExtendedAttributesList()) { byte[] value = - uncompressor.uncompress(attr.getValue(), CompressionContext.DictionaryIndex.TABLE); + uncompressor.uncompress(attr.getValue(), CompressionContext.DictionaryIndex.TABLE); extendedAttributes.put(attr.getKey(), value); } } @@ -683,7 +614,7 @@ 
public long estimatedSerializedSizeOf() { size += Bytes.SIZEOF_LONG; // nonce } if (replicationScope != null) { - for (Map.Entry scope: replicationScope.entrySet()) { + for (Map.Entry scope : replicationScope.entrySet()) { size += scope.getKey().length; size += Bytes.SIZEOF_INT; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index 07bcb1067ffc..d76ac7da7d7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @@ -56,17 +57,10 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser; /** - * WALPrettyPrinter prints the contents of a given WAL with a variety of - * options affecting formatting and extent of content. - * - * It targets two usage cases: pretty printing for ease of debugging directly by - * humans, and JSON output for consumption by monitoring and/or maintenance - * scripts. - * - * It can filter by row, region, or sequence id. - * - * It can also toggle output of values. - * + * WALPrettyPrinter prints the contents of a given WAL with a variety of options affecting + * formatting and extent of content. It targets two usage cases: pretty printing for ease of + * debugging directly by humans, and JSON output for consumption by monitoring and/or maintenance + * scripts. It can filter by row, region, or sequence id. It can also toggle output of values. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -75,7 +69,7 @@ public class WALPrettyPrinter { // Output template for pretty printing. 
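The WALPrettyPrinter class javadoc above describes a tool that filters WAL contents by row, region, or sequence id and can emit JSON. A minimal sketch of driving it programmatically follows; it is not part of this patch, the flags mirror the options registered in run() later in this file, and the WAL path is hypothetical.

import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

public class DumpWalSketch {
  public static void main(String[] args) throws Exception {
    // -w filters by row, -s by sequence id; the trailing argument is a hypothetical WAL file path.
    WALPrettyPrinter.run(new String[] { "-w", "myrow", "-s", "2332",
      "/hbase/WALs/example-host,16020,1600000000000/example-host%2C16020%2C1600000000000.1600000000001" });
  }
}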
private static final String outputTmpl = - "Sequence=%s, table=%s, region=%s, at write timestamp=%s"; + "Sequence=%s, table=%s, region=%s, at write timestamp=%s"; private boolean outputValues; private boolean outputJSON; @@ -99,7 +93,7 @@ public class WALPrettyPrinter { private PrintStream out; // for JSON encoding private static final Gson GSON = GsonUtil.createGson().create(); - //allows for jumping straight to a given portion of the file + // allows for jumping straight to a given portion of the file private long position; /** @@ -148,11 +142,8 @@ public void disableJSON() { } /** - * sets the region by which output will be filtered - * - * @param sequence - * when nonnegative, serves as a filter; only log entries with this - * sequence id will be printed + * sets the region by which output will be filtered n * when nonnegative, serves as a filter; only + * log entries with this sequence id will be printed */ public void setSequenceFilter(long sequence) { this.sequence = sequence; @@ -165,34 +156,26 @@ public void setSequenceFilter(long sequence) { public void setTableFilter(String tablesWithDelimiter) { Collections.addAll(tableSet, tablesWithDelimiter.split(",")); } + /** - * sets the region by which output will be filtered - * - * @param region - * when not null, serves as a filter; only log entries from this - * region will be printed + * sets the region by which output will be filtered n * when not null, serves as a filter; only + * log entries from this region will be printed */ public void setRegionFilter(String region) { this.region = region; } /** - * sets the row key by which output will be filtered - * - * @param row - * when not null, serves as a filter; only log entries from this row - * will be printed + * sets the row key by which output will be filtered n * when not null, serves as a filter; only + * log entries from this row will be printed */ public void setRowFilter(String row) { this.row = row; } /** - * sets the rowPrefix key prefix by which output will be filtered - * - * @param rowPrefix - * when not null, serves as a filter; only log entries with rows - * having this prefix will be printed + * sets the rowPrefix key prefix by which output will be filtered n * when not null, serves as a + * filter; only log entries with rows having this prefix will be printed */ public void setRowPrefixFilter(String rowPrefix) { this.rowPrefix = rowPrefix; @@ -206,17 +189,16 @@ public void setOutputOnlyRowKey() { } /** - * sets the position to start seeking the WAL file - * @param position - * initial position to start seeking the given WAL file + * sets the position to start seeking the WAL file n * initial position to start seeking the given + * WAL file */ public void setPosition(long position) { this.position = position; } /** - * enables output as a single, persistent list. at present, only relevant in - * the case of JSON output. + * enables output as a single, persistent list. at present, only relevant in the case of JSON + * output. */ public void beginPersistentOutput() { if (persistentOutput) { @@ -230,8 +212,7 @@ public void beginPersistentOutput() { } /** - * ends output of a single, persistent list. at present, only relevant in the - * case of JSON output. + * ends output of a single, persistent list. at present, only relevant in the case of JSON output. 
*/ public void endPersistentOutput() { if (!persistentOutput) { @@ -244,19 +225,12 @@ public void endPersistentOutput() { } /** - * reads a log file and outputs its contents, one transaction at a time, as - * specified by the currently configured options - * - * @param conf - * the HBase configuration relevant to this log file - * @param p - * the path of the log file to be read - * @throws IOException - * may be unable to access the configured filesystem or requested - * file. + * reads a log file and outputs its contents, one transaction at a time, as specified by the + * currently configured options n * the HBase configuration relevant to this log file n * the path + * of the log file to be read n * may be unable to access the configured filesystem or requested + * file. */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { throw new FileNotFoundException(p.toString()); @@ -304,8 +278,7 @@ public void processFile(final Configuration conf, final Path p) Map txn = key.toStringMap(); long writeTime = key.getWriteTime(); // check output filters - if (!tableSet.isEmpty() && - !tableSet.contains(txn.get("table").toString())) { + if (!tableSet.isEmpty() && !tableSet.contains(txn.get("table").toString())) { continue; } if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence) { @@ -341,8 +314,8 @@ public void processFile(final Configuration conf, final Path p) } else { // Pretty output, complete with indentation by atomic action if (!outputOnlyRowKey) { - out.println(String.format(outputTmpl, - txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); + out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), + txn.get("region"), new Date(writeTime))); } for (int i = 0; i < actions.size(); i++) { Map op = actions.get(i); @@ -362,8 +335,8 @@ public void processFile(final Configuration conf, final Path p) } } - public static void printCell(PrintStream out, Map op, - boolean outputValues, boolean outputOnlyRowKey) { + public static void printCell(PrintStream out, Map op, boolean outputValues, + boolean outputOnlyRowKey) { String rowDetails = "row=" + op.get("row"); if (outputOnlyRowKey) { out.println(rowDetails); @@ -382,16 +355,18 @@ public static void printCell(PrintStream out, Map op, out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, - boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { + public static Map toStringMap(Cell cell, boolean printRowKeyOnly, + String rowPrefix, String row, boolean outputValues) { Map stringMap = new HashMap<>(); - String rowKey = Bytes.toStringBinary(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength()); + String rowKey = + Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Row and row prefix are mutually options so both cannot be true at the // same time. 
We can include checks in the same condition // Check if any of the filters are satisfied by the row, if not return empty map - if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) || - (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { + if ( + (!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) + || (!Strings.isNullOrEmpty(row) && !rowKey.equals(row)) + ) { return stringMap; } @@ -400,11 +375,10 @@ public static Map toStringMap(Cell cell, return stringMap; } stringMap.put("type", cell.getType()); - stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength())); - stringMap.put("qualifier", - Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())); + stringMap.put("family", + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); + stringMap.put("qualifier", Bytes.toStringBinary(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); stringMap.put("total_size_sum", cell.heapSize()); @@ -413,8 +387,7 @@ public static Map toStringMap(Cell cell, Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); - tagsString - .add((tag.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(tag))); + tagsString.add((tag.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(tag))); } stringMap.put("tag", tagsString); } @@ -433,13 +406,9 @@ public static void main(String[] args) throws IOException { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. n * Command line arguments n * Thrown upon file system + * errors etc. */ public static void run(String[] args) throws IOException { // create options @@ -450,11 +419,9 @@ public static void run(String[] args) throws IOException { options.addOption("t", "tables", true, "Table names (comma separated) to filter by; eg: test1,test2,test3 "); options.addOption("r", "region", true, - "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); - options.addOption("s", "sequence", true, - "Sequence to filter by. Pass sequence number."); - options.addOption("k", "outputOnlyRowKey", false, - "Print only row keys"); + "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); + options.addOption("s", "sequence", true, "Sequence to filter by. Pass sequence number."); + options.addOption("k", "outputOnlyRowKey", false, "Print only row keys"); options.addOption("w", "row", true, "Row to filter by. 
Pass row name."); options.addOption("f", "rowPrefix", true, "Row prefix to filter by."); options.addOption("g", "goto", true, "Position to seek to in the file"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index 01c1d11ead70..91b397df0705 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.List; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; @@ -44,12 +42,12 @@ public interface WALProvider { /** * Set up the provider to create wals. will only be called once per instance. - * @param factory factory that made us may not be null - * @param conf may not be null + * @param factory factory that made us may not be null + * @param conf may not be null * @param providerId differentiate between providers from one factory. may be null */ void init(WALFactory factory, Configuration conf, String providerId, Abortable server) - throws IOException; + throws IOException; /** * @param region the region which we want to get a WAL for it. Could be null. @@ -78,18 +76,17 @@ void init(WALFactory factory, Configuration conf, String providerId, Abortable s interface WriterBase extends Closeable { long getLength(); + /** - * NOTE: We add this method for {@link WALFileLengthProvider} used for replication, - * considering the case if we use {@link AsyncFSWAL},we write to 3 DNs concurrently, - * according to the visibility guarantee of HDFS, the data will be available immediately - * when arriving at DN since all the DNs will be considered as the last one in pipeline. - * This means replication may read uncommitted data and replicate it to the remote cluster - * and cause data inconsistency. - * The method {@link WriterBase#getLength} may return length which just in hdfs client - * buffer and not successfully synced to HDFS, so we use this method to return the length - * successfully synced to HDFS and replication thread could only read writing WAL file - * limited by this length. - * see also HBASE-14004 and this document for more details: + * NOTE: We add this method for {@link WALFileLengthProvider} used for replication, considering + * the case if we use {@link AsyncFSWAL},we write to 3 DNs concurrently, according to the + * visibility guarantee of HDFS, the data will be available immediately when arriving at DN + * since all the DNs will be considered as the last one in pipeline. This means replication may + * read uncommitted data and replicate it to the remote cluster and cause data inconsistency. + * The method {@link WriterBase#getLength} may return length which just in hdfs client buffer + * and not successfully synced to HDFS, so we use this method to return the length successfully + * synced to HDFS and replication thread could only read writing WAL file limited by this + * length. 
see also HBASE-14004 and this document for more details: * https://docs.google.com/document/d/11AyWtGhItQs6vsLRIx32PwTxmBY3libXwGXI25obVEY/edit# * @return byteSize successfully synced to underlying filesystem. */ @@ -131,6 +128,6 @@ interface AsyncWriter extends WriterBase { default WALFileLengthProvider getWALFileLengthProvider() { return path -> getWALs().stream().map(w -> w.getLogFileSizeIfBeingWritten(path)) - .filter(o -> o.isPresent()).findAny().orElse(OptionalLong.empty()); + .filter(o -> o.isPresent()).findAny().orElse(OptionalLong.empty()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index 94747ae026d2..d001ea755b74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -108,7 +108,7 @@ public static void finishSplitLogFile(String logfile, Configuration conf) throws * that couldn't be successfully parsed to corruptDir (.corrupt) for later investigation */ static void archive(final Path wal, final boolean corrupt, final Path oldWALDir, - final FileSystem walFS, final Configuration conf) throws IOException { + final FileSystem walFS, final Configuration conf) throws IOException { Path dir; Path target; if (corrupt) { @@ -132,8 +132,8 @@ private static void mkdir(FileSystem fs, Path dir) throws IOException { } /** - * Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. - * WAL may have already been moved; makes allowance. + * Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. WAL may have + * already been moved; makes allowance. */ public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOException { if (fs.exists(p)) { @@ -149,19 +149,19 @@ public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOExcep * Path to a file under RECOVERED_EDITS_DIR directory of the region found in logEntry * named for the sequenceid in the passed logEntry: e.g. * /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures existence of - * RECOVERED_EDITS_DIR under the region creating it if necessary. - * And also set storage policy for RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured. - * @param tableName the table name - * @param encodedRegionName the encoded region name - * @param seqId the sequence id which used to generate file name + * RECOVERED_EDITS_DIR under the region creating it if necessary. And also set storage policy for + * RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured. + * @param tableName the table name + * @param encodedRegionName the encoded region name + * @param seqId the sequence id which used to generate file name * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name. - * @param tmpDirName of the directory used to sideline old recovered edits file - * @param conf configuration + * @param tmpDirName of the directory used to sideline old recovered edits file + * @param conf configuration * @return Path to file into which to dump split log edits. 
*/ @SuppressWarnings("deprecation") static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionName, long seqId, - String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException { + String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException { FileSystem walFS = CommonFSUtils.getWALFileSystem(conf); Path tableDir = CommonFSUtils.getWALTableDir(conf, tableName); String encodedRegionNameStr = Bytes.toString(encodedRegionName); @@ -175,8 +175,7 @@ static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionNam } tmp = new Path(tmp, HConstants.RECOVERED_EDITS_DIR + "_" + encodedRegionNameStr); LOG.warn("Found existing old file: {}. It could be some " - + "leftover of an old installation. It should be a folder instead. " - + "So moving it to {}", + + "leftover of an old installation. It should be a folder instead. " + "So moving it to {}", dir, tmp); if (!walFS.rename(dir, tmp)) { LOG.warn("Failed to sideline old file {}", dir); @@ -227,12 +226,12 @@ public static Path getRegionDirRecoveredEditsDir(final Path regionDir) { /** * Check whether there is recovered.edits in the region dir - * @param conf conf + * @param conf conf * @param regionInfo the region to check * @return true if recovered.edits exist in the region dir */ public static boolean hasRecoveredEdits(final Configuration conf, final RegionInfo regionInfo) - throws IOException { + throws IOException { // No recovered.edits for non default replica regions if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { return false; @@ -288,12 +287,12 @@ public static long getMaxRegionSequenceId(Configuration conf, RegionInfo region, /** * Returns sorted set of edit files made by splitter, excluding files with '.temp' suffix. - * @param walFS WAL FileSystem used to retrieving split edits files. + * @param walFS WAL FileSystem used to retrieving split edits files. * @param regionDir WAL region dir to look for recovered edits files under. * @return Files in passed regionDir as a sorted set. */ public static NavigableSet getSplitEditFilesSorted(final FileSystem walFS, - final Path regionDir) throws IOException { + final Path regionDir) throws IOException { NavigableSet filesSorted = new TreeSet<>(); Path editsdir = getRegionDirRecoveredEditsDir(regionDir); if (!walFS.exists(editsdir)) { @@ -333,14 +332,14 @@ public boolean accept(Path p) { /** * Move aside a bad edits file. - * @param fs the file system used to rename bad edits file. + * @param fs the file system used to rename bad edits file. * @param edits Edits file to move aside. * @return The name of the moved aside file. */ public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) - throws IOException { + throws IOException { Path moveAsideName = - new Path(edits.getParent(), edits.getName() + "." + EnvironmentEdgeManager.currentTime()); + new Path(edits.getParent(), edits.getName() + "." 
+ EnvironmentEdgeManager.currentTime()); if (!fs.rename(edits, moveAsideName)) { LOG.warn("Rename failed from {} to {}", edits, moveAsideName); } @@ -352,11 +351,11 @@ public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) */ public static boolean isSequenceIdFile(final Path file) { return file.getName().endsWith(SEQUENCE_ID_FILE_SUFFIX) - || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX); + || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX); } private static FileStatus[] getSequenceIdFiles(FileSystem walFS, Path regionDir) - throws IOException { + throws IOException { // TODO: Why are we using a method in here as part of our normal region open where // there is no splitting involved? Fix. St.Ack 01/20/2017. Path editsDir = getRegionDirRecoveredEditsDir(regionDir); @@ -374,7 +373,7 @@ private static long getMaxSequenceId(FileStatus[] files) { String fileName = file.getPath().getName(); try { maxSeqId = Math.max(maxSeqId, Long - .parseLong(fileName.substring(0, fileName.length() - SEQUENCE_ID_FILE_SUFFIX_LENGTH))); + .parseLong(fileName.substring(0, fileName.length() - SEQUENCE_ID_FILE_SUFFIX_LENGTH))); } catch (NumberFormatException ex) { LOG.warn("Invalid SeqId File Name={}", fileName); } @@ -393,16 +392,16 @@ public static long getMaxRegionSequenceId(FileSystem walFS, Path regionDir) thro * Create a file with name as region's max sequence id */ public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, long newMaxSeqId) - throws IOException { + throws IOException { FileStatus[] files = getSequenceIdFiles(walFS, regionDir); long maxSeqId = getMaxSequenceId(files); if (maxSeqId > newMaxSeqId) { throw new IOException("The new max sequence id " + newMaxSeqId - + " is less than the old max sequence id " + maxSeqId); + + " is less than the old max sequence id " + maxSeqId); } // write a new seqId file Path newSeqIdFile = - new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX); + new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX); if (newMaxSeqId != maxSeqId) { try { if (!walFS.createNewFile(newSeqIdFile) && !walFS.exists(newSeqIdFile)) { @@ -425,7 +424,7 @@ public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, l /** A struct used by getMutationsFromWALEntry */ public static class MutationReplay implements Comparable { public MutationReplay(ClientProtos.MutationProto.MutationType type, Mutation mutation, - long nonceGroup, long nonce) { + long nonceGroup, long nonce) { this.type = type; this.mutation = mutation; if (this.mutation.getDurability() != Durability.SKIP_WAL) { @@ -437,9 +436,12 @@ public MutationReplay(ClientProtos.MutationProto.MutationType type, Mutation mut } private final ClientProtos.MutationProto.MutationType type; - @SuppressWarnings("checkstyle:VisibilityModifier") public final Mutation mutation; - @SuppressWarnings("checkstyle:VisibilityModifier") public final long nonceGroup; - @SuppressWarnings("checkstyle:VisibilityModifier") public final long nonce; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final Mutation mutation; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final long nonceGroup; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final long nonce; @Override public int compareTo(final MutationReplay d) { @@ -469,21 +471,21 @@ public ClientProtos.MutationProto.MutationType getType() { * This function is used to construct mutations from a WALEntry. 
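The sequence-id helpers above (getMaxRegionSequenceId and writeRegionSequenceIdFile) persist a region's max sequence id as a marker file under recovered.edits. A minimal sketch of pairing them follows; it is not part of this patch and the region directory is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.WALSplitUtil;

public class SequenceIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem walFS = CommonFSUtils.getWALFileSystem(conf);
    Path regionDir = new Path("/hbase/data/default/some_table/2323432434"); // hypothetical
    long maxSeqId = WALSplitUtil.getMaxRegionSequenceId(walFS, regionDir);
    // Record a new, higher max sequence id; the helper throws if asked to go backwards.
    WALSplitUtil.writeRegionSequenceIdFile(walFS, regionDir, maxSeqId + 1);
  }
}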
It also reconstructs WALKey & * WALEdit from the passed in WALEntry * @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances - * extracted from the passed in WALEntry. + * extracted from the passed in WALEntry. * @return list of Pair<MutationType, Mutation> to be replayed * @deprecated Since 3.0.0, will be removed in 4.0.0. */ @Deprecated public static List getMutationsFromWALEntry(AdminProtos.WALEntry entry, - CellScanner cells, Pair logEntry, Durability durability) throws IOException { + CellScanner cells, Pair logEntry, Durability durability) throws IOException { if (entry == null) { // return an empty array return Collections.emptyList(); } - long replaySeqId = - (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() - : entry.getKey().getLogSequenceNumber(); + long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) + ? entry.getKey().getOrigSequenceNumber() + : entry.getKey().getLogSequenceNumber(); int count = entry.getAssociatedCellCount(); List mutations = new ArrayList<>(); Cell previousCell = null; @@ -505,20 +507,20 @@ public static List getMutationsFromWALEntry(AdminProtos.WALEntry } boolean isNewRowOrType = - previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() - || !CellUtil.matchingRows(previousCell, cell); + previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(previousCell, cell); if (isNewRowOrType) { // Create new mutation if (CellUtil.isDelete(cell)) { m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Deletes don't have nonces. mutations.add(new MutationReplay(ClientProtos.MutationProto.MutationType.DELETE, m, - HConstants.NO_NONCE, HConstants.NO_NONCE)); + HConstants.NO_NONCE, HConstants.NO_NONCE)); } else { m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Puts might come from increment or append, thus we need nonces. long nonceGroup = - entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE; + entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE; long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE; mutations.add( new MutationReplay(ClientProtos.MutationProto.MutationType.PUT, m, nonceGroup, nonce)); @@ -536,15 +538,15 @@ public static List getMutationsFromWALEntry(AdminProtos.WALEntry // reconstruct WALKey if (logEntry != null) { org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = - entry.getKey(); + entry.getKey(); List clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount()); for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) { clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits())); } key = new WALKeyImpl(walKeyProto.getEncodedRegionName().toByteArray(), - TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, - walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), - walKeyProto.getNonce(), null); + TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, + walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(), + null); logEntry.setFirst(key); logEntry.setSecond(val); } @@ -556,15 +558,15 @@ public static List getMutationsFromWALEntry(AdminProtos.WALEntry * Return path to recovered.hfiles directory of the region's column family: e.g. * /hbase/some_table/2323432434/cf/recovered.hfiles/. 
This method also ensures existence of * recovered.hfiles directory under the region's column family, creating it if necessary. - * @param rootFS the root file system - * @param conf configuration - * @param tableName the table name + * @param rootFS the root file system + * @param conf configuration + * @param tableName the table name * @param encodedRegionName the encoded region name - * @param familyName the column family name + * @param familyName the column family name * @return Path to recovered.hfiles directory of the region's column family. */ static Path tryCreateRecoveredHFilesDir(FileSystem rootFS, Configuration conf, - TableName tableName, String encodedRegionName, String familyName) throws IOException { + TableName tableName, String encodedRegionName, String familyName) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); Path regionDir = FSUtils.getRegionDirFromTableDir(CommonFSUtils.getTableDir(rootDir, tableName), encodedRegionName); @@ -585,8 +587,8 @@ private static Path getRecoveredHFilesDir(final Path regionDir, String familyNam return new Path(new Path(regionDir, familyName), HConstants.RECOVERED_HFILES_DIR); } - public static FileStatus[] getRecoveredHFiles(final FileSystem rootFS, - final Path regionDir, String familyName) throws IOException { + public static FileStatus[] getRecoveredHFiles(final FileSystem rootFS, final Path regionDir, + String familyName) throws IOException { Path dir = getRecoveredHFilesDir(regionDir, familyName); return CommonFSUtils.listStatus(rootFS, dir); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index ed684868cdd1..02a9904d1d2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -63,10 +63,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId; /** - * Split RegionServer WAL files. Splits the WAL into new files, - * one per region, to be picked up on Region reopen. Deletes the split WAL when finished. - * Create an instance and call {@link #splitWAL(FileStatus, CancelableProgressable)} per file or - * use static helper methods. + * Split RegionServer WAL files. Splits the WAL into new files, one per region, to be picked up on + * Region reopen. Deletes the split WAL when finished. Create an instance and call + * {@link #splitWAL(FileStatus, CancelableProgressable)} per file or use static helper methods. */ @InterfaceAudience.Private public class WALSplitter { @@ -92,8 +91,8 @@ public class WALSplitter { private EntryBuffers entryBuffers; /** - * Coordinator for split log. Used by the zk-based log splitter. - * Not used by the procedure v2-based log splitter. + * Coordinator for split log. Used by the zk-based log splitter. Not used by the procedure + * v2-based log splitter. */ private SplitLogWorkerCoordination splitLogWorkerCoordination; @@ -120,10 +119,10 @@ public class WALSplitter { public static final boolean DEFAULT_WAL_SPLIT_TO_HFILE = false; /** - * True if we are to run with bounded amount of writers rather than let the count blossom. - * Default is 'false'. Does not apply if you have set 'hbase.wal.split.to.hfile' as that - * is always bounded. Only applies when you are doing recovery to 'recovered.edits' - * files (the old default). Bounded writing tends to have higher throughput. 
+ * True if we are to run with bounded amount of writers rather than let the count blossom. Default + * is 'false'. Does not apply if you have set 'hbase.wal.split.to.hfile' as that is always + * bounded. Only applies when you are doing recovery to 'recovered.edits' files (the old default). + * Bounded writing tends to have higher throughput. */ public final static String SPLIT_WRITER_CREATION_BOUNDED = "hbase.split.writer.creation.bounded"; @@ -137,14 +136,14 @@ public class WALSplitter { private final boolean hfile; private final boolean skipErrors; - WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, - FileSystem walFS, Path rootDir, FileSystem rootFS) { + WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, + Path rootDir, FileSystem rootFS) { this(factory, conf, walRootDir, walFS, rootDir, rootFS, null, null, null); } - WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, - FileSystem walFS, Path rootDir, FileSystem rootFS, LastSequenceId idChecker, - SplitLogWorkerCoordination splitLogWorkerCoordination, RegionServerServices rsServices) { + WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, + Path rootDir, FileSystem rootFS, LastSequenceId idChecker, + SplitLogWorkerCoordination splitLogWorkerCoordination, RegionServerServices rsServices) { this.conf = HBaseConfiguration.create(conf); String codecClassName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName()); @@ -184,16 +183,14 @@ Map> getRegionMaxSeqIdInStores() { } /** - * Splits a WAL file. - * Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and tests. - * Not used by new procedure-based WAL splitter. - * + * Splits a WAL file. Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and + * tests. Not used by new procedure-based WAL splitter. * @return false if it is interrupted by the progress-able. */ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem walFS, - Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, - SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory, - RegionServerServices rsServices) throws IOException { + Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, + SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory, + RegionServerServices rsServices) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walDir, walFS, rootDir, rootFS, idChecker, @@ -205,14 +202,13 @@ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem w } /** - * Split a folder of WAL files. Delete the directory when done. - * Used by tools and unit tests. It should be package private. - * It is public only because TestWALObserver is in a different package, + * Split a folder of WAL files. Delete the directory when done. Used by tools and unit tests. It + * should be package private. It is public only because TestWALObserver is in a different package, * which uses this method to do log splitting. * @return List of output files created by the split. 
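As a side note on the SPLIT_WRITER_CREATION_BOUNDED javadoc reworded in this hunk: both recovery-output options it mentions are plain configuration switches. A minimal sketch, assuming a stock HBaseConfiguration (the class name WalSplitConfigSketch is made up for illustration; the two key strings are taken from this file):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSplitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keep the old recovered.edits output (the default)...
    conf.setBoolean("hbase.wal.split.to.hfile", false);
    // ...but bound the number of concurrently open split writers, which the
    // javadoc above notes tends to give higher throughput. Values here are
    // illustrative, not recommendations.
    conf.setBoolean("hbase.split.writer.creation.bounded", true);
    System.out.println(conf.getBoolean("hbase.split.writer.creation.bounded", false));
  }
}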
*/ public static List split(Path walRootDir, Path walsDir, Path archiveDir, FileSystem walFS, - Configuration conf, final WALFactory factory) throws IOException { + Configuration conf, final WALFactory factory) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS); @@ -220,11 +216,11 @@ public static List split(Path walRootDir, Path walsDir, Path archiveDir, F SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); List splits = new ArrayList<>(); if (!wals.isEmpty()) { - for (FileStatus wal: wals) { + for (FileStatus wal : wals) { SplitWALResult splitWALResult = splitter.splitWAL(wal, null); if (splitWALResult.isFinished()) { WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf); - //splitter.outputSink.splits is mark as final, do not need null check + // splitter.outputSink.splits is mark as final, do not need null check splits.addAll(splitter.outputSink.splits); } } @@ -236,9 +232,9 @@ public static List split(Path walRootDir, Path walsDir, Path archiveDir, F } /** - * Data structure returned as result by #splitWAL(FileStatus, CancelableProgressable). - * Test {@link #isFinished()} to see if we are done with the WAL and {@link #isCorrupt()} for if - * the WAL is corrupt. + * Data structure returned as result by #splitWAL(FileStatus, CancelableProgressable). Test + * {@link #isFinished()} to see if we are done with the WAL and {@link #isCorrupt()} for if the + * WAL is corrupt. */ static final class SplitWALResult { private final boolean finished; @@ -265,16 +261,16 @@ private void createOutputSinkAndEntryBuffers() { PipelineController controller = new PipelineController(); if (this.hfile) { this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize); - this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, this.entryBuffers, + this.numWriterThreads); } else if (this.splitWriterCreationBounded) { this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize); - this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, this.entryBuffers, + this.numWriterThreads); } else { this.entryBuffers = new EntryBuffers(controller, this.bufferSize); - this.outputSink = new RecoveredEditsOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = + new RecoveredEditsOutputSink(this, controller, this.entryBuffers, this.numWriterThreads); } } @@ -327,8 +323,9 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr String encodedRegionNameAsStr = Bytes.toString(region); Long lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr); if (lastFlushedSequenceId == null) { - if (!(isRegionDirPresentUnderRoot(entry.getKey().getTableName(), - encodedRegionNameAsStr))) { + if ( + !(isRegionDirPresentUnderRoot(entry.getKey().getTableName(), encodedRegionNameAsStr)) + ) { // The region directory itself is not present in the FS. This indicates that // the region/table is already removed. We can just skip all the edits for this // region. 
Setting lastFlushedSequenceId as Long.MAX_VALUE so that all edits @@ -342,13 +339,13 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) { maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), - storeSeqId.getSequenceId()); + storeSeqId.getSequenceId()); } regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores); lastFlushedSequenceId = ids.getLastFlushedSequenceId(); if (LOG.isDebugEnabled()) { LOG.debug("Last flushed sequenceid for " + encodedRegionNameAsStr + ": " - + TextFormat.shortDebugString(ids)); + + TextFormat.shortDebugString(ids)); } } if (lastFlushedSequenceId == null) { @@ -370,11 +367,12 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr entryBuffers.appendEntry(entry); int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck; // If sufficient edits have passed, check if we should report progress. - if (editsCount % interval == 0 - || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) { + if ( + editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting + ) { numOpenedFilesLastCheck = this.getNumOpenWriters(); String countsStr = (editsCount - (editsSkipped + outputSink.getTotalSkippedEdits())) - + " edits, skipped " + editsSkipped + " edits."; + + " edits, skipped " + editsSkipped + " edits."; status.setStatus("Split " + countsStr); if (cancel != null && !cancel.progress()) { cancelled = true; @@ -419,10 +417,10 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr } finally { long processCost = EnvironmentEdgeManager.currentTime() - startTS; // See if length got updated post lease recovery - String msg = "Processed " + editsCount + " edits across " + - outputSink.getNumberOfRecoveredRegions() + " Regions in " + processCost + - " ms; skipped=" + editsSkipped + "; WAL=" + wal + ", size=" + lengthStr + - ", length=" + length + ", corrupted=" + corrupt + ", cancelled=" + cancelled; + String msg = "Processed " + editsCount + " edits across " + + outputSink.getNumberOfRecoveredRegions() + " Regions in " + processCost + + " ms; skipped=" + editsSkipped + "; WAL=" + wal + ", size=" + lengthStr + ", length=" + + length + ", corrupted=" + corrupt + ", cancelled=" + cancelled; LOG.info(msg); status.markComplete(msg); if (LOG.isDebugEnabled()) { @@ -441,8 +439,8 @@ private boolean isRegionDirPresentUnderRoot(TableName tn, String region) throws * Create a new {@link Reader} for reading logs to split. * @return Returns null if file has length zero or file can't be found. 
*/ - protected Reader getReader(FileStatus walStatus, boolean skipErrors, CancelableProgressable cancel) - throws IOException, CorruptedLogFileException { + protected Reader getReader(FileStatus walStatus, boolean skipErrors, + CancelableProgressable cancel) throws IOException, CorruptedLogFileException { Path path = walStatus.getPath(); long length = walStatus.getLen(); Reader in; @@ -479,14 +477,14 @@ protected Reader getReader(FileStatus walStatus, boolean skipErrors, CancelableP if (!skipErrors || e instanceof InterruptedIOException) { throw e; // Don't mark the file corrupted if interrupted, or not skipErrors } - throw new CorruptedLogFileException("skipErrors=true; could not open " + path + - ", skipping", e); + throw new CorruptedLogFileException("skipErrors=true; could not open " + path + ", skipping", + e); } return in; } private Entry getNextLogLine(Reader in, Path path, boolean skipErrors) - throws CorruptedLogFileException, IOException { + throws CorruptedLogFileException, IOException { try { return in.next(); } catch (EOFException eof) { @@ -496,8 +494,10 @@ private Entry getNextLogLine(Reader in, Path path, boolean skipErrors) } catch (IOException e) { // If the IOE resulted from bad file format, // then this problem is idempotent and retrying won't help - if (e.getCause() != null && (e.getCause() instanceof ParseException - || e.getCause() instanceof org.apache.hadoop.fs.ChecksumException)) { + if ( + e.getCause() != null && (e.getCause() instanceof ParseException + || e.getCause() instanceof org.apache.hadoop.fs.ChecksumException) + ) { LOG.warn("Parse exception from {}; continuing", path, e); return null; } @@ -577,9 +577,8 @@ static class CorruptedLogFileException extends Exception { /** * CorruptedLogFileException with cause - * * @param message the message for this exception - * @param cause the cause for this exception + * @param cause the cause for this exception */ CorruptedLogFileException(String message, Throwable cause) { super(message, cause); diff --git a/hbase-server/src/main/python/hbase/merge_conf.py b/hbase-server/src/main/python/hbase/merge_conf.py index 764d98af2882..ff6dfacf5bf4 100644 --- a/hbase-server/src/main/python/hbase/merge_conf.py +++ b/hbase-server/src/main/python/hbase/merge_conf.py @@ -150,4 +150,3 @@ def append_text_child(self, property_element, tag_name, value): if __name__ == '__main__': MergeConfTool().main() - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java index 0f68e11f0f87..0e76eb247a4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,13 +70,13 @@ public static void tearDownAfterClass() throws Exception { public void setUp() throws Exception { MemoryCompactionPolicy policy = getMemoryCompactionPolicy(); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setValue(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, policy.name()); + .setValue(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, policy.name()); if (policy == MemoryCompactionPolicy.EAGER) { builder.setValue(MemStoreLAB.USEMSLAB_KEY, "false"); builder.setValue(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, "0.9"); } Stream.of(FAMILIES).map(ColumnFamilyDescriptorBuilder::of) - .forEachOrdered(builder::setColumnFamily); + .forEachOrdered(builder::setColumnFamily); UTIL.getAdmin().createTable(builder.build()); tool.setConf(UTIL.getConfiguration()); } @@ -87,12 +87,12 @@ public void tearDown() throws Exception { } private void runTestAtomicity(long millisToRun, int numWriters, int numGetters, int numScanners, - int numUniqueRows) throws Exception { + int numUniqueRows) throws Exception { runTestAtomicity(millisToRun, numWriters, numGetters, numScanners, numUniqueRows, false); } private void runTestAtomicity(long millisToRun, int numWriters, int numGetters, int numScanners, - int numUniqueRows, boolean useMob) throws Exception { + int numUniqueRows, boolean useMob) throws Exception { List args = Lists.newArrayList("-millis", String.valueOf(millisToRun), "-numWriters", String.valueOf(numWriters), "-numGetters", String.valueOf(numGetters), "-numScanners", String.valueOf(numScanners), "-numUniqueRows", String.valueOf(numUniqueRows), "-crazyFlush"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java index ae940dc24a17..1839b43d5db1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,11 +47,12 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** @@ -89,7 +90,7 @@ private ExecutorService createThreadPool() { long keepAliveTime = 60; BlockingQueue workQueue = new LinkedBlockingQueue( - maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); + maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, @@ -145,7 +146,7 @@ public static class AtomicityWriter extends RepeatingTestThread { AtomicLong numWritten = new AtomicLong(); public AtomicityWriter(TestContext ctx, byte[][] targetRows, byte[][] targetFamilies, - ExecutorService pool) throws IOException { + ExecutorService pool) throws IOException { super(ctx); this.targetRows = targetRows; this.targetFamilies = targetFamilies; @@ -191,7 +192,7 @@ public static class AtomicGetReader extends RepeatingTestThread { AtomicLong numRead = new AtomicLong(); public AtomicGetReader(TestContext ctx, byte[] targetRow, byte[][] targetFamilies, - ExecutorService pool) throws IOException { + ExecutorService pool) throws IOException { super(ctx); this.targetRow = targetRow; this.targetFamilies = targetFamilies; @@ -260,7 +261,7 @@ public static class AtomicScanReader extends RepeatingTestThread { AtomicLong numRowsScanned = new AtomicLong(); public AtomicScanReader(TestContext ctx, byte[][] targetFamilies, ExecutorService pool) - throws IOException { + throws IOException { super(ctx); this.targetFamilies = targetFamilies; connection = ConnectionFactory.createConnection(ctx.getConf(), pool); @@ -321,13 +322,13 @@ private void createTableIfMissing(Admin admin, boolean useMob) throws IOExceptio if (!admin.tableExists(TABLE_NAME)) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME); Stream.of(FAMILIES).map(ColumnFamilyDescriptorBuilder::of) - .forEachOrdered(builder::setColumnFamily); + .forEachOrdered(builder::setColumnFamily); admin.createTable(builder.build()); } ColumnFamilyDescriptor cfd = admin.getDescriptor(TABLE_NAME).getColumnFamilies()[0]; if (cfd.isMobEnabled() != useMob) { admin.modifyColumnFamily(TABLE_NAME, ColumnFamilyDescriptorBuilder.newBuilder(cfd) - .setMobEnabled(useMob).setMobThreshold(4).build()); + .setMobEnabled(useMob).setMobThreshold(4).build()); } } @@ -372,7 +373,7 @@ public void doAnAction() throws Exception { List getters = Lists.newArrayList(); for (int i = 0; i < numGetters; i++) { AtomicGetReader getter = - new AtomicGetReader(ctx, rows[i % numUniqueRows], FAMILIES, sharedPool); + new AtomicGetReader(ctx, rows[i % numUniqueRows], FAMILIES, sharedPool); getters.add(getter); ctx.addThread(getter); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java index b0ea6f4879f1..87adccf2e3d4 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java @@ -33,18 +33,19 @@ import org.slf4j.LoggerFactory; /** - * A {@link TestRule} that clears all user namespaces and tables - * {@link ExternalResource#before() before} the test executes. Can be used in either the - * {@link Rule} or {@link ClassRule} positions. Lazily realizes the provided - * {@link AsyncConnection} so as to avoid initialization races with other {@link Rule Rules}. - * Does not {@link AsyncConnection#close() close()} provided connection instance when - * finished. + * A {@link TestRule} that clears all user namespaces and tables {@link ExternalResource#before() + * before} the test executes. Can be used in either the {@link Rule} or {@link ClassRule} positions. + * Lazily realizes the provided {@link AsyncConnection} so as to avoid initialization races with + * other {@link Rule Rules}. Does not {@link AsyncConnection#close() close()} provided + * connection instance when finished. *

    * Use in combination with {@link MiniClusterRule} and {@link ConnectionRule}, for example: * - *
    {@code
    + * 
    + * {
    + *   @code
      *   public class TestMyClass {
    - *     @ClassRule
    + *     @ClassRule
      *     public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
      *
      *     private final ConnectionRule connectionRule =
    @@ -52,12 +53,12 @@
      *     private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule =
      *       new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
      *
    - *     @Rule
    - *     public TestRule rule = RuleChain
    - *       .outerRule(connectionRule)
    - *       .around(clearUserNamespacesAndTablesRule);
    + *     @Rule
    + *     public TestRule rule =
    + *       RuleChain.outerRule(connectionRule).around(clearUserNamespacesAndTablesRule);
      *   }
    - * }
    + * } + *
    */ public class ClearUserNamespacesAndTablesRule extends ExternalResource { private static final Logger logger = @@ -83,18 +84,14 @@ private CompletableFuture clearTablesAndNamespaces() { } private CompletableFuture deleteUserTables() { - return listTableNames() - .thenApply(tableNames -> tableNames.stream() - .map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName))) - .toArray(CompletableFuture[]::new)) - .thenCompose(CompletableFuture::allOf); + return listTableNames().thenApply(tableNames -> tableNames.stream() + .map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName))) + .toArray(CompletableFuture[]::new)).thenCompose(CompletableFuture::allOf); } private CompletableFuture> listTableNames() { - return CompletableFuture - .runAsync(() -> logger.trace("listing tables")) - .thenCompose(_void -> admin.listTableNames(false)) - .thenApply(tableNames -> { + return CompletableFuture.runAsync(() -> logger.trace("listing tables")) + .thenCompose(_void -> admin.listTableNames(false)).thenApply(tableNames -> { if (logger.isTraceEnabled()) { final StringJoiner joiner = new StringJoiner(", ", "[", "]"); tableNames.stream().map(TableName::getNameAsString).forEach(joiner::add); @@ -105,63 +102,51 @@ private CompletableFuture> listTableNames() { } private CompletableFuture isTableEnabled(final TableName tableName) { - return admin.isTableEnabled(tableName) - .thenApply(isEnabled -> { - logger.trace("table {} is enabled.", tableName); - return isEnabled; - }); + return admin.isTableEnabled(tableName).thenApply(isEnabled -> { + logger.trace("table {} is enabled.", tableName); + return isEnabled; + }); } private CompletableFuture disableIfEnabled(final TableName tableName) { - return isTableEnabled(tableName) - .thenCompose(isEnabled -> isEnabled - ? disableTable(tableName) - : CompletableFuture.completedFuture(null)); + return isTableEnabled(tableName).thenCompose( + isEnabled -> isEnabled ? 
disableTable(tableName) : CompletableFuture.completedFuture(null)); } private CompletableFuture disableTable(final TableName tableName) { - return CompletableFuture - .runAsync(() -> logger.trace("disabling enabled table {}", tableName)) + return CompletableFuture.runAsync(() -> logger.trace("disabling enabled table {}", tableName)) .thenCompose(_void -> admin.disableTable(tableName)); } private CompletableFuture deleteTable(final TableName tableName) { - return CompletableFuture - .runAsync(() -> logger.trace("deleting disabled table {}", tableName)) + return CompletableFuture.runAsync(() -> logger.trace("deleting disabled table {}", tableName)) .thenCompose(_void -> admin.deleteTable(tableName)); } private CompletableFuture> listUserNamespaces() { - return CompletableFuture - .runAsync(() -> logger.trace("listing namespaces")) - .thenCompose(_void -> admin.listNamespaceDescriptors()) - .thenApply(namespaceDescriptors -> { + return CompletableFuture.runAsync(() -> logger.trace("listing namespaces")) + .thenCompose(_void -> admin.listNamespaceDescriptors()).thenApply(namespaceDescriptors -> { final StringJoiner joiner = new StringJoiner(", ", "[", "]"); - final List names = namespaceDescriptors.stream() - .map(NamespaceDescriptor::getName) - .peek(joiner::add) - .collect(Collectors.toList()); + final List names = namespaceDescriptors.stream().map(NamespaceDescriptor::getName) + .peek(joiner::add).collect(Collectors.toList()); logger.trace("found existing namespaces {}", joiner); return names; }) .thenApply(namespaces -> namespaces.stream() - .filter(namespace -> !Objects.equals( - namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) - .filter(namespace -> !Objects.equals( - namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) + .filter( + namespace -> !Objects.equals(namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) + .filter( + namespace -> !Objects.equals(namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) .collect(Collectors.toList())); } private CompletableFuture deleteNamespace(final String namespace) { - return CompletableFuture - .runAsync(() -> logger.trace("deleting namespace {}", namespace)) + return CompletableFuture.runAsync(() -> logger.trace("deleting namespace {}", namespace)) .thenCompose(_void -> admin.deleteNamespace(namespace)); } private CompletableFuture deleteUserNamespaces() { - return listUserNamespaces() - .thenCompose(namespaces -> CompletableFuture.allOf(namespaces.stream() - .map(this::deleteNamespace) - .toArray(CompletableFuture[]::new))); + return listUserNamespaces().thenCompose(namespaces -> CompletableFuture + .allOf(namespaces.stream().map(this::deleteNamespace).toArray(CompletableFuture[]::new))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java index 77bd1c531c68..f4f49ad94395 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java @@ -32,18 +32,20 @@ *

    * Use in combination with {@link MiniClusterRule}, for example: * - *
    {@code
    + * 
    + * {
    + *   @code
      *   public class TestMyClass {
      *     private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
      *     private static final ConnectionRule connectionRule =
      *       ConnectionRule.createAsyncConnectionRule(miniClusterRule::createConnection);
      *
    - *     @ClassRule
    - *     public static final TestRule rule = RuleChain
    - *       .outerRule(miniClusterRule)
    - *       .around(connectionRule);
    + *     @ClassRule
    + *     public static final TestRule rule =
    + *       RuleChain.outerRule(miniClusterRule).around(connectionRule);
      *   }
    - * }
    + * } + *
    */ public final class ConnectionRule extends ExternalResource { @@ -53,29 +55,22 @@ public final class ConnectionRule extends ExternalResource { private Connection connection; private AsyncConnection asyncConnection; - public static ConnectionRule createConnectionRule( - final Supplier connectionSupplier - ) { + public static ConnectionRule createConnectionRule(final Supplier connectionSupplier) { return new ConnectionRule(connectionSupplier, null); } public static ConnectionRule createAsyncConnectionRule( - final Supplier> asyncConnectionSupplier - ) { + final Supplier> asyncConnectionSupplier) { return new ConnectionRule(null, asyncConnectionSupplier); } - public static ConnectionRule createConnectionRule( - final Supplier connectionSupplier, - final Supplier> asyncConnectionSupplier - ) { + public static ConnectionRule createConnectionRule(final Supplier connectionSupplier, + final Supplier> asyncConnectionSupplier) { return new ConnectionRule(connectionSupplier, asyncConnectionSupplier); } - private ConnectionRule( - final Supplier connectionSupplier, - final Supplier> asyncConnectionSupplier - ) { + private ConnectionRule(final Supplier connectionSupplier, + final Supplier> asyncConnectionSupplier) { this.connectionSupplier = connectionSupplier; this.asyncConnectionSupplier = asyncConnectionSupplier; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java index 0584be85e72b..4a804c5dfc2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -346,7 +346,7 @@ public ServerName getServerHoldingMeta() throws IOException { /** * Get the ServerName of region server serving the specified region * @param regionName Name of the region in bytes - * @param tn Table name that has the region. + * @param tn Table name that has the region. * @return ServerName that hosts the region or null */ public abstract ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 4b4ce9e03a1d..a274b39757f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -151,7 +151,9 @@ import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -412,13 +414,13 @@ private void createSubDirAndSystemProperty(String propertyName, Path parent, Str if (sysValue != null) { // There is already a value set. 
So we do nothing but hope // that there will be no conflicts - LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue + - " so I do NOT create it in " + parent); + LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue + + " so I do NOT create it in " + parent); String confValue = conf.get(propertyName); if (confValue != null && !confValue.endsWith(sysValue)) { - LOG.warn(propertyName + " property value differs in configuration and system: " + - "Configuration=" + confValue + " while System=" + sysValue + - " Erasing configuration value by system value."); + LOG.warn(propertyName + " property value differs in configuration and system: " + + "Configuration=" + confValue + " while System=" + sysValue + + " Erasing configuration value by system value."); } conf.set(propertyName, sysValue); } else { @@ -534,9 +536,7 @@ public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception { * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things * like HDFS block location verification. If you start MiniDFSCluster without host names, all * instances of the datanodes will have the same host name. - * @param hosts hostnames DNs to run on. - * @throws Exception - * @see #shutdownMiniDFSCluster() + * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster() * @return The mini dfs cluster created. */ public MiniDFSCluster startMiniDFSCluster(final String[] hosts) throws Exception { @@ -550,9 +550,7 @@ public MiniDFSCluster startMiniDFSCluster(final String[] hosts) throws Exception /** * Start a minidfscluster. Can only create one. * @param servers How many DNs to start. - * @param hosts hostnames DNs to run on. - * @throws Exception - * @see #shutdownMiniDFSCluster() + * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster() * @return The mini dfs cluster created. */ public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts) throws Exception { @@ -580,8 +578,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, Str Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR"); Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(), "ERROR"); - this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true, - true, null, racks, hosts, null); + this.dfsCluster = + new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null); // Set this just-started cluster as our filesystem. setFs(); @@ -604,8 +602,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR"); Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(), "ERROR"); - dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, - null, null, null); + dfsCluster = + new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null); return dfsCluster; } @@ -860,7 +858,7 @@ public SingleProcessHBaseCluster startMiniHBaseCluster() * Starts up mini hbase cluster. Usually you won't want this. You'll usually want * {@link #startMiniCluster()}. All other options will use default values, defined in * {@link StartTestingClusterOption.Builder}. - * @param numMasters Master node number. + * @param numMasters Master node number. * @param numRegionServers Number of region servers. 
* @return The mini HBase cluster created. * @see #shutdownMiniHBaseCluster() @@ -881,9 +879,9 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRe * Starts up mini hbase cluster. Usually you won't want this. You'll usually want * {@link #startMiniCluster()}. All other options will use default values, defined in * {@link StartTestingClusterOption.Builder}. - * @param numMasters Master node number. + * @param numMasters Master node number. * @param numRegionServers Number of region servers. - * @param rsPorts Ports that RegionServer should use. + * @param rsPorts Ports that RegionServer should use. * @return The mini HBase cluster created. * @see #shutdownMiniHBaseCluster() * @deprecated since 2.2.0 and will be removed in 4.0.0. Use @@ -903,13 +901,13 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRe * Starts up mini hbase cluster. Usually you won't want this. You'll usually want * {@link #startMiniCluster()}. All other options will use default values, defined in * {@link StartTestingClusterOption.Builder}. - * @param numMasters Master node number. + * @param numMasters Master node number. * @param numRegionServers Number of region servers. - * @param rsPorts Ports that RegionServer should use. - * @param masterClass The class to use as HMaster, or null for default. - * @param rsClass The class to use as HRegionServer, or null for default. - * @param createRootDir Whether to create a new root or data directory path. - * @param createWALDir Whether to create a new WAL directory. + * @param rsPorts Ports that RegionServer should use. + * @param masterClass The class to use as HMaster, or null for default. + * @param rsClass The class to use as HRegionServer, or null for default. + * @param createRootDir Whether to create a new root or data directory path. + * @param createWALDir Whether to create a new WAL directory. * @return The mini HBase cluster created. * @see #shutdownMiniHBaseCluster() * @deprecated since 2.2.0 and will be removed in 4.0.0. Use @@ -1038,8 +1036,7 @@ private void cleanup() throws IOException { * Returns the path to the default root dir the minicluster uses. If create is true, * a new root directory path is fetched irrespective of whether it has been fetched before or not. * If false, previous path is used. Note: this does not cause the root dir to be created. - * @return Fully qualified path for the default hbase root dir - * @throws IOException + * @return Fully qualified path for the default hbase root dir n */ public Path getDefaultRootDirPath(boolean create) throws IOException { if (!create) { @@ -1052,8 +1049,7 @@ public Path getDefaultRootDirPath(boolean create) throws IOException { /** * Same as {{@link HBaseTestingUtil#getDefaultRootDirPath(boolean create)} except that * create flag is false. Note: this does not cause the root dir to be created. - * @return Fully qualified path for the default hbase root dir - * @throws IOException + * @return Fully qualified path for the default hbase root dir n */ public Path getDefaultRootDirPath() throws IOException { return getDefaultRootDirPath(false); @@ -1064,10 +1060,9 @@ public Path getDefaultRootDirPath() throws IOException { * won't make use of this method. Root hbasedir is created for you as part of mini cluster * startup. You'd only use this method if you were doing manual operation. * @param create This flag decides whether to get a new root or data directory path or not, if it - * has been fetched already. 
Note : Directory will be made irrespective of whether path - * has been fetched or not. If directory already exists, it will be overwritten - * @return Fully qualified path to hbase root dir - * @throws IOException + * has been fetched already. Note : Directory will be made irrespective of whether + * path has been fetched or not. If directory already exists, it will be overwritten + * @return Fully qualified path to hbase root dir n */ public Path createRootDir(boolean create) throws IOException { FileSystem fs = FileSystem.get(this.conf); @@ -1081,8 +1076,7 @@ public Path createRootDir(boolean create) throws IOException { /** * Same as {@link HBaseTestingUtil#createRootDir(boolean create)} except that create * flag is false. - * @return Fully qualified path to hbase root dir - * @throws IOException + * @return Fully qualified path to hbase root dir n */ public Path createRootDir() throws IOException { return createRootDir(false); @@ -1092,8 +1086,7 @@ public Path createRootDir() throws IOException { * Creates a hbase walDir in the user's home directory. Normally you won't make use of this * method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use * this method if you were doing manual operation. - * @return Fully qualified path to hbase root dir - * @throws IOException + * @return Fully qualified path to hbase root dir n */ public Path createWALRootDir() throws IOException { FileSystem fs = FileSystem.get(this.conf); @@ -1114,54 +1107,42 @@ private void setHBaseFsTmpDir() throws IOException { } /** - * Flushes all caches in the mini hbase cluster - * @throws IOException + * Flushes all caches in the mini hbase cluster n */ public void flush() throws IOException { getMiniHBaseCluster().flushcache(); } /** - * Flushes all caches in the mini hbase cluster - * @throws IOException + * Flushes all caches in the mini hbase cluster n */ public void flush(TableName tableName) throws IOException { getMiniHBaseCluster().flushcache(tableName); } /** - * Compact all regions in the mini hbase cluster - * @throws IOException + * Compact all regions in the mini hbase cluster n */ public void compact(boolean major) throws IOException { getMiniHBaseCluster().compact(major); } /** - * Compact all of a table's reagion in the mini hbase cluster - * @throws IOException + * Compact all of a table's reagion in the mini hbase cluster n */ public void compact(TableName tableName, boolean major) throws IOException { getMiniHBaseCluster().compact(tableName, major); } /** - * Create a table. - * @param tableName - * @param family - * @return A Table instance for the created table. - * @throws IOException + * Create a table. nn * @return A Table instance for the created table. n */ public Table createTable(TableName tableName, String family) throws IOException { return createTable(tableName, new String[] { family }); } /** - * Create a table. - * @param tableName - * @param families - * @return A Table instance for the created table. - * @throws IOException + * Create a table. nn * @return A Table instance for the created table. n */ public Table createTable(TableName tableName, String[] families) throws IOException { List fams = new ArrayList<>(families.length); @@ -1172,23 +1153,14 @@ public Table createTable(TableName tableName, String[] families) throws IOExcept } /** - * Create a table. - * @param tableName - * @param family - * @return A Table instance for the created table. - * @throws IOException + * Create a table. nn * @return A Table instance for the created table. 
n */ public Table createTable(TableName tableName, byte[] family) throws IOException { return createTable(tableName, new byte[][] { family }); } /** - * Create a table with multiple regions. - * @param tableName - * @param family - * @param numRegions - * @return A Table instance for the created table. - * @throws IOException + * Create a table with multiple regions. nnn * @return A Table instance for the created table. n */ public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions) throws IOException { @@ -1201,34 +1173,22 @@ public Table createMultiRegionTable(TableName tableName, byte[] family, int numR } /** - * Create a table. - * @param tableName - * @param families - * @return A Table instance for the created table. - * @throws IOException + * Create a table. nn * @return A Table instance for the created table. n */ public Table createTable(TableName tableName, byte[][] families) throws IOException { return createTable(tableName, families, (byte[][]) null); } /** - * Create a table with multiple regions. - * @param tableName - * @param families - * @return A Table instance for the created table. - * @throws IOException + * Create a table with multiple regions. nn * @return A Table instance for the created table. n */ public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException { return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE); } /** - * Create a table with multiple regions. - * @param tableName - * @param replicaCount replica count. - * @param families - * @return A Table instance for the created table. - * @throws IOException + * Create a table with multiple regions. n * @param replicaCount replica count. n * @return A + * Table instance for the created table. n */ public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families) throws IOException { @@ -1236,12 +1196,7 @@ public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[ } /** - * Create a table. - * @param tableName - * @param families - * @param splitKeys - * @return A Table instance for the created table. - * @throws IOException + * Create a table. nnn * @return A Table instance for the created table. n */ public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys) throws IOException { @@ -1250,9 +1205,9 @@ public Table createTable(TableName tableName, byte[][] families, byte[][] splitK /** * Create a table. - * @param tableName the table name - * @param families the families - * @param splitKeys the splitkeys + * @param tableName the table name + * @param families the families + * @param splitKeys the splitkeys * @param replicaCount the region replica count * @return A Table instance for the created table. * @throws IOException throws IOException @@ -1286,10 +1241,10 @@ public Table createTable(TableDescriptor htd, byte[][] families, Configuration c /** * Create a table. - * @param htd table descriptor - * @param families array of column families + * @param htd table descriptor + * @param families array of column families * @param splitKeys array of split keys - * @param c Configuration to use + * @param c Configuration to use * @return A Table instance for the created table. * @throws IOException if getAdmin or createTable fails */ @@ -1303,12 +1258,12 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK /** * Create a table. 
- * @param htd table descriptor - * @param families array of column families + * @param htd table descriptor + * @param families array of column families * @param splitKeys array of split keys - * @param type Bloom type + * @param type Bloom type * @param blockSize block size - * @param c Configuration to use + * @param c Configuration to use * @return A Table instance for the created table. * @throws IOException if getAdmin or createTable fails */ @@ -1338,10 +1293,9 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK /** * Create a table. - * @param htd table descriptor + * @param htd table descriptor * @param splitRows array of split keys - * @return A Table instance for the created table. - * @throws IOException + * @return A Table instance for the created table. n */ public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd); @@ -1364,11 +1318,11 @@ public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOExcep /** * Create a table. - * @param tableName the table name - * @param families the families - * @param splitKeys the split keys + * @param tableName the table name + * @param families the families + * @param splitKeys the split keys * @param replicaCount the replica count - * @param c Configuration to use + * @param c Configuration to use * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys, @@ -1639,9 +1593,9 @@ public TableDescriptor createTableDescriptor(final TableName tableName, byte[][] /** * Create an HRegion that writes to the local tmp dirs - * @param desc a table descriptor indicating which table the region belongs to + * @param desc a table descriptor indicating which table the region belongs to * @param startKey the start boundary of the region - * @param endKey the end boundary of the region + * @param endKey the end boundary of the region * @return a region that writes to local dir for testing */ public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey) @@ -1664,9 +1618,8 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws * @param info regioninfo * @param conf configuration * @param desc table descriptor - * @param wal wal for this region. - * @return created hregion - * @throws IOException + * @param wal wal for this region. + * @return created hregion n */ public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc, WAL wal) throws IOException { @@ -1674,14 +1627,8 @@ public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDesc } /** - * @param tableName - * @param startKey - * @param stopKey - * @param isReadOnly - * @param families - * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} - * when done. - * @throws IOException + * nnnnn * @return A region on which you must call + * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. n */ public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) @@ -1722,8 +1669,7 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s * Provide an existing table name to truncate. Scans the table and issues a delete for each row * read. 
* @param tableName existing table - * @return HTable to that new table - * @throws IOException + * @return HTable to that new table n */ public Table deleteTableData(TableName tableName) throws IOException { Table table = getConnection().getTable(tableName); @@ -1741,7 +1687,7 @@ public Table deleteTableData(TableName tableName) throws IOException { /** * Truncate a table using the admin command. Effectively disables, deletes, and recreates the * table. - * @param tableName table which must exist. + * @param tableName table which must exist. * @param preserveRegions keep the existing split points * @return HTable for the new table */ @@ -1770,8 +1716,7 @@ public Table truncateTable(final TableName tableName) throws IOException { * Load table with rows from 'aaa' to 'zzz'. * @param t Table * @param f Family - * @return Count of rows loaded. - * @throws IOException + * @return Count of rows loaded. n */ public int loadTable(final Table t, final byte[] f) throws IOException { return loadTable(t, new byte[][] { f }); @@ -1781,8 +1726,7 @@ public int loadTable(final Table t, final byte[] f) throws IOException { * Load table with rows from 'aaa' to 'zzz'. * @param t Table * @param f Family - * @return Count of rows loaded. - * @throws IOException + * @return Count of rows loaded. n */ public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException { return loadTable(t, new byte[][] { f }, null, writeToWAL); @@ -1792,8 +1736,7 @@ public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws I * Load table of multiple column families with rows from 'aaa' to 'zzz'. * @param t Table * @param f Array of Families to load - * @return Count of rows loaded. - * @throws IOException + * @return Count of rows loaded. n */ public int loadTable(final Table t, final byte[][] f) throws IOException { return loadTable(t, f, null); @@ -1801,11 +1744,10 @@ public int loadTable(final Table t, final byte[][] f) throws IOException { /** * Load table of multiple column families with rows from 'aaa' to 'zzz'. - * @param t Table - * @param f Array of Families to load + * @param t Table + * @param f Array of Families to load * @param value the values of the cells. If null is passed, the row key is used as value - * @return Count of rows loaded. - * @throws IOException + * @return Count of rows loaded. n */ public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException { return loadTable(t, f, value, true); @@ -1813,11 +1755,10 @@ public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOExc /** * Load table of multiple column families with rows from 'aaa' to 'zzz'. - * @param t Table - * @param f Array of Families to load + * @param t Table + * @param f Array of Families to load * @param value the values of the cells. If null is passed, the row key is used as value - * @return Count of rows loaded. - * @throws IOException + * @return Count of rows loaded. 
n */ public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException { @@ -1874,14 +1815,16 @@ public void validate() { for (byte b3 = 'a'; b3 <= 'z'; b3++) { int count = seenRows[i(b1)][i(b2)][i(b3)]; int expectedCount = 0; - if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0 && - Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) { + if ( + Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0 + && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0 + ) { expectedCount = 1; } if (count != expectedCount) { String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8); - throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " + - "instead of " + expectedCount); + throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " + + "instead of " + expectedCount); } } } @@ -1899,11 +1842,10 @@ public int loadRegion(final Region r, final byte[] f) throws IOException { /** * Load region with rows from 'aaa' to 'zzz'. - * @param r Region - * @param f Family + * @param r Region + * @param f Family * @param flush flush the cache if true - * @return Count of rows loaded. - * @throws IOException + * @return Count of rows loaded. n */ public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException { byte[] k = new byte[3]; @@ -2463,7 +2405,7 @@ public void expireSession(ZKWatcher nodeZK) throws Exception { *
  http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
 * https://issues.apache.org/jira/browse/ZOOKEEPER-1105
  • * - * @param nodeZK - the ZK watcher to expire + * @param nodeZK - the ZK watcher to expire * @param checkStatus - true to check if we can create a Table with the current configuration. */ public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception { @@ -2494,8 +2436,9 @@ public void process(WatchedEvent watchedEvent) { // ensure that we have connection to the server before closing down, otherwise // the close session event will be eaten out before we start CONNECTING state long start = EnvironmentEdgeManager.currentTime(); - while (newZK.getState() != States.CONNECTED && - EnvironmentEdgeManager.currentTime() - start < 1000) { + while ( + newZK.getState() != States.CONNECTED && EnvironmentEdgeManager.currentTime() - start < 1000 + ) { Thread.sleep(1); } newZK.close(); @@ -2651,7 +2594,7 @@ public void unassignRegion(byte[] regionName) throws IOException { /** * Closes the region containing the given row. - * @param row The row to find the containing region. + * @param row The row to find the containing region. * @param table The table to find the region. */ public void unassignRegionByRow(String row, RegionLocator table) throws IOException { @@ -2660,9 +2603,8 @@ public void unassignRegionByRow(String row, RegionLocator table) throws IOExcept /** * Closes the region containing the given row. - * @param row The row to find the containing region. - * @param table The table to find the region. - * @throws IOException + * @param row The row to find the containing region. + * @param table The table to find the region. n */ public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException { HRegionLocation hrl = table.getRegionLocation(row); @@ -2671,7 +2613,7 @@ public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOExcept /** * Retrieves a splittable region randomly from tableName - * @param tableName name of table + * @param tableName name of table * @param maxAttempts maximum number of attempts, unlimited for value of -1 * @return the HRegion chosen, null if none was found within limit of maxAttempts */ @@ -2717,11 +2659,11 @@ public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, /** * Set the MiniDFSCluster - * @param cluster cluster to use + * @param cluster cluster to use * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before it - * is set. + * is set. * @throws IllegalStateException if the passed cluster is up when it is required to be down - * @throws IOException if the FileSystem could not be set from the passed dfs cluster + * @throws IOException if the FileSystem could not be set from the passed dfs cluster */ public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown) throws IllegalStateException, IOException { @@ -2752,7 +2694,7 @@ public void waitTableAvailable(TableName table, long timeoutMillis) /** * Wait until all regions in a table have been assigned - * @param table Table to wait on. + * @param table Table to wait on. * @param timeoutMillis Timeout. 
*/ public void waitTableAvailable(byte[] table, long timeoutMillis) @@ -2790,8 +2732,8 @@ public String explainTableState(final TableName table, TableState.State state) throws IOException { TableState tableState = MetaTableAccessor.getTableState(getConnection(), table); if (tableState == null) { - return "TableState in META: No table state in META for table " + table + - " last state in meta (including deleted is " + findLastTableState(table) + ")"; + return "TableState in META: No table state in META for table " + table + + " last state in meta (including deleted is " + findLastTableState(table) + ")"; } else if (!tableState.inStates(state)) { return "TableState in META: Not " + state + " state, but " + tableState; } else { @@ -2826,7 +2768,7 @@ public boolean visit(Result r) throws IOException { * table. * @param table the table to wait on. * @throws InterruptedException if interrupted while waiting - * @throws IOException if an IO problem is encountered + * @throws IOException if an IO problem is encountered */ public void waitTableEnabled(TableName table) throws InterruptedException, IOException { waitTableEnabled(table, 30000); @@ -2836,7 +2778,7 @@ public void waitTableEnabled(TableName table) throws InterruptedException, IOExc * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions * have been all assigned. * @see #waitTableEnabled(TableName, long) - * @param table Table to wait on. + * @param table Table to wait on. * @param timeoutMillis Time to wait on it being marked enabled. */ public void waitTableEnabled(byte[] table, long timeoutMillis) @@ -2864,7 +2806,7 @@ public void waitTableDisabled(TableName table, long millisTimeout) /** * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' - * @param table Table to wait on. + * @param table Table to wait on. * @param timeoutMillis Time to wait on it being marked disabled. */ public void waitTableDisabled(byte[] table, long timeoutMillis) @@ -2917,7 +2859,7 @@ public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws /** * This method clones the passed c configuration setting a new user into the clone. * Use it getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos. - * @param c Initial configuration + * @param c Initial configuration * @param differentiatingSuffix Suffix to differentiate this user from others. * @return A new configuration instance with a different user set into it. */ @@ -3039,14 +2981,14 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException { * timeout. This means all regions have been deployed, master has been informed and updated * hbase:meta with the regions deployed server. * @param tableName the table name - * @param timeout timeout, in milliseconds + * @param timeout timeout, in milliseconds */ public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) throws IOException { if (!TableName.isMetaTableName(tableName)) { try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { - LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + - timeout + "ms"); + LOG.debug("Waiting until all regions of table " + tableName + " get assigned. 
Timeout = " + + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @Override public String explainFailure() throws IOException { @@ -3075,10 +3017,12 @@ public boolean evaluate() throws IOException { byte[] startCode = r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); ServerName serverName = - ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + - Bytes.toLong(startCode)); - if (!getHBaseClusterInterface().isDistributedCluster() && - getHBaseCluster().isKilledRS(serverName)) { + ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + + Bytes.toLong(startCode)); + if ( + !getHBaseClusterInterface().isDistributedCluster() + && getHBaseCluster().isKilledRS(serverName) + ) { return false; } } @@ -3179,8 +3123,9 @@ public static void assertKVListsEqual(String additionalMsg, final List String safeGetAsStr(List lst, int i) { } public String getClusterKey() { - return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + - conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" + - conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + + ":" + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); } /** @@ -3218,9 +3163,9 @@ public String getClusterKey() { public Table createRandomTable(TableName tableName, final Collection families, final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, final int numRowsPerFlush) throws IOException, InterruptedException { - LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + - numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + - maxVersions + "\n"); + LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + + numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + + maxVersions + "\n"); final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L); final int numCF = families.size(); @@ -3261,8 +3206,8 @@ public Table createRandomTable(TableName tableName, final Collection fam final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { final byte[] value = - Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + - iCol + "_ts_" + ts + "_random_" + rand.nextLong()); + Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { del.addColumn(cf, qual, ts); @@ -3437,9 +3382,9 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto } totalNumberOfRegions = numberOfServers * numRegionsPerServer; - LOG.info( - "Number of live regionservers: " + numberOfServers + ", " + "pre-splitting table into " + - totalNumberOfRegions + " regions " + "(regions per server: " + numRegionsPerServer + ")"); + LOG.info("Number of live regionservers: " + numberOfServers + ", " + + "pre-splitting table into " + totalNumberOfRegions + " regions " + "(regions per server: " + + numRegionsPerServer + ")"); byte[][] splits = splitter.split(totalNumberOfRegions); @@ -3630,8 +3575,7 @@ public void waitUntilNoRegionsInTransition(final long timeout) throws IOExceptio } /** - * Wait until no regions in transition. 
(time limit 15min) - * @throws IOException + * Wait until no regions in transition. (time limit 15min) n */ public void waitUntilNoRegionsInTransition() throws IOException { waitUntilNoRegionsInTransition(15 * 60000); @@ -3804,7 +3748,7 @@ public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescr Collection rtdFamilies = Arrays.asList(rtd.getColumnFamilies()); assertEquals(ltdFamilies.size(), rtdFamilies.size()); for (Iterator it = ltdFamilies.iterator(), - it2 = rtdFamilies.iterator(); it.hasNext();) { + it2 = rtdFamilies.iterator(); it.hasNext();) { assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index b3fb634a1de1..4d24b7430124 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,29 +19,28 @@ import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; - import org.apache.commons.math3.random.RandomData; import org.apache.commons.math3.random.RandomDataImpl; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.crypto.CryptoCipherProvider; import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class runs performance benchmarks for {@link HFile}. 
@@ -53,19 +51,19 @@ public class HFilePerformanceEvaluation { private static final int ROW_COUNT = 1000000; private static final int RFILE_BLOCKSIZE = 8 * 1024; private static StringBuilder testSummary = new StringBuilder(); - + // Disable verbose INFO logging from org.apache.hadoop.io.compress.CodecPool static { - System.setProperty("org.apache.commons.logging.Log", + System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.SimpleLog"); - System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", - "WARN"); + System.setProperty( + "org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", "WARN"); } - + private static final Logger LOG = LoggerFactory.getLogger(HFilePerformanceEvaluation.class.getName()); - static byte [] format(final int i) { + static byte[] format(final int i) { String v = Integer.toString(i); return Bytes.toBytes("0000000000".substring(v.length()) + v); } @@ -80,52 +78,43 @@ static Cell createCell(final int i) { } /** - * HFile is Cell-based. It used to be byte arrays. Doing this test, pass Cells. All Cells + * HFile is Cell-based. It used to be byte arrays. Doing this test, pass Cells. All Cells * intentionally have same coordinates in all fields but row. - * @param i Integer to format as a row Key. + * @param i Integer to format as a row Key. * @param value Value to use * @return Created Cell. */ - static Cell createCell(final int i, final byte [] value) { + static Cell createCell(final int i, final byte[] value) { return createCell(format(i), value); } - static Cell createCell(final byte [] keyRow) { - return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(keyRow) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); + static Cell createCell(final byte[] keyRow) { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build(); } - static Cell createCell(final byte [] keyRow, final byte [] value) { - return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(keyRow) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(value) - .build(); + static Cell createCell(final byte[] keyRow, final byte[] value) { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(value).build(); } /** - * Add any supported codec or cipher to test the HFile read/write performance. - * Specify "none" to disable codec or cipher or both. - * @throws Exception + * Add any supported codec or cipher to test the HFile read/write performance. Specify "none" to + * disable codec or cipher or both. 
n */ private void runBenchmarks() throws Exception { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.get(conf); final Path mf = fs.makeQualified(new Path("performanceevaluation.mapfile")); - + // codec=none cipher=none runWriteBenchmark(conf, fs, mf, "none", "none"); runReadBenchmark(conf, fs, mf, "none", "none"); - + // codec=gz cipher=none runWriteBenchmark(conf, fs, mf, "gz", "none"); runReadBenchmark(conf, fs, mf, "gz", "none"); @@ -181,105 +170,95 @@ private void runBenchmarks() throws Exception { } /** - * Write a test HFile with the given codec & cipher - * @param conf - * @param fs - * @param mf - * @param codec "none", "lzo", "gz", "snappy" - * @param cipher "none", "aes" - * @throws Exception + * Write a test HFile with the given codec & cipher nnn * @param codec "none", "lzo", "gz", + * "snappy" + * @param cipher "none", "aes" n */ private void runWriteBenchmark(Configuration conf, FileSystem fs, Path mf, String codec, - String cipher) throws Exception { + String cipher) throws Exception { if (fs.exists(mf)) { fs.delete(mf, true); } - runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), ROW_COUNT, + codec, getCipherName(conf, cipher)); } /** - * Run all the read benchmarks for the test HFile - * @param conf - * @param fs - * @param mf - * @param codec "none", "lzo", "gz", "snappy" + * Run all the read benchmarks for the test HFile nnn * @param codec "none", "lzo", "gz", "snappy" * @param cipher "none", "aes" */ private void runReadBenchmark(final Configuration conf, final FileSystem fs, final Path mf, - final String codec, final String cipher) { + final String codec, final String cipher) { PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("UniformRandomSmallScan failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("UniformRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("GaussianRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + 
getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("SequentialReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } - }); + }); } - - protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, - String codec, String cipher) throws Exception { - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows."); - + + protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, String codec, + String cipher) throws Exception { + LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + codec + "] " + + "cipher[" + cipher + "] for " + rowCount + " rows."); + long elapsedTime = benchmark.run(); - - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows took " + - elapsedTime + "ms."); - + + LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + codec + "] " + + "cipher[" + cipher + "] for " + rowCount + " rows took " + elapsedTime + "ms."); + // Store results to print summary at the end testSummary.append("Running ").append(benchmark.getClass().getSimpleName()) - .append(" with codec[").append(codec).append("] cipher[").append(cipher) - .append("] for ").append(rowCount).append(" rows took ").append(elapsedTime) - .append("ms.").append("\n"); + .append(" with codec[").append(codec).append("] cipher[").append(cipher).append("] for ") + .append(rowCount).append(" rows took ").append(elapsedTime).append("ms.").append("\n"); } static abstract class RowOrientedBenchmark { @@ -291,8 +270,8 @@ static abstract class RowOrientedBenchmark { protected String codec = "none"; protected String cipher = "none"; - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows, + String codec, String cipher) { this.conf = conf; this.fs = fs; this.mf = mf; @@ -301,8 +280,7 @@ public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, this.cipher = cipher; } - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows) { + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { this.conf = conf; this.fs = fs; this.mf = mf; @@ -325,8 +303,7 @@ void tearDown() throws Exception { /** * Run benchmark - * @return elapsed time. - * @throws Exception + * @return elapsed time. 
n */ long run() throws Exception { long elapsedTime; @@ -352,8 +329,8 @@ static class SequentialWriteBenchmark extends RowOrientedBenchmark { protected HFile.Writer writer; private byte[] bytes = new byte[ROW_LENGTH]; - public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { + public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows, + String codec, String cipher) { super(conf, fs, mf, totalRows, codec, cipher); } @@ -361,27 +338,23 @@ public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, void setUp() throws Exception { HFileContextBuilder builder = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(codec)) - .withBlockSize(RFILE_BLOCKSIZE); - + .withCompression(HFileWriterImpl.compressionByName(codec)).withBlockSize(RFILE_BLOCKSIZE); + if (cipher == "aes") { byte[] cipherKey = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(cipherKey); builder.withEncryptionContext(Encryption.newContext(conf) - .setCipher(Encryption.getCipher(conf, cipher)) - .setKey(cipherKey)); + .setCipher(Encryption.getCipher(conf, cipher)).setKey(cipherKey)); } else if (!"none".equals(cipher)) { throw new IOException("Cipher " + cipher + " not supported."); } - + HFileContext hFileContext = builder.build(); - writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, mf) - .withFileContext(hFileContext) - .create(); + writer = + HFile.getWriterFactoryNoCache(conf).withPath(fs, mf).withFileContext(hFileContext).create(); } - + @Override void doRow(int i) throws Exception { writer.append(createCell(i, generateValue())); @@ -408,8 +381,7 @@ static abstract class ReadBenchmark extends RowOrientedBenchmark { protected HFile.Reader reader; - public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows) { + public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -428,8 +400,7 @@ void tearDown() throws Exception { static class SequentialReadBenchmark extends ReadBenchmark { private HFileScanner scanner; - public SequentialReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public SequentialReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -459,15 +430,14 @@ protected int getReportingPeriod() { static class UniformRandomReadBenchmark extends ReadBenchmark { - public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @Override void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, true); - byte [] b = getRandomRow(); + byte[] b = getRandomRow(); if (scanner.seekTo(createCell(b)) < 0) { LOG.info("Not able to seekTo " + new String(b)); return; @@ -478,22 +448,21 @@ void doRow(int i) throws Exception { PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } - private byte [] getRandomRow() { + private byte[] getRandomRow() { return format(ThreadLocalRandom.current().nextInt(totalRows)); } } static class UniformRandomSmallScan extends ReadBenchmark { - public UniformRandomSmallScan(Configuration conf, FileSystem fs, - Path mf, int totalRows) { - super(conf, fs, mf, totalRows/10); + public UniformRandomSmallScan(Configuration conf, FileSystem fs, Path mf, int totalRows) { + super(conf, 
fs, mf, totalRows / 10); } @Override void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, false); - byte [] b = getRandomRow(); + byte[] b = getRandomRow(); // System.out.println("Random row: " + new String(b)); Cell c = createCell(b); if (scanner.seekTo(c) != 0) { @@ -503,7 +472,7 @@ void doRow(int i) throws Exception { // TODO: HFileScanner doesn't do Cells yet. Temporary fix. c = scanner.getCell(); // System.out.println("Found row: " + - // new String(c.getRowArray(), c.getRowOffset(), c.getRowLength())); + // new String(c.getRowArray(), c.getRowOffset(), c.getRowLength())); PerformanceEvaluationCommons.assertKey(b, c); for (int ii = 0; ii < 30; ii++) { if (!scanner.next()) { @@ -515,7 +484,7 @@ void doRow(int i) throws Exception { } } - private byte [] getRandomRow() { + private byte[] getRandomRow() { return format(ThreadLocalRandom.current().nextInt(totalRows)); } } @@ -524,8 +493,7 @@ static class GaussianRandomReadBenchmark extends ReadBenchmark { private RandomData randomData = new RandomDataImpl(); - public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -544,18 +512,15 @@ void doRow(int i) throws Exception { } } - private byte [] getGaussianRandomRowBytes() { - int r = (int) randomData.nextGaussian((double)totalRows / 2.0, - (double)totalRows / 10.0); + private byte[] getGaussianRandomRowBytes() { + int r = (int) randomData.nextGaussian((double) totalRows / 2.0, (double) totalRows / 10.0); // make sure r falls into [0,totalRows) - return format(Math.min(totalRows, Math.max(r,0))); + return format(Math.min(totalRows, Math.max(r, 0))); } } /** - * @param args - * @throws Exception - * @throws IOException + * nnn */ public static void main(String[] args) throws Exception { new HFilePerformanceEvaluation().runBenchmarks(); @@ -564,8 +529,10 @@ public static void main(String[] args) throws Exception { private String getCipherName(Configuration conf, String cipherName) { if (cipherName.equals("aes")) { String provider = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY); - if (provider == null || provider.equals("") - || provider.equals(DefaultCipherProvider.class.getName())) { + if ( + provider == null || provider.equals("") + || provider.equals(DefaultCipherProvider.class.getName()) + ) { return "aes-default"; } else if (provider.equals(CryptoCipherProvider.class.getName())) { return "aes-commons"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java index c490c836c634..823d890348f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java @@ -1,26 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.Set; -import java.util.Collections; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java index 68935ffe5c8c..50d671588524 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -32,74 +31,62 @@ public class MetaMockingUtil { /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null - * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. - * @throws IOException + * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n */ - public static Result getMetaTableRowResult(final RegionInfo region) - throws IOException { + public static Result getMetaTableRowResult(final RegionInfo region) throws IOException { return getMetaTableRowResult(region, null, null, null); } /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null - * @param sn to use making startcode and server hostname:port in meta or null - * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. - * @throws IOException + * @param sn to use making startcode and server hostname:port in meta or null + * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. 
n */ public static Result getMetaTableRowResult(final RegionInfo region, final ServerName sn) - throws IOException { + throws IOException { return getMetaTableRowResult(region, sn, null, null); } /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the RegionInfo object or null - * @param sn to use making startcode and server hostname:port in meta or null + * @param sn to use making startcode and server hostname:port in meta or null * @param splita daughter region or null - * @param splitb daughter region or null - * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. - * @throws IOException + * @param splitb daughter region or null + * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n */ public static Result getMetaTableRowResult(RegionInfo region, final ServerName sn, - RegionInfo splita, RegionInfo splitb) throws IOException { + RegionInfo splita, RegionInfo splitb) throws IOException { List kvs = new ArrayList<>(); if (region != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - RegionInfo.toByteArray(region))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(region))); } if (sn != null) { - kvs.add(new KeyValue(region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getAddress().toString()))); - kvs.add(new KeyValue(region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(sn.getStartcode()))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getAddress().toString()))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode()))); } if (splita != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, - RegionInfo.toByteArray(splita))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splita))); } if (splitb != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, - RegionInfo.toByteArray(splitb))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitb))); } - //important: sort the kvs so that binary search work + // important: sort the kvs so that binary search work Collections.sort(kvs, MetaCellComparator.META_COMPARATOR); return Result.create(kvs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java index f13258f93736..1b95c56e84a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java @@ -83,8 +83,10 @@ public Builder setConfiguration(Supplier supplier) { } public MiniClusterRule build() { - return new MiniClusterRule(conf, miniClusterOption != null ? 
miniClusterOption : - StartTestingClusterOption.builder().build()); + return new MiniClusterRule(conf, + miniClusterOption != null + ? miniClusterOption + : StartTestingClusterOption.builder().build()); } } @@ -111,8 +113,8 @@ public HBaseTestingUtil getTestingUtility() { } /** - * Create a {@link Connection} to the managed {@link SingleProcessHBaseCluster}. It's up to - * the caller to {@link Connection#close() close()} the connection when finished. + * Create a {@link Connection} to the managed {@link SingleProcessHBaseCluster}. It's up to the + * caller to {@link Connection#close() close()} the connection when finished. */ public Connection createConnection() { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index ebe6edd73c49..5f6297c8dc4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Basic mock region server services. Should only be instantiated by HBaseTestingUtility.b + * Basic mock region server services. Should only be instantiated by HBaseTestingUtility.b */ public class MockRegionServerServices implements RegionServerServices { protected static final Logger LOG = LoggerFactory.getLogger(MockRegionServerServices.class); @@ -94,7 +94,7 @@ public MockRegionServerServices(ZKWatcher zkw, ServerName serverName) { this.conf = (zkw == null ? new Configuration() : zkw.getConfiguration()); } - public MockRegionServerServices(){ + public MockRegionServerServices() { this(null, null); } @@ -226,7 +226,7 @@ public HFileSystem getFileSystem() { } public void setFileSystem(FileSystem hfs) { - this.hfs = (HFileSystem)hfs; + this.hfs = (HFileSystem) hfs; } @Override @@ -256,7 +256,7 @@ public ChoreService getChoreService() { @Override public void updateRegionFavoredNodesMapping(String encodedRegionName, - List favoredNodes) { + List favoredNodes) { } @Override @@ -306,7 +306,7 @@ public MetricsRegionServer getMetrics() { @Override public EntityLock regionLock(List regionInfos, String description, Abortable abort) - throws IOException { + throws IOException { return null; } @@ -335,8 +335,8 @@ public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) { } @Override - public boolean reportFileArchivalForQuotas( - TableName tableName, Collection> archivedFiles) { + public boolean reportFileArchivalForQuotas(TableName tableName, + Collection> archivedFiles) { return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index 5268d3d7b380..98d910278b8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; @@ -32,8 +30,7 @@ public abstract class MultithreadedTestUtil { - private static final Logger LOG = - LoggerFactory.getLogger(MultithreadedTestUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTestUtil.class); public static class TestContext { private final Configuration conf; @@ -50,7 +47,7 @@ protected Configuration getConf() { return conf; } - public synchronized boolean shouldRun() { + public synchronized boolean shouldRun() { return !stopped && err == null; } @@ -75,6 +72,7 @@ public void waitFor(long millis) throws Exception { } } } + private synchronized void checkException() throws Exception { if (err != null) { throw new RuntimeException("Deferred", err); @@ -109,8 +107,7 @@ public void stop() throws Exception { } /** - * A thread that can be added to a test context, and properly - * passes exceptions through. + * A thread that can be added to a test context, and properly passes exceptions through. */ public static abstract class TestThread extends Thread { protected final TestContext ctx; @@ -157,13 +154,16 @@ public final void doWork() throws Exception { } public abstract void doAnAction() throws Exception; - public void workDone() throws IOException {} + + public void workDone() throws IOException { + } } /** - * Verify that no assertions have failed inside a future. - * Used for unit tests that spawn threads. E.g., + * Verify that no assertions have failed inside a future. Used for unit tests that spawn threads. + * E.g., *

    + * *

        *   List<Future<Void>> results = Lists.newArrayList();
        *   Future<Void> f = executor.submit(new Callable<Void> {
    @@ -174,14 +174,14 @@ public void workDone() throws IOException {}
        *   results.add(f);
        *   assertOnFutures(results);
        * 
    + * * @param threadResults A list of futures - * @throws InterruptedException If interrupted when waiting for a result - * from one of the futures - * @throws ExecutionException If an exception other than AssertionError - * occurs inside any of the futures + * @throws InterruptedException If interrupted when waiting for a result from one of the futures + * @throws ExecutionException If an exception other than AssertionError occurs inside any of the + * futures */ public static void assertOnFutures(List> threadResults) - throws InterruptedException, ExecutionException { + throws InterruptedException, ExecutionException { for (Future threadResult : threadResults) { try { threadResult.get(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java index 97d326aa6324..8a6347ce6056 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,10 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Code shared by PE tests. */ @@ -40,28 +37,29 @@ public static void assertValueSize(final int expectedSize, final int got) { } } - public static void assertKey(final byte [] expected, final ByteBuffer got) { - byte [] b = new byte[got.limit()]; + public static void assertKey(final byte[] expected, final ByteBuffer got) { + byte[] b = new byte[got.limit()]; got.get(b, 0, got.limit()); assertKey(expected, b); } - public static void assertKey(final byte [] expected, final Cell c) { + public static void assertKey(final byte[] expected, final Cell c) { assertKey(expected, c.getRowArray(), c.getRowOffset(), c.getRowLength()); } - public static void assertKey(final byte [] expected, final byte [] got) { + public static void assertKey(final byte[] expected, final byte[] got) { assertKey(expected, got, 0, got.length); } - public static void assertKey(final byte [] expected, final byte [] gotArray, - final int gotArrayOffset, final int gotArrayLength) { - if (!org.apache.hadoop.hbase.util.Bytes.equals(expected, 0, expected.length, - gotArray, gotArrayOffset, gotArrayLength)) { - throw new AssertionError("Expected " + - org.apache.hadoop.hbase.util.Bytes.toString(expected) + - " but got " + - org.apache.hadoop.hbase.util.Bytes.toString(gotArray, gotArrayOffset, gotArrayLength)); + public static void assertKey(final byte[] expected, final byte[] gotArray, + final int gotArrayOffset, final int gotArrayLength) { + if ( + !org.apache.hadoop.hbase.util.Bytes.equals(expected, 0, expected.length, gotArray, + gotArrayOffset, gotArrayLength) + ) { + throw new AssertionError( + "Expected " + org.apache.hadoop.hbase.util.Bytes.toString(expected) + " but got " + + org.apache.hadoop.hbase.util.Bytes.toString(gotArray, gotArrayOffset, gotArrayLength)); } } @@ -72,10 +70,10 @@ public static void concurrentReads(final Runnable r) { for (int i = 0; i < count; i++) { threads.add(new Thread(r, "concurrentRead-" + i)); } - for (Thread t: threads) { + for (Thread t : threads) { t.start(); } - for (Thread t: 
threads) { + for (Thread t : threads) { try { t.join(); } catch (InterruptedException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java index b4ba729b1d52..89e64317d923 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ private ProcedureTestUtil() { } private static Optional getProcedure(HBaseTestingUtil util, - Class> clazz, JsonParser parser) throws IOException { + Class> clazz, JsonParser parser) throws IOException { JsonArray array = parser.parse(util.getAdmin().getProcedures()).getAsJsonArray(); Iterator iterator = array.iterator(); while (iterator.hasNext()) { @@ -54,7 +54,7 @@ private static Optional getProcedure(HBaseTestingUtil util, } public static void waitUntilProcedureWaitingTimeout(HBaseTestingUtil util, - Class> clazz, long timeout) throws IOException { + Class> clazz, long timeout) throws IOException { JsonParser parser = new JsonParser(); util.waitFor(timeout, () -> getProcedure(util, clazz, parser) @@ -63,7 +63,7 @@ public static void waitUntilProcedureWaitingTimeout(HBaseTestingUtil util, } public static void waitUntilProcedureTimeoutIncrease(HBaseTestingUtil util, - Class> clazz, int times) throws IOException, InterruptedException { + Class> clazz, int times) throws IOException, InterruptedException { JsonParser parser = new JsonParser(); long oldTimeout = 0; int timeoutIncrements = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java index da4101dbb853..3de8227f204a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; @@ -76,7 +77,7 @@ public class RegionReplicationLagEvaluation extends Configured implements Tool { private FastLongHistogram histogram = new FastLongHistogram(); @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") FastLongHistogram getHistogram() { return histogram; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java index af30b58f463d..5e72464ecb55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,7 +61,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface { /** * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster + * @param conf Configuration to be used for cluster * @param numRegionServers initial number of region servers to start. */ public SingleProcessHBaseCluster(Configuration conf, int numRegionServers) @@ -72,8 +71,8 @@ public SingleProcessHBaseCluster(Configuration conf, int numRegionServers) /** * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster - * @param numMasters initial number of masters to start. + * @param conf Configuration to be used for cluster + * @param numMasters initial number of masters to start. * @param numRegionServers initial number of region servers to start. */ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegionServers) @@ -83,8 +82,8 @@ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegi /** * Start a MiniHBaseCluster. - * @param conf Configuration to be used for cluster - * @param numMasters initial number of masters to start. + * @param conf Configuration to be used for cluster + * @param numMasters initial number of masters to start. * @param numRegionServers initial number of region servers to start. */ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegionServers, @@ -96,9 +95,9 @@ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegi /** * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster - * restart where for sure the regionservers come up on same address+port (but just with - * different startcode); by default mini hbase clusters choose new arbitrary ports on - * each cluster start. + * restart where for sure the regionservers come up on same address+port (but just + * with different startcode); by default mini hbase clusters choose new arbitrary + * ports on each cluster start. 
*/ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandByMasters, int numRegionServers, List rsPorts, Class masterClass, @@ -470,9 +469,10 @@ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) { /** * Shut down the specified region server cleanly * @param serverNumber Used as index into a list. - * @param shutdownFS True is we are to shutdown the filesystem as part of this regionserver's - * shutdown. Usually we do but you do not want to do this if you are running multiple - * regionservers in a test and you shut down one before end of the test. + * @param shutdownFS True is we are to shutdown the filesystem as part of this regionserver's + * shutdown. Usually we do but you do not want to do this if you are running + * multiple regionservers in a test and you shut down one before end of the + * test. * @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber, @@ -585,9 +585,9 @@ public JVMClusterUtil.MasterThread stopMaster(int serverNumber) { /** * Shut down the specified master cleanly * @param serverNumber Used as index into a list. - * @param shutdownFS True is we are to shutdown the filesystem as part of this master's shutdown. - * Usually we do but you do not want to do this if you are running multiple master in a - * test and you shut down one before end of the test. + * @param shutdownFS True is we are to shutdown the filesystem as part of this master's + * shutdown. Usually we do but you do not want to do this if you are running + * multiple master in a test and you shut down one before end of the test. * @return the master that was stopped */ public JVMClusterUtil.MasterThread stopMaster(int serverNumber, final boolean shutdownFS) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java index 30c54244dfc9..30519764af5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -173,11 +172,11 @@ public boolean isCreateWALDir() { @Override public String toString() { - return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass + - ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + - ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts=" + - Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir=" + - createRootDir + ", createWALDir=" + createWALDir + '}'; + return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass + + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts=" + + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir=" + + createRootDir + ", createWALDir=" + createWALDir + '}'; } /** @@ -265,8 +264,7 @@ public Builder numZkServers(int numZkServers) { } public Builder numWorkers(int numWorkers) { - return numDataNodes(numWorkers) - .numRegionServers(numWorkers); + return numDataNodes(numWorkers).numRegionServers(numWorkers); } public Builder createRootDir(boolean createRootDir) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java index ff770815dd2f..8e7f924ce102 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ public class TestAcidGuaranteesWithAdaptivePolicy extends AcidGuaranteesTestBase @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAcidGuaranteesWithAdaptivePolicy.class); + HBaseClassTestRule.forClass(TestAcidGuaranteesWithAdaptivePolicy.class); @Override protected MemoryCompactionPolicy getMemoryCompactionPolicy() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java index 02c6a98a7a41..5f1b5e0e41ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
@@ -26,7 +26,7 @@ public class TestAcidGuaranteesWithBasicPolicy extends AcidGuaranteesTestBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAcidGuaranteesWithBasicPolicy.class);
+    HBaseClassTestRule.forClass(TestAcidGuaranteesWithBasicPolicy.class);
 
   @Override
   protected MemoryCompactionPolicy getMemoryCompactionPolicy() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java
index 5f2e245a8349..e1d5b55cf523 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -26,7 +26,7 @@ public class TestAcidGuaranteesWithEagerPolicy extends AcidGuaranteesTestBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAcidGuaranteesWithEagerPolicy.class);
+    HBaseClassTestRule.forClass(TestAcidGuaranteesWithEagerPolicy.class);
 
   @Override
   protected MemoryCompactionPolicy getMemoryCompactionPolicy() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java
index 4b2bcd04733b..c8daa07724eb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -26,7 +26,7 @@ public class TestAcidGuaranteesWithNoInMemCompaction extends AcidGuaranteesTestB
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAcidGuaranteesWithNoInMemCompaction.class);
+    HBaseClassTestRule.forClass(TestAcidGuaranteesWithNoInMemCompaction.class);
 
   @Override
   protected MemoryCompactionPolicy getMemoryCompactionPolicy() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
index 8dd82fe0e0c9..88de0e43a200 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.assertEquals;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
@@ -34,7 +35,7 @@ public class TestCachedClusterId {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestCachedClusterId.class);
+    HBaseClassTestRule.forClass(TestCachedClusterId.class);
 
   private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
 
@@ -44,6 +45,7 @@ public class TestCachedClusterId {
 
   private static class GetClusterIdThread extends TestThread {
     CachedClusterId cachedClusterId;
+
     public GetClusterIdThread(TestContext ctx, CachedClusterId clusterId) {
       super(ctx);
       cachedClusterId = clusterId;
@@ -76,8 +78,8 @@ public void testClusterIdMatch() {
   @Test
   public void testMultiThreadedGetClusterId() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
-    CachedClusterId cachedClusterId = new CachedClusterId(TEST_UTIL.getHBaseCluster().getMaster(),
-        conf);
+    CachedClusterId cachedClusterId =
+      new CachedClusterId(TEST_UTIL.getHBaseCluster().getMaster(), conf);
     TestContext context = new TestContext(conf);
     int numThreads = 16;
     for (int i = 0; i < numThreads; i++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java
index 7cf9b6c012ec..3d3ca12bd82d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.assertTrue;
+
 import java.util.List;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -28,12 +29,12 @@
 /**
  * Checks tests are categorized.
  */
-@Category({MiscTests.class, SmallTests.class})
+@Category({ MiscTests.class, SmallTests.class })
 public class TestCheckTestClasses {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestCheckTestClasses.class);
+    HBaseClassTestRule.forClass(TestCheckTestClasses.class);
 
   /**
    * Throws an assertion if we find a test class without category (small/medium/large/integration).
@@ -48,7 +49,7 @@ public void checkClasses() throws Exception {
         badClasses.add(c);
       }
     }
-    assertTrue("There are " + badClasses.size() + " test classes without category: "
-        + badClasses, badClasses.isEmpty());
+    assertTrue("There are " + badClasses.size() + " test classes without category: " + badClasses,
+      badClasses.isEmpty());
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index 4e38eba6417c..0afd0fa0a7c2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -25,7 +25,6 @@
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.Waiter.Predicate;
@@ -71,7 +70,7 @@ public class TestClientClusterMetrics {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestClientClusterMetrics.class);
+    HBaseClassTestRule.forClass(TestClientClusterMetrics.class);
 
   private static HBaseTestingUtil UTIL;
   private static Admin ADMIN;
@@ -84,13 +83,13 @@ public class TestClientClusterMetrics {
 
   // We need to promote the visibility of tryRegionServerReport for this test
   public static class MyRegionServer
-      extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer {
+    extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer {
     public MyRegionServer(Configuration conf) throws IOException, InterruptedException {
       super(conf);
     }
+
     @Override
-    public void tryRegionServerReport(long reportStartTime, long reportEndTime)
-        throws IOException {
+    public void tryRegionServerReport(long reportStartTime, long reportEndTime) throws IOException {
       super.tryRegionServerReport(reportStartTime, reportEndTime);
     }
   }
@@ -100,8 +99,8 @@ public static void setUpBeforeClass() throws Exception {
     Configuration conf = HBaseConfiguration.create();
     conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
     UTIL = new HBaseTestingUtil(conf);
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .rsClass(TestClientClusterMetrics.MyRegionServer.class)
+    StartTestingClusterOption option =
+      StartTestingClusterOption.builder().rsClass(TestClientClusterMetrics.MyRegionServer.class)
         .numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build();
     UTIL.startMiniCluster(option);
     CLUSTER = UTIL.getHBaseCluster();
@@ -125,11 +124,11 @@ public void testDefaults() throws Exception {
     Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
     Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0);
     Assert.assertEquals(origin.getBackupMasterNames().size(),
-        defaults.getBackupMasterNames().size());
+      defaults.getBackupMasterNames().size());
     Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size());
     Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount());
     Assert.assertEquals(origin.getLiveServerMetrics().size(),
-        defaults.getLiveServerMetrics().size());
+      defaults.getLiveServerMetrics().size());
     Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
     Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
     Assert.assertEquals(ADMIN.getRegionServers().size(), defaults.getServersName().size());
@@ -137,11 +136,10 @@ public void testDefaults() throws Exception {
 
   @Test
   public void testAsyncClient() throws Exception {
-    try (AsyncConnection asyncConnect = ConnectionFactory.createAsyncConnection(
-      UTIL.getConfiguration()).get()) {
+    try (AsyncConnection asyncConnect =
+      ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
       AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
-      CompletableFuture<ClusterMetrics> originFuture =
-        asyncAdmin.getClusterMetrics();
+      CompletableFuture<ClusterMetrics> originFuture = asyncAdmin.getClusterMetrics();
       CompletableFuture<ClusterMetrics> defaultsFuture =
         asyncAdmin.getClusterMetrics(EnumSet.allOf(Option.class));
       ClusterMetrics origin = originFuture.get();
@@ -160,8 +158,8 @@ public void testAsyncClient() throws Exception {
       Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
       Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
      origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> {
-        RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount()
-            .get(tableName);
+        RegionStatesCount defaultRegionStatesCount =
+          defaults.getTableRegionStatesCount().get(tableName);
         Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
       }));
     }
@@ -190,11 +188,11 @@ public boolean evaluate() throws Exception {
     });
     // Retrieve live servers and dead servers info.
     EnumSet